With these changes kernel/sched directories of branches android-msm-8998-4.4-common (under partner-android/kernel/private/msm-qcom) and android-4.4 (under partner-android/kernel/common) match Bug: 32492390 Test: MTP boots to UI and successfully runs Vellamo and Benchmark tests defconfig: re-add interactive governor qcom-cpufreq: remove calls to trace cpu frequency switch Revert "trace: cpu_freq_switch: Add profiler for CPU frequency switch times"dd2460fDEBUG: sched,cpufreq: add cpu_capacity change tracepoint7bb5218cgroup: Remove leftover instances of allow_attache78f134CHROMIUM: remove Android's cgroup generic permissions checks53c63d3sched: Add Kconfig option DEFAULT_USE_ENERGY_AWARE to set ENERGY_AWARE feature flagdf23243sched/fair: remove printk while schedule is in progresscf43809sched/walt: Drop arch-specific timer access5449876eas/sched/fair: Fixing comments in find_best_target.a85045cFIXUP: sched/tune: update accouting before CPU capacityfb3cce0FIXUP: sched/tune: add fixes missing from a previous patchaeb4a31sched/walt: use do_div instead of division operator6c7e03dDEBUG: cpufreq: fix cpu_capacity tracing build for non-smp systems1c81f45sched/walt: include missing header for arm_timer_read_counter()8133095cpufreq: Kconfig: Fixup incorrect selection by CPU_FREQ_DEFAULT_GOV_SCHED7655461sched/fair: Avoid redundant idle_cpu() call in update_sg_lb_stats()34828bdFIXUP: sched: scheduler-driven cpu frequency selection2a44453sched/rt: Add Kconfig option to enable panicking for RT throttling989f33fsched/rt: print RT tasks when RT throttling is activated3276c3dUPSTREAM: sched: Fix a race between __kthread_bind() and sched_setaffinity()07ec7dbsched/fair: Favor higher cpus only for boosted tasks142b2acvmstat: make vmstat_updater deferrable again and shut down on idle74b4fa8sched/fair: call OPP update when going idle after migrationbf93a36sched/cpufreq_sched: fix thermal capping eventsc80a9afsched/fair: Picking cpus with low OPPs for tasks that prefer idle 
CPUsb9534b8FIXUP: sched/tune: do initialization as a postcore_initicall93db70fDEBUG: sched: add tracepoint for RD overutilizedc5a00c2sched/tune: Introducing a new schedtune attribute prefer_idled4cda03sched: use util instead of capacity to select busy cpu23ed57darch_timer: add error handling when the MPM global timer is cleared8935b6bFIXUP: sched: Fix double-release of spinlock in move_queued_task740d312FIXUP: sched/fair: Fix hang during suspend in sched_group_energy5156b67FIXUP: sched: fix SchedFreq integration for both PELT and WALT782c9d6sched: EAS: Avoid causing spikes to max-freq unnecessarilydfc1151FIXUP: sched: fix set_cfs_cpu_capacity when WALT is in use519c627sched/walt: Accounting for number of irqs pending on each coreefb86bdsched: Introduce Window Assisted Load Tracking (WALT)345be81sched/tune: fix PB and PC cuts indexes definition369bcbbsched/fair: optimize idle cpu selection for boosted tasksaf14760FIXUP: sched/tune: fix accounting for runnable tasks7f8f24asched/tune: use a single initialisation function274bbcfsched/{fair,tune}: simplify fair.c code254f509FIXUP: sched/tune: fix payoff calculation for boost region00aae8dsched/tune: Add support for negative boost values6ba071dFIX: sched/tune: move schedtune_nornalize_energy into fair.c28e8cb9FIX: sched/tune: update usage of boosted task utilisation on CPU selectionc50cc22sched/fair: add tunable to set initial task load4a5e890sched/fair: add tunable to force selection at cpu granularity2e9abbcsched: EAS: take cstate into account when selecting idle cored753e92sched/cpufreq_sched: Consolidated update765c2abFIXUP: sched: fix build for non-SMP targetae54c77DEBUG: sched/tune: add tracepoint on P-E space filtering4525aa3DEBUG: sched/tune: add tracepoint for energy_diff() values962b7c1DEBUG: sched/tune: add tracepoint for task boost signal3a400abCHROMIUM: sched: update the average of nr_running85a09f2UPSTREAM: sched: panic on corrupted stack end287d9d0cpufreq: interactive: drop cpufreq_{get,put}_global_kobject 
func callsd495067Revert "cpufreq: interactive: build fixes for 4.4"fed95d9DEBUG: schedtune: add tracepoint for schedtune_tasks_update() valuesa09a25cDEBUG: schedtune: add tracepoint for CPU boost signalc8a65d2DEBUG: schedtune: add tracepoint for SchedTune configuration update0a0f4aaDEBUG: sched: add energy procfs interfacee5a2599DEBUG: sched: add tracepoint for CPU load/util signals8017fd7DEBUG: sched: add tracepoint for task load/util signals99ed4e5DEBUG: sched: add tracepoint for cpu/freq scale invariancea2a6dc7sched/fair: filter energy_diff() based on energy_payoff value637ee37sched/tune: add support to compute normalized energy36967b2sched/fair: keep track of energy/capacity variationsa8f6558sched/fair: add boosted task utilizationa515b88sched/{fair,tune}: track RUNNABLE tasks impact on per CPU boost value9cd53fbsched/tune: compute and keep track of per CPU boost value13001f4sched/tune: add initial support for CGroups based boosting07e2294sched/fair: add boosted CPU usage6ed2714sched/fair: add function to convert boost value into "margin"344f4ecsched/tune: add sysctl interface to define a boost value08d1cfdfixup! sched/fair: jump to max OPP when crossing UP threshold3eb2910fixup! 
sched: scheduler-driven cpu frequency selection6e1e1edsched: rt scheduler sets capacity requirementfab5cc5sched: deadline: use deadline bandwidth in scale_rt_capacitycd248fasched: remove call of sched_avg_update from sched_rt_avg_update9d44dc7sched/cpufreq_sched: add trace events6b6c192sched/fair: jump to max OPP when crossing UP thresholdf99e3fesched/fair: cpufreq_sched triggers for load balancing7ff814ddsched/{core,fair}: trigger OPP change request on fork()ea429ccsched/fair: add triggers for OPP change requestsa967a45sched: scheduler-driven cpu frequency selectionc3b2e76cpufreq: introduce cpufreq_driver_is_slow0d2b1cdsched: Consider misfit tasks when load-balancingf2a8923sched: Add group_misfit_task load-balance type563ddb6sched: Add per-cpu max capacity to sched_group_capacitye14f151sched: Do eas idle balance regardless of the rq avg idle value05a773bsched: Update max cpu capacity in case of max frequency constraintsb03f1bacpufreq: Max freq invariant scheduler load-tracking and cpu capacity supportd2b3db0sched: Support for extracting EAS energy costs from DTb71188bsched: Disable energy-unfriendly nohz kicks52b7b8asched: Consider a not over-utilized energy-aware system as balancede38982bsched: Energy-aware wake-up task placementec055b9sched: Determine the current sched_group idle-state19a5ebesched, cpuidle: Track cpuidle state index in the scheduler1b5ec5dsched: Add over-utilization/tipping point indicator2c6a8a4sched: Estimate energy impact of scheduling decisionsdf2030csched: Extend sched_group_energy to test load-balancing decisionsc1770a5sched: Calculate energy consumption of sched_group5ec8ccasched: Highest energy aware balancing sched_domain level pointerb6c0399sched: Relocated cpu_util() and change return type3e55d2fsched: Compute cpu capacity available at current frequencyf0f739dsched: Introduce SD_SHARE_CAP_STATES sched_domain flag4ce990esched: Initialize energy data structures0b3bda5sched: Introduce energy data structurese496f32sched: Make energy 
awareness a sched feature681fa14sched: Prevent unnecessary active balance of single task in sched groupcda2bd3sched: Enable idle balance to pull single task towards cpu with higher capacity8a5c033sched: Consider spare cpu capacity at task wake-upe8bcb272sched: Add cpu capacity awareness to wakeup balancing1eb2b8asched: Store system-wide maximum cpu capacity in root domain259bd4ccpufreq: Frequency invariant scheduler load-tracking support440a577cpufreq: interactive: only apply interactive boost when enabled2ebb4d9cpufreq: interactive: fix policy locking31a3049android: skip building drivers as modules0d6687acgroup: Fix issues in allow_attach callback2665ab9trace: cpufreq: Add tracing for min/max cpufreq4808d28subsystem: CPU FREQUENCY DRIVERS- Set cpu_load calculation on current frequencybc68f6ccpufreq: interactive: build fixes for 4.4e197a21cpufreq: interactive: replace strict_strtoul() with kstrtoul()b1b0fd3cpufreq: interactive: Rearm governor timer at max freq0ad0834cpufreq: interactive: Implement cluster-based min_sample_time54c3ec0cpufreq: interactive: Exercise hispeed settings at a policy level4db1f0acpufreq: interactive: Round up timer_rate to match jiffy16e989dcpufreq: interactive: Don't set floor_validate_time during boost10fe4289cpufreq: interactive: Put global cpufreq kobject on failure1e04bc2cpufreq: interactive: only boost tunable affected cpus0b4d5f5cpufreq: interactive: don't skip waking up speedchange_task if target_freq > policy->cur39b5c1bcpufreq: interactive: make common_tunables staticbc88e63cpufreq: interactive: prevents the frequency to directly raise above the hispeed_freq from a lower frequency.61a37b3cpufreq: interactive: remove compilation error from commit 49cc72365fb7ee87762a7ccc6a32ef68627216c56c2c93bcpufreq: interactive: turn boost_pulse off on boost off528ef7dcpufreq: interactive: restructure CPUFREQ_GOV_LIMITS479449ecpufreq: interactive: hold reference on global cpufreq kobject if needed0be8516cpufreq: interactive: Use generic 
get_cpu_idle_time() from cpufreq.c8385286cpufreq: interactive: fix NULL pointer dereference at sysfs opsef37f1fcpufreq: interactive: fix compiling warnings8fbe05ecpufreq: interactive: delete timers for GOV_START5690bb1cpufreq: Interactive: Implement per policy instances of governor4d30036cpufreq: interactive: Move definition of cpufreq_gov_interactive downwards8db7e96cpufreq: interactive: Remove unnecessary cpu_online() check6728dfacpufreq: interactive: fix show_target_loads and show_above_hispeed_delaye8ad1a85cpufreq: interactive: resched timer if max freq raised7a5c8bacpufreq: interactive: fix race on cpufreq TRANSITION notifier83720c3cpufreq: interactive: avoid underflow on active time calculationaedd63acpufreq: interactive: reduce chance of zero time delta on load evalf71b480cpufreq: interactive: handle errors from cpufreq_frequency_table_targetf587d09cpufreq: interactive: fix uninitialized spinlock75f9b06cpufreq: interactive: base above_hispeed_delay on target freq, not currentcc80bd4cpufreq: interactive: fix crash on error paths in get_tokenized_data6b2fd6ccpufreq: interactive: add io_is_busy interfacec9d7bc6cpufreq: interactive: allow arbitrary speed / delay mappings939e7f1cpufreq: interactive: fix race on governor start/stopc7ad1e1cpufreq: interactive: fix deadlock on spinlock in timer06371b5cpufreq: interactive: don't handle transition notification if not enabledc5ec6c6cpufreq: interactive: init default values at compile timeb4f2820cpufreq: interactive: default go_hispeed_load 99%, doc updates1dc7486cpufreq: interactive: fix race on timer restart on governor start583695fcpufreq: interactive: fix racy timer stopping74f0d69cpufreq: interactive: fix boosting logic959433fcpufreq: interactive: add timer slack to limit idle at speed > mina6d6051cpufreq: interactive: specify duration of CPU speed boost pulse2c2b492cpufreq: interactive: adjust load for changes in speed482f37ecpufreq: interactive: remove load since last speed changefbc1d52cpufreq: interactive: 
allow arbitrary speed / target load mappingse337153cpufreq: interactive: apply above_hispeed_delay to each step above hispeed4727e1acpufreq: interactive: change speed according to current speed and target loaddc202c3cpufreq: interactive: trace actual speed in target speed decisions2aadfa7cpufreq: interactive: kick timer on idle exit past expiryf5b4e66cpufreq: interactive: use deferrable timer by default916d056cpufreq: interactive: pin timers to associated CPU53d77c9cpufreq: interactive: run at fraction of hispeed_freq when load is low984e8bbcpufreq: interactive: always limit initial speed bump to hispeedf9e8727cpufreq: interactive: remove input_boost handling4429f8bcpufreq: interactive: handle speed up and down in the realtime taskd00caa6cpufreq: interactive: keep freezer happy when not current governor5722666cpufreq: interactive: take idle notifications only when active122c60bcpufreq: interactive: restart above_hispeed_delay at each hispeed loadb62e5a1cpufreq-interactive: Compile fixupc4241c9cpufreq: interactive: add boost pulse interfaceb486bd1cpufreq: interactive: set floor for boosted speed1ea7b77cpufreq: interactive: Add sysfs boost interface for hints from userspace910dea0cpufreq: interactive: remove unused target_validate_time_in_idlec48fcaacpufreq: interactive: Boost frequency on touchscreen input1c31ed4cpufreq: interactive: Separate speed target revalidate time and initial set time759fcddcpufreq: interactive: base hispeed bump on target freq, not actual3b14df5cpufreq: interactive: adjust code and documentation to match4ca4034cpufreq: interactive: configurable delay before raising above hispeed27c22a7cpufreq: interactive: don't drop speed if recently at higher load7b2dc7ecpufreq: interactive: set at least hispeed when above hispeed load077dfe8cpufreq: interactive: apply intermediate load to max speed not current2561571cpufreq interactive governor: event tracing008bd61cpufreq: interactive: New 'interactive' governor7abc800sched: add sched blocked tracepoint 
which dumps out context of sleep.b3bb0e5sched: Enable might_sleep before initializing drivers. Signed-off-by: Andres Oportus <andresoportus@google.com> Change-Id: Ibbeaddb04b44dba77aaead172d07d5bb29e61a3e
290 lines
7.5 KiB
C
290 lines
7.5 KiB
C
#ifndef _LINUX_VMSTAT_H
|
|
#define _LINUX_VMSTAT_H
|
|
|
|
#include <linux/types.h>
|
|
#include <linux/percpu.h>
|
|
#include <linux/mm.h>
|
|
#include <linux/mmzone.h>
|
|
#include <linux/vm_event_item.h>
|
|
#include <linux/atomic.h>
|
|
|
|
extern int sysctl_stat_interval;
|
|
|
|
#ifdef CONFIG_VM_EVENT_COUNTERS
|
|
/*
|
|
* Light weight per cpu counter implementation.
|
|
*
|
|
* Counters should only be incremented and no critical kernel component
|
|
* should rely on the counter values.
|
|
*
|
|
* Counters are handled completely inline. On many platforms the code
|
|
* generated will simply be the increment of a global address.
|
|
*/
|
|
|
|
struct vm_event_state {
|
|
unsigned long event[NR_VM_EVENT_ITEMS];
|
|
};
|
|
|
|
DECLARE_PER_CPU(struct vm_event_state, vm_event_states);
|
|
|
|
/*
|
|
* vm counters are allowed to be racy. Use raw_cpu_ops to avoid the
|
|
* local_irq_disable overhead.
|
|
*/
|
|
/*
 * Increment the event counter for @item on the local CPU using the raw
 * (non-preemption-safe) per-cpu op.  Per the comment above, these
 * counters are allowed to be racy, so the cheaper raw_cpu_inc is used
 * to avoid local_irq_disable overhead.
 */
static inline void __count_vm_event(enum vm_event_item item)
{
	raw_cpu_inc(vm_event_states.event[item]);
}
|
|
|
|
/*
 * Increment the event counter for @item.  Uses this_cpu_inc, which
 * (unlike the raw __count_vm_event variant) is safe to call from
 * preemptible context.
 */
static inline void count_vm_event(enum vm_event_item item)
{
	this_cpu_inc(vm_event_states.event[item]);
}
|
|
|
|
/*
 * Add @delta to the event counter for @item on the local CPU using the
 * raw (non-preemption-safe) per-cpu op; races are tolerated by design.
 */
static inline void __count_vm_events(enum vm_event_item item, long delta)
{
	raw_cpu_add(vm_event_states.event[item], delta);
}
|
|
|
|
/*
 * Add @delta to the event counter for @item.  Uses this_cpu_add, the
 * preemption-safe counterpart of __count_vm_events.
 */
static inline void count_vm_events(enum vm_event_item item, long delta)
{
	this_cpu_add(vm_event_states.event[item], delta);
}
|
|
|
|
extern void all_vm_events(unsigned long *);
|
|
|
|
extern void vm_events_fold_cpu(int cpu);
|
|
|
|
#else
|
|
|
|
/* Disable counters */
|
|
/*
 * !CONFIG_VM_EVENT_COUNTERS: event accounting is compiled out.  All of
 * these stubs are empty static inlines so call sites vanish at compile
 * time with no code-size or runtime cost.
 */
static inline void count_vm_event(enum vm_event_item item)
{
}
static inline void count_vm_events(enum vm_event_item item, long delta)
{
}
static inline void __count_vm_event(enum vm_event_item item)
{
}
static inline void __count_vm_events(enum vm_event_item item, long delta)
{
}
/* No per-cpu event state exists, so there is nothing to sum... */
static inline void all_vm_events(unsigned long *ret)
{
}
/* ...and nothing to fold back when a CPU goes away. */
static inline void vm_events_fold_cpu(int cpu)
{
}
|
|
|
|
#endif /* CONFIG_VM_EVENT_COUNTERS */
|
|
|
|
#ifdef CONFIG_NUMA_BALANCING
|
|
#define count_vm_numa_event(x) count_vm_event(x)
|
|
#define count_vm_numa_events(x, y) count_vm_events(x, y)
|
|
#else
|
|
#define count_vm_numa_event(x) do {} while (0)
|
|
#define count_vm_numa_events(x, y) do { (void)(y); } while (0)
|
|
#endif /* CONFIG_NUMA_BALANCING */
|
|
|
|
#ifdef CONFIG_DEBUG_TLBFLUSH
|
|
#define count_vm_tlb_event(x) count_vm_event(x)
|
|
#define count_vm_tlb_events(x, y) count_vm_events(x, y)
|
|
#else
|
|
#define count_vm_tlb_event(x) do {} while (0)
|
|
#define count_vm_tlb_events(x, y) do { (void)(y); } while (0)
|
|
#endif
|
|
|
|
#ifdef CONFIG_DEBUG_VM_VMACACHE
|
|
#define count_vm_vmacache_event(x) count_vm_event(x)
|
|
#else
|
|
#define count_vm_vmacache_event(x) do {} while (0)
|
|
#endif
|
|
|
|
#define __count_zone_vm_events(item, zone, delta) \
|
|
__count_vm_events(item##_NORMAL - ZONE_NORMAL + \
|
|
zone_idx(zone), delta)
|
|
|
|
/*
|
|
* Zone based page accounting with per cpu differentials.
|
|
*/
|
|
extern atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
|
|
|
|
/*
 * Add @x to the counter for @item in @zone and mirror the change in the
 * global vm_stat[] array, keeping the two sets of counters in step.
 */
static inline void zone_page_state_add(long x, struct zone *zone,
				 enum zone_stat_item item)
{
	atomic_long_add(x, &zone->vm_stat[item]);
	atomic_long_add(x, &vm_stat[item]);
}
|
|
|
|
static inline unsigned long global_page_state(enum zone_stat_item item)
|
|
{
|
|
long x = atomic_long_read(&vm_stat[item]);
|
|
#ifdef CONFIG_SMP
|
|
if (x < 0)
|
|
x = 0;
|
|
#endif
|
|
return x;
|
|
}
|
|
|
|
static inline unsigned long zone_page_state(struct zone *zone,
|
|
enum zone_stat_item item)
|
|
{
|
|
long x = atomic_long_read(&zone->vm_stat[item]);
|
|
#ifdef CONFIG_SMP
|
|
if (x < 0)
|
|
x = 0;
|
|
#endif
|
|
return x;
|
|
}
|
|
|
|
/*
|
|
* More accurate version that also considers the currently pending
|
|
* deltas. For that we need to loop over all cpus to find the current
|
|
* deltas. There is no synchronization so the result cannot be
|
|
* exactly accurate either.
|
|
*/
|
|
static inline unsigned long zone_page_state_snapshot(struct zone *zone,
					enum zone_stat_item item)
{
	long x = atomic_long_read(&zone->vm_stat[item]);

#ifdef CONFIG_SMP
	int cpu;
	/* Fold in each online CPU's not-yet-flushed differential. */
	for_each_online_cpu(cpu)
		x += per_cpu_ptr(zone->pageset, cpu)->vm_stat_diff[item];

	/* The unsynchronized sum can go negative; report 0 instead. */
	if (x < 0)
		x = 0;
#endif
	return x;
}
|
|
|
|
/*
 * System-wide counterpart of zone_page_state_snapshot(): the global
 * counter for @item plus every online CPU's pending differential from
 * every populated zone.  Like the per-zone version, the result is only
 * approximately accurate since nothing is synchronized.
 */
static inline unsigned long global_page_state_snapshot(enum zone_stat_item item)
{
	long x = atomic_long_read(&vm_stat[item]);

#ifdef CONFIG_SMP
	struct zone *zone;
	int cpu;

	for_each_online_cpu(cpu) {
		for_each_populated_zone(zone)
			x += per_cpu_ptr(zone->pageset,
					 cpu)->vm_stat_diff[item];
	}

	/* The unsynchronized sum can go negative; report 0 instead. */
	if (x < 0)
		x = 0;
#endif
	return x;
}
|
|
|
|
#ifdef CONFIG_NUMA
|
|
|
|
extern unsigned long node_page_state(int node, enum zone_stat_item item);
|
|
extern void zone_statistics(struct zone *, struct zone *, gfp_t gfp);
|
|
|
|
#else
|
|
|
|
#define node_page_state(node, item) global_page_state(item)
|
|
#define zone_statistics(_zl, _z, gfp) do { } while (0)
|
|
|
|
#endif /* CONFIG_NUMA */
|
|
|
|
#define add_zone_page_state(__z, __i, __d) mod_zone_page_state(__z, __i, __d)
|
|
#define sub_zone_page_state(__z, __i, __d) mod_zone_page_state(__z, __i, -(__d))
|
|
|
|
#ifdef CONFIG_SMP
|
|
void __mod_zone_page_state(struct zone *, enum zone_stat_item item, long);
|
|
void __inc_zone_page_state(struct page *, enum zone_stat_item);
|
|
void __dec_zone_page_state(struct page *, enum zone_stat_item);
|
|
|
|
void mod_zone_page_state(struct zone *, enum zone_stat_item, long);
|
|
void inc_zone_page_state(struct page *, enum zone_stat_item);
|
|
void dec_zone_page_state(struct page *, enum zone_stat_item);
|
|
|
|
extern void inc_zone_state(struct zone *, enum zone_stat_item);
|
|
extern void __inc_zone_state(struct zone *, enum zone_stat_item);
|
|
extern void dec_zone_state(struct zone *, enum zone_stat_item);
|
|
extern void __dec_zone_state(struct zone *, enum zone_stat_item);
|
|
|
|
void quiet_vmstat(void);
|
|
void cpu_vm_stats_fold(int cpu);
|
|
void refresh_zone_stat_thresholds(void);
|
|
|
|
void drain_zonestat(struct zone *zone, struct per_cpu_pageset *);
|
|
|
|
int calculate_pressure_threshold(struct zone *zone);
|
|
int calculate_normal_threshold(struct zone *zone);
|
|
void set_pgdat_percpu_threshold(pg_data_t *pgdat,
|
|
int (*calculate_pressure)(struct zone *));
|
|
#else /* CONFIG_SMP */
|
|
|
|
/*
|
|
* We do not maintain differentials in a single processor configuration.
|
|
* The functions directly modify the zone and global counters.
|
|
*/
|
|
/* UP: no per-cpu differentials exist, so modify the counters directly. */
static inline void __mod_zone_page_state(struct zone *zone,
			enum zone_stat_item item, long delta)
{
	zone_page_state_add(delta, zone, item);
}
|
|
|
|
/* UP: bump both the per-zone and the global counter for @item. */
static inline void __inc_zone_state(struct zone *zone, enum zone_stat_item item)
{
	atomic_long_inc(&zone->vm_stat[item]);
	atomic_long_inc(&vm_stat[item]);
}
|
|
|
|
/* UP: decrement both the per-zone and the global counter for @item. */
static inline void __dec_zone_state(struct zone *zone, enum zone_stat_item item)
{
	atomic_long_dec(&zone->vm_stat[item]);
	atomic_long_dec(&vm_stat[item]);
}
|
|
|
|
/* UP: increment @item for the zone that @page belongs to. */
static inline void __inc_zone_page_state(struct page *page,
			enum zone_stat_item item)
{
	__inc_zone_state(page_zone(page), item);
}
|
|
|
|
/* UP: decrement @item for the zone that @page belongs to. */
static inline void __dec_zone_page_state(struct page *page,
			enum zone_stat_item item)
{
	__dec_zone_state(page_zone(page), item);
}
|
|
|
|
/*
|
|
* We only use atomic operations to update counters. So there is no need to
|
|
* disable interrupts.
|
|
*/
|
|
#define inc_zone_page_state __inc_zone_page_state
|
|
#define dec_zone_page_state __dec_zone_page_state
|
|
#define mod_zone_page_state __mod_zone_page_state
|
|
|
|
#define inc_zone_state __inc_zone_state
|
|
#define dec_zone_state __dec_zone_state
|
|
|
|
#define set_pgdat_percpu_threshold(pgdat, callback) { }
|
|
|
|
static inline void refresh_zone_stat_thresholds(void) { }
|
|
static inline void cpu_vm_stats_fold(int cpu) { }
|
|
static inline void quiet_vmstat(void) { }
|
|
|
|
static inline void drain_zonestat(struct zone *zone,
|
|
struct per_cpu_pageset *pset) { }
|
|
#endif /* CONFIG_SMP */
|
|
|
|
/*
 * Adjust the zone's free-page count by @nr_pages.  When the pages come
 * from a CMA pageblock (per @migratetype), the CMA free counter is
 * updated as well so it stays a subset of NR_FREE_PAGES.
 */
static inline void __mod_zone_freepage_state(struct zone *zone, int nr_pages,
					     int migratetype)
{
	__mod_zone_page_state(zone, NR_FREE_PAGES, nr_pages);
	if (is_migrate_cma(migratetype))
		__mod_zone_page_state(zone, NR_FREE_CMA_PAGES, nr_pages);
}
|
|
|
|
extern const char * const vmstat_text[];
|
|
|
|
#endif /* _LINUX_VMSTAT_H */
|