Revert "sched/tune: add sysctl interface to define a boost value" Revert "sched: Initialize HMP stats inside init_sd_lb_stats()" Revert "sched: Fix integer overflow in sched_update_nr_prod()" Revert "sched: Add a device tree property to specify the sched boost type" Revert "sched: Add a stub function for init_clusters()" Revert "sched: add a knob to prefer the waker CPU for sync wakeups" Revert "sched: Fix a division by zero bug in scale_exec_time()" Revert "sched: Fix CPU selection when all online CPUs are isolated" Revert "sched: don't assume higher capacity means higher power in lb" Revert "sched/core_ctl: Integrate core control with cpu isolation" Revert "sched/core_ctl: Refactor cpu data" Revert "trace: Move core control trace events to scheduler" Revert "core_ctrl: Move core control into kernel" Revert "sched/core: Add trace point for cpu isolation" Revert "sched: add cpu isolation support" Revert "sched: constrain HMP scheduler tunable range with in better way" Revert "sched: panic on corrupted stack end" Revert "sched: Move data structures under CONFIG_SCHED_HMP" Revert "sched: Further re-factor HMP specific code" Revert "sched: Make use of sysctl_sched_wake_to_idle in select_best_cpu" Revert "sched: Introduce sched_freq_aggregate_threshold tunable" Revert "sched: handle frequency alert notifications better" Revert "sched: inherit the group id from the group leader" Revert "sched/fair: Add flag to indicate why we picked the CPU" Revert "sched: Move notify_migration() under CONFIG_SCHED_HMP" Revert "sched: Move most HMP specific code to a separate file." Revert "sched: Consolidate CONFIG_SCHED_HMP sections in various files" Revert "sched: Fix compile issues for !CONFIG_SCHED_HMP" Revert "sched: Remove all existence of CONFIG_SCHED_FREQ_INPUT" Revert "sched: Move CPU cstate tracking under CONFIG_SCHED_HMP" Revert "sched: Remove unused PELT extensions for HMP scheduling" Revert "sched: Remove unused migration notifier code." 
Revert "Revert "arm64: Add support for app specific settings"" Revert "sched/fair: Don't check for migration for a pinned task" Revert "sched/core: Fix uninitialized variable used for tracepoint" Revert "sched/core: Fix null-pointer dereference" Revert "sched: break the forever prev_cpu selection preference" Revert "sched: core: Fix possible hotplug race in set_cpus_allowed_ptr" Revert "sched: kill unnecessary divisions on fast path" Revert "sched: prevent race where update CPU cycles" Revert "sched: fix overflow in scaled execution time calculation" Revert "sched: remove unused parameter cpu from cpu_cycles_to_freq()" Revert "sched: avoid potential race between governor and thermal driver" Revert "sched: fix potential deflated frequency estimation during IRQ handling" Revert "sched: fix CPU frequency estimation while idle" Revert "sched: preserve CPU cycle counter in rq" Revert "arm64: Add support for app specific settings" Revert "Revert "sched: warn/panic upon excessive scheduling latency"" Revert "Revert "sched: add scheduling latency tracking procfs node"" Revert "sched: eliminate sched_early_detection_duration knob" Revert "sched: Remove the sched heavy task frequency guidance feature" Revert "sched: eliminate sched_migration_fixup knob" Revert "sched: eliminate sched_upmigrate_min_nice knob" Revert "sched: eliminate sched_enable_power_aware knob and parameter" Revert "sched: eliminate sched_freq_account_wait_time knob" Revert "sched: eliminate sched_account_wait_time knob" Revert "sched: Aggregate for frequency" Revert "sched: simplify CPU frequency estimation and cycle counter API" Revert "sched: use correct Kconfig macro name CONFIG_SCHED_HMP_CSTATE_AWARE" Revert "Revert "sched: set HMP scheduler's default initial task load to 100%"" Revert "watchdog: introduce touch_softlockup_watchdog_sched()" Revert "sched/cgroup: Fix/cleanup cgroup teardown/init" Revert "sched: take into account of limited CPU min and max frequencies" Revert "sched: add support for CPU 
frequency estimation with cycle counter" Revert "sched: revise sched_boost to make the best of big cluster CPUs" Revert "sched: fix excessive task packing where CONFIG_SCHED_HMP_CSTATE_AWARE=y" Revert "sched: add option whether CPU C-state is used to guide task placement" Revert "sched: update placement logic to prefer C-state and busier CPUs" Revert "sched: Optimize wakeup placement logic when need_idle is set" Revert "kernel: sched: Fix compilation issues for Usermode Linux" Revert "sched/cputime: Fix steal time accounting vs. CPU hotplug" Revert "sched/cputime: Fix steal_account_process_tick() to always return jiffies" Revert "sched: fix circular dependency of rq->lock and kswadp waitqueue lock" Revert "sched: move out migration notification out of spinlock" Revert "sched: fix compile failure with !CONFIG_SCHED_HMP" Revert "sched: restrict sync wakee placement bias with waker's demand" Revert "sched: add preference for waker cluster CPU in wakee task placement" Revert "sched/core: Add protection against null-pointer dereference" Revert "sched: allow select_prev_cpu_us to be set to values greater than 100us" Revert "sched: clean up idle task's mark_start restoring in init_idle()" Revert "sched: let sched_boost take precedence over sched_restrict_cluster_spill" Revert "sched: Add separate load tracking histogram to predict loads" Revert "sched: Provide a wake up API without sending freq notifications" Revert "sched: Take downmigrate threshold into consideration" Revert "sched: Provide a facility to restrict RT tasks to lower power cluster" Revert "sched: Take cluster's minimum power into account for optimizing sbc()" Revert "sched: Revise the inter cluster load balance restrictions" Revert "sched: colocate related threads" Revert "sched: Update fair and rt placement logic to use scheduler clusters" Revert "sched: Introduce the concept CPU clusters in the scheduler" Revert "sched: remove init_new_task_load from CONFIG_SMP" Revert "sched: Export 
sched_setscheduler_nocheck()" Revert "Revert "sched: Export sched_setscheduler_nocheck"" Revert "vmstat: make vmstat_updater deferrable again and shut down on idle" Revert "sched: fix compile failure where !CONFIG_SCHED_HMP" Revert "sched: select task's prev_cpu as the best CPU when it was chosen recently" Revert "sched: use ktime instead of sched_clock for load tracking" Revert "sched: Update min/max capacity for the CPUFREQ_CREATE_POLICY notifier" Revert "sched/cputime: fix a deadlock on 32bit systems" Revert "sched: Optimize scheduler trace events to reduce trace buffer usage" Revert "sched: initialize frequency domain cpumask" Revert "sched: print sched_task_load always" Revert "sched: add preference for prev and sibling CPU in RT task placement" Revert "sched: core: Don't use current task_cpu when migrating with stop_one_cpu" Revert "sched: Notify cpufreq governor early about potential big tasks" Revert "sched: Skip resetting HMP stats when max frequencies remain unchanged" Revert "sched: update sched_task_load trace event" Revert "sched: avoid unnecessary multiplication and division" Revert "sched: precompute required frequency for CPU load" Revert "sched: clean up fixup_hmp_sched_stats()" Revert "sched: account new task load so that governor can apply different policy" Revert "sched: Fix frequency change checks when affined tasks are migrating" Revert "sched: Add tunables for static cpu and cluster cost" Revert "sched/core: Add API to set cluster d-state" Revert "sched: take into account of governor's frequency max load" Revert "sched: set HMP scheduler's default initial task load to 100%" Revert "sched: add preference for prev and sibling CPU in HMP task placement" Revert "sched: Update task->on_rq when tasks are moving between runqueues" Revert "sched: remove temporary demand fixups in fixup_busy_time()" Revert "sched: add frequency zone awareness to the load balancer" Revert "sched: Update the wakeup placement logic for fair and rt tasks" Revert "sched: 
remove the notion of small tasks and small task packing" Revert "sched: Rework energy aware scheduling" Revert "sched: encourage idle load balance and discourage active load balance" Revert "sched: avoid stale cumulative_runnable_avg HMP statistics" Revert "sched: Add load based placement for RT tasks" Revert "sched: Avoid running idle_balance() consecutively" Revert "sched: inline function scale_load_to_cpu()" Revert "sched: look for least busy and fallback CPU only when it's needed" Revert "sched: iterate search CPUs starting from prev_cpu for optimization" Revert "sched: Optimize the select_best_cpu() "for" loop" Revert "sched: Optimize select_best_cpu() to reduce execution time" Revert "sched/debug: Add Kconfig to trigger panics on all 'BUG:' conditions" Revert "sched: fix incorrect prev_runnable_sum accounting with long ISR run" Revert "sched: prevent task migration while governor queries CPUs' load" Revert "sched: report loads greater than 100% only during load alert notifications" Revert "sched: turn off the TTWU_QUEUE feature" Revert "sched: avoid unnecessary HMP scheduler stat re-accounting" Revert "sched/fair: Fix capacity and nr_run comparisons in can_migrate_task()" Revert "Revert "sched: Use only partial wait time as task demand"" Revert "sched/deadline: Add basic HMP extensions" Revert "sched: Fix racy invocation of fixup_busy_time via move_queued_task" Revert "sched: don't inflate the task load when the CPU max freq is restricted" Revert "sched: auto adjust the upmigrate and downmigrate thresholds" Revert "sched: don't inherit initial task load from the parent" Revert "sched/fair: Add irq load awareness to the tick CPU selection logic" Revert "sched: disable IRQs in update_min_max_capacity" Revert "sched: Use only partial wait time as task demand" Revert "sched: fix race conditions where HMP tunables change" Revert "sched: check HMP scheduler tunables validity" Revert "sched: Update max_capacity when an entire cluster is hotplugged" Revert "sched: 
Ensure attempting load balance when HMP active balance flags are set" Revert "sched: add scheduling latency tracking procfs node" Revert "sched: warn/panic upon excessive scheduling latency" Revert "sched/core: Fix incorrect wait time and wait count statistics" Revert "sched: Update cur_freq in the cpufreq policy notifier callback" Revert "sched: avoid CPUs with high irq activity for non-small tasks" Revert "sched: actively migrate big tasks on power CPU to idle performance CPU" Revert "sched: Add cgroup-based criteria for upmigration" Revert "sched: avoid running idle_balance() on behalf of wrong CPU" Revert "sched: Keep track of average nr_big_tasks" Revert "sched: Fix bug in average nr_running and nr_iowait calculation" Revert "sched: Avoid pulling all tasks from a CPU during load balance" Revert "sched: Avoid pulling big tasks to the little cluster during load balance" Revert "sched: fix rounding error on scaled execution time calculation" Revert "sched/fair: Respect wake to idle over sync wakeup" Revert "sched: Support CFS_BANDWIDTH feature in HMP scheduler" Revert "sched: Consolidate hmp stats into their own struct" Revert "sched: Add userspace interface to set PF_WAKE_UP_IDLE" Revert "sched_avg: add run queue averaging" Revert "sched: add sched feature FORCE_CPU_THROTTLING_IMMINENT" Revert "sched: continue to search less power efficient cpu for load balancer" Revert "sched: Update cur_freq for offline CPUs in notifier callback" Revert "sched: Fix overflow in max possible capacity calculation" Revert "sched: add preference for prev_cpu in HMP task placement" Revert "sched: Per-cpu prefer_idle flag" Revert "sched: Consider PF_WAKE_UP_IDLE in select_best_cpu()" Revert "sched: Add sysctl to enable power aware scheduling" Revert "sched: Ensure no active EA migration occurs when EA is disabled" Revert "sched: take account of irq preemption when calculating irqload delta" Revert "sched: Prevent race conditions where upmigrate_min_nice changes" Revert "sched: Avoid 
frequent task migration due to EA in lb" Revert "sched: Avoid migrating tasks to little cores due to EA" Revert "sched: Add temperature to cpu_load trace point" Revert "sched: Only do EA migration when CPU throttling is imminent" Revert "sched: Avoid frequent migration of running task" Revert "sched: treat sync waker CPUs with 1 task as idle" Revert "sched: extend sched_task_load tracepoint to indicate prefer_idle" Revert "sched: extend sched_task_load tracepoint to indicate sync wakeup" Revert "sched: add sync wakeup recognition in select_best_cpu" Revert "sched: Provide knob to prefer mostly_idle over idle cpus" Revert "sched: make sched_cpu_high_irqload a runtime tunable" Revert "sched: trace: extend sched_cpu_load to print irqload" Revert "sched: avoid CPUs with high irq activity" Revert "sched: refresh sched_clock() after acquiring rq lock in irq path" Revert "sched: track soft/hard irqload per-RQ with decaying avg" Revert "sched: do not set window until sched_clock is fully initialized" Revert "sched: Make RT tasks eligible for boost" Revert "sched: Limit LBF_PWR_ACTIVE_BALANCE to within cluster" Revert "sched: Packing support until a frequency threshold" Revert "sched: tighten up jiffy to sched_clock mapping" Revert "sched: Avoid unnecessary load balance when tasks don't fit on dst_cpu" Revert "sched: print sched_cpu_load tracepoint for all CPUs" Revert "sched: per-cpu mostly_idle threshold" Revert "sched: Add API to set task's initial task load" Revert "sched: use C-states in non-small task wakeup placement logic" Revert "sched: take rq lock prior to saving idle task's mark_start" Revert "sched: update governor notification logic" Revert "sched: window-stats: Retain idle thread's mark_start" Revert "sched: Add checks for frequency change" Revert "sched: Use absolute scale for notifying governor" Revert "sched: window-stats: Enhance cpu busy time accounting" Revert "sched: window-stats: ftrace event improvements" Revert "sched: improve logic for alerting 
governor" Revert "sched: Stop task migration to busy CPUs due to power active balance" Revert "sched: window-stats: Fix accounting bug in legacy mode" Revert "sched: window-stats: Note legacy mode in fork() and exit()" Revert "sched: Fix reference to stale task_struct in try_to_wake_up()" Revert "sched: Remove hack to enable/disable HMP scheduling extensions" Revert "sched: fix wrong load_scale_factor/capacity/nr_big/small_tasks" Revert "sched: add check for cpu idleness when using C-state information" Revert "sched: extend sched_task_load tracepoint to indicate small tasks" Revert "sched: Add C-state tracking to the sched_cpu_load trace event" Revert "sched: window-stats: add a new AVG policy" Revert "sched: Fix compile error" Revert "sched: update ld_moved for active balance from the load balancer" Revert "sched: actively migrate tasks to idle big CPUs during sched boost" Revert "sched: always do idle balance with a NEWLY_IDLE idle environment" Revert "sched: fix bail condition in bail_inter_cluster_balance()" Revert "sched: Initialize env->loop variable to 0" Revert "sched: window-stats: use policy_mutex in sched_set_window()" Revert "sched: window-stats: Avoid taking all cpu's rq->lock for long" Revert "sched: window_stats: Add "disable" mode support" Revert "sched: window-stats: Fix exit race" Revert "sched: window-stats: code cleanup" Revert "sched: window-stats: legacy mode" Revert "sched: window-stats: Code cleanup" Revert "sched: window-stats: Code cleanup" Revert "sched: window-stats: Code cleanup" Revert "sched: window-stats: Remove unused prev_window variable" Revert "sched: disable frequency notifications by default" Revert "sched: fix misalignment between requested and actual windows" Revert "sched: Make RAVG_HIST_SIZE tunable" Revert "sched: Fix possibility of "stuck" reserved flag" Revert "sched: initialize env->flags variable to 0" Revert "sched: window-stats: 64-bit type for curr/prev_runnable_sum" Revert "sched: window-stats: Allow acct_wait_time 
to be tuned" Revert "sched: window-stats: Account interrupt handling time as busy time" Revert "sched: window-stats: Account idle time as busy time" Revert "sched: window-stats: Account wait time" Revert "sched: window-stats: update task demand on tick" Revert "sched: Fix herding issue" Revert "sched: window-stats: print window size in /proc/sched_debug" Revert "sched: Extend ftrace event to record boost and reason code" Revert "sched: Avoid needless migration" Revert "sched: Drop active balance request upon cpu going offline" Revert "sched: trigger immediate migration of tasks upon boost" Revert "sched: Extend boost benefit for small and low-prio tasks" Revert "sched: window-stats: Handle policy change properly" Revert "sched: window-stats: Reset all window stats" Revert "sched: window-stats: Additional error checking in sched_set_window()" Revert "sched: window-stats: Fix incorrect calculation of partial_demand" Revert "sched: window-stats: Fix potential wrong use of rq" Revert "sched: set initial task load to just above a small task" Revert "sched/fair: Check whether any CPUs are available" Revert "sched: enable hmp, power aware scheduling for targets with > 4 CPUs" Revert "sched: remove sysctl control for HMP and power-aware task placement" Revert "sched: support legacy mode better" Revert "sched: code cleanup" Revert "sched: Add BUG_ON when task_cpu() is incorrect" Revert "sched: avoid active migration of tasks not in TASK_RUNNING state" Revert "sched: fix up task load during migration" Revert "sched: avoid pushing tasks to an offline CPU" Revert "sched: Add a per rq max_possible_capacity for use in power calculations" Revert "sched: Disable interrupts when holding the rq lock in sched_get_busy()" Revert "sched: Make wallclock more accurate" Revert "sched: Make task and CPU load calculations safe from truncation" Revert "sched/fair: Introduce C-state aware task placement for small tasks" Revert "sched/fair: Introduce scheduler boost for low latency workloads" 
Revert "sched: Move call to trace_sched_cpu_load()" Revert "sched: fair: Reset balance_interval before sending NOHZ kick" Revert "sched: Avoid active migration of small tasks" Revert "sched: Account for cpu's current frequency when calculating its power cost" Revert "sched: make sched_set_window() return failure when PELT is in use" Revert "sched: debug: Print additional information in /proc/sched_debug" Revert "sched: Move around code" Revert "sched: Update capacity of all online cpus when min_max_freq changes" Revert "sched: update task statistics when CPU frequency changes" Revert "sched: Add new trace events" Revert "sched: do not balance on exec if SCHED_HMP" Revert "sched: Use historical load for freq governor input" Revert "sched: window-stats: apply scaling to full elapsed windows" Revert "sched: notify cpufreq on over/underprovisioned CPUs" Revert "sched: Introduce spill threshold tunables to manage overcommitment" Revert "sched: add affinity, task load information to sched tracepoints" Revert "sched: add migration load change notifier for frequency guidance" Revert "sched/fair: Limit MAX_PINNED_INTERVAL for more frequent load balancing" Revert "sched/fair: Help out higher capacity CPUs when they are overcommitted" Revert "sched/rt: Introduce power aware scheduling for real time tasks" Revert "sched: balance power inefficient CPUs with one task" Revert "sched: check for power inefficient task placement in tick" Revert "sched: do nohz load balancing in order of power efficiency" Revert "sched: run idle_balance() on most power-efficient CPU" Revert "sched: add hook for platform-specific CPU power information" Revert "sched: add power aware scheduling sysctl" Revert "sched: Extend update_task_ravg() to accept wallclock as argument" Revert "sched: add sched_get_busy, sched_set_window APIs" Revert "sched: window-stats: adjust RQ curr, prev sums on task migration" Revert "sched: window-stats: Add aggregated runqueue windowed stats" Revert "sched: window-stats: 
add prev_window counter per-task" Revert "sched: window-stats: synchronize windows across cpus" Revert "sched: window-stats: Do not account wait time" Revert "sched: window-stats: update during migration and earlier at wakeup" Revert "sched: move definition of update_task_ravg()" Revert "sched: Switch to windows based load stats by default" Revert "sched: Provide tunable to switch between PELT and window-based stats" Revert "sched: Provide scaled load information for tasks in /proc" Revert "sched: Add additional ftrace events" Revert "sched: Extend /proc/sched_debug with additional information" Revert "sched: Tighten controls for tasks spillover to idle cluster" Revert "sched: Track number of big and small tasks on a cpu" Revert "sched: Handle cpu-bound tasks stuck on wrong cpu" Revert "sched: Extend active balance to accept 'push_task' argument" Revert "sched: Send NOHZ kick to idle cpu in same cluster" Revert "sched: Basic task placement support for HMP systems" Revert "sched: Use rq->efficiency in scaling load stats" Revert "sched: Introduce efficiency, load_scale_factor and capacity" Revert "sched: Add CONFIG_SCHED_HMP Kconfig option" Revert "sched: Add scaled task load statistics" Revert "sched: Introduce CONFIG_SCHED_FREQ_INPUT" Revert "sched: window-based load stats improvements" Revert "sched: Add min_max_freq and rq->max_possible_freq" Revert "sched: move task load based functions" Revert "sched: fix race between try_to_wake_up() and move_task()" Revert "sched: Skip load update for idle task" Revert "sched: Window-based load stat improvements" Revert "sched: Call the notify_on_migrate notifier chain for wakeups as well" Revert "cpufreq: cpu-boost: Introduce scheduler assisted load based syncs" Revert "sched: window-based load stats for tasks" Revert "sched: Make scheduler aware of cpu frequency state" Revert "sched/debug: Make sysrq prints of sched debug data optional" Revert "tracing/sched: add load balancer tracepoint" Revert "sched: change WARN_ON_ONCE 
to printk_deferred() in try_to_wake_up_local()" Revert "tracing/sched: Track per-cpu rt and non-rt cpu_load." Revert "sched: re-calculate a cpu's next_balance point upon sched domain changes" Revert "sched: provide per cpu-cgroup option to notify on migrations" Revert "sched: Fix SCHED_HRTICK bug leading to late preemption of tasks" Revert "kernel: reduce sleep duration in wait_task_inactive" Revert "sched: add sysctl for controlling task migrations on wake" Revert "sched/rt: Add Kconfig option to enable panicking for RT throttling" Revert "sched/rt: print RT tasks when RT throttling is activated" Revert "sched: add PF_WAKE_UP_IDLE" Revert "sched: Make the scheduler aware of C-state for cpus" Revert "sched: Fix crash in sched_init_numa()" Revert "cpufreq: interactive: New 'interactive' governor" Revert "drivers: cpuidle: lpm-levels: Notify sched of idle state entry/exit" Revert "arm64: topology: Add support for topology DT bindings" Revert "clk: msm: clock-osm: register cycle counter callbacks with scheduler" Revert "arm64: fpsimd: Enable FP(floating-point) settings for msm8996" Documentation: remove sched-hmp and sched-zone docs sched: Add a set_wake_up_idle dummy implementation driver: thermal: remove call to sched defconfig: remove HMP related config Change-Id: Icfc908d8e5414499642c2dffb59a7d6496392b0d Signed-off-by: Andres Oportus <andresoportus@google.com>
301 lines
9.2 KiB
C
301 lines
9.2 KiB
C
/*
 * include/linux/cpu.h - generic cpu definition
 *
 * This is mainly for topological representation. We define the
 * basic 'struct cpu' here, which can be embedded in per-arch
 * definitions of processors.
 *
 * Basic handling of the devices is done in drivers/base/cpu.c
 *
 * CPUs are exported via sysfs in the devices/system/cpu
 * directory.
 */
#ifndef _LINUX_CPU_H_
#define _LINUX_CPU_H_

#include <linux/node.h>
#include <linux/compiler.h>
#include <linux/cpumask.h>

/* Forward declarations: only pointers are used below, so the full
 * definitions need not be pulled in here. */
struct device;
struct device_node;
struct attribute_group;
/**
 * struct cpu - per-CPU device representation
 * @node_id:      the node which contains the CPU (see inline comment below)
 * @hotpluggable: creates sysfs control file if hotpluggable
 * @dev:          embedded device; per the file header, device handling is
 *                done in drivers/base/cpu.c
 */
struct cpu {
	int node_id;		/* The node which contains the CPU */
	int hotpluggable;	/* creates sysfs control file if hotpluggable */
	struct device dev;
};
/* Registration and lookup of CPU devices (see drivers/base/cpu.c). */
extern int register_cpu(struct cpu *cpu, int num);
extern struct device *get_cpu_device(unsigned cpu);
extern bool cpu_is_hotpluggable(unsigned cpu);
/* Arch helpers matching logical CPUs to physical IDs / device-tree nodes. */
extern bool arch_match_cpu_phys_id(int cpu, u64 phys_id);
extern bool arch_find_n_match_cpu_physical_id(struct device_node *cpun,
					      int cpu, unsigned int *thread);

/* Add/remove extra sysfs attributes on CPU devices.
 * NOTE(review): presumably applied across all CPU devices — confirm in
 * drivers/base/cpu.c. */
extern int cpu_add_dev_attr(struct device_attribute *attr);
extern void cpu_remove_dev_attr(struct device_attribute *attr);

extern int cpu_add_dev_attr_group(struct attribute_group *attrs);
extern void cpu_remove_dev_attr_group(struct attribute_group *attrs);

/* __printf(4, 5): fmt/... are checked like printf arguments. */
extern __printf(4, 5)
struct device *cpu_device_create(struct device *parent, void *drvdata,
				 const struct attribute_group **groups,
				 const char *fmt, ...);
#ifdef CONFIG_HOTPLUG_CPU
extern void unregister_cpu(struct cpu *cpu);
/* sysfs probe/release hooks implemented by architectures that support
 * manual CPU probing. */
extern ssize_t arch_cpu_probe(const char *, size_t);
extern ssize_t arch_cpu_release(const char *, size_t);
#endif
struct notifier_block;
/*
 * CPU notifier priorities: higher-priority notifiers run first on the
 * up path and last on the down path.
 */
enum {
	/*
	 * SCHED_ACTIVE marks a cpu which is coming up active during
	 * CPU_ONLINE and CPU_DOWN_FAILED and must be the first
	 * notifier. CPUSET_ACTIVE adjusts cpuset according to
	 * cpu_active mask right after SCHED_ACTIVE. During
	 * CPU_DOWN_PREPARE, SCHED_INACTIVE and CPUSET_INACTIVE are
	 * ordered in the similar way.
	 *
	 * This ordering guarantees consistent cpu_active mask and
	 * migration behavior to all cpu notifiers.
	 */
	CPU_PRI_SCHED_ACTIVE	= INT_MAX,
	CPU_PRI_CPUSET_ACTIVE	= INT_MAX - 1,
	CPU_PRI_SCHED_INACTIVE	= INT_MIN + 1,
	CPU_PRI_CPUSET_INACTIVE	= INT_MIN,

	/* migration should happen before other stuff but after perf */
	CPU_PRI_PERF		= 20,
	CPU_PRI_MIGRATION	= 10,
	CPU_PRI_SMPBOOT		= 9,
	/* bring up workqueues before normal notifiers and down after */
	CPU_PRI_WORKQUEUE_UP	= 5,
	CPU_PRI_WORKQUEUE_DOWN	= -5,
};
/*
 * CPU hotplug notification event codes.  In the comments below,
 * (unsigned)v refers to the CPU number carried by the notifier call.
 */
#define CPU_ONLINE		0x0002 /* CPU (unsigned)v is up */
#define CPU_UP_PREPARE		0x0003 /* CPU (unsigned)v coming up */
#define CPU_UP_CANCELED		0x0004 /* CPU (unsigned)v NOT coming up */
#define CPU_DOWN_PREPARE	0x0005 /* CPU (unsigned)v going down */
#define CPU_DOWN_FAILED		0x0006 /* CPU (unsigned)v NOT going down */
#define CPU_DEAD		0x0007 /* CPU (unsigned)v dead */
#define CPU_DYING		0x0008 /* CPU (unsigned)v not running any task,
					* not handling interrupts, soon dead.
					* Called on the dying cpu, interrupts
					* are already disabled. Must not
					* sleep, must not fail */
#define CPU_POST_DEAD		0x0009 /* CPU (unsigned)v dead, cpu_hotplug
					* lock is dropped */
#define CPU_STARTING		0x000A /* CPU (unsigned)v soon running.
					* Called on the new cpu, just before
					* enabling interrupts. Must not sleep,
					* must not fail */
#define CPU_DYING_IDLE		0x000B /* CPU (unsigned)v dying, reached
					* idle loop. */
#define CPU_BROKEN		0x000C /* CPU (unsigned)v did not die properly,
					* perhaps due to preemption. */

/* Used for CPU hotplug events occurring while tasks are frozen due to a suspend
 * operation in progress
 */
#define CPU_TASKS_FROZEN	0x0010

/* Frozen variants: the base event code OR'ed with CPU_TASKS_FROZEN. */
#define CPU_ONLINE_FROZEN	(CPU_ONLINE | CPU_TASKS_FROZEN)
#define CPU_UP_PREPARE_FROZEN	(CPU_UP_PREPARE | CPU_TASKS_FROZEN)
#define CPU_UP_CANCELED_FROZEN	(CPU_UP_CANCELED | CPU_TASKS_FROZEN)
#define CPU_DOWN_PREPARE_FROZEN	(CPU_DOWN_PREPARE | CPU_TASKS_FROZEN)
#define CPU_DOWN_FAILED_FROZEN	(CPU_DOWN_FAILED | CPU_TASKS_FROZEN)
#define CPU_DEAD_FROZEN		(CPU_DEAD | CPU_TASKS_FROZEN)
#define CPU_DYING_FROZEN	(CPU_DYING | CPU_TASKS_FROZEN)
#define CPU_STARTING_FROZEN	(CPU_STARTING | CPU_TASKS_FROZEN)
#ifdef CONFIG_SMP
/* Need to know about CPUs going up/down? */
#if defined(CONFIG_HOTPLUG_CPU) || !defined(MODULE)
/*
 * Define a static notifier_block named fn##_nb and register it in one
 * statement.
 *
 * NOTE(review): these expand to a bare { } block rather than the usual
 * do { } while (0), so they must not be used as the body of an unbraced
 * if/else.
 */
#define cpu_notifier(fn, pri) {				\
	static struct notifier_block fn##_nb =		\
		{ .notifier_call = fn, .priority = pri };	\
	register_cpu_notifier(&fn##_nb);			\
}

#define __cpu_notifier(fn, pri) {			\
	static struct notifier_block fn##_nb =		\
		{ .notifier_call = fn, .priority = pri };	\
	__register_cpu_notifier(&fn##_nb);			\
}
#else /* #if defined(CONFIG_HOTPLUG_CPU) || !defined(MODULE) */
/* Notifiers compiled out: evaluate fn (silences unused warnings), do nothing. */
#define cpu_notifier(fn, pri)	do { (void)(fn); } while (0)
#define __cpu_notifier(fn, pri)	do { (void)(fn); } while (0)
#endif /* #else #if defined(CONFIG_HOTPLUG_CPU) || !defined(MODULE) */
/*
 * Registration of CPU hotplug notifiers.
 * NOTE(review): the __-prefixed variants presumably expect the caller to
 * already hold the CPU maps update lock (cpu_maps_update_begin/done) —
 * verify against kernel/cpu.c.
 */
#ifdef CONFIG_HOTPLUG_CPU
extern int register_cpu_notifier(struct notifier_block *nb);
extern int __register_cpu_notifier(struct notifier_block *nb);
extern void unregister_cpu_notifier(struct notifier_block *nb);
extern void __unregister_cpu_notifier(struct notifier_block *nb);
#else

/* Built-in code may still register (boot-time CPU bringup); modules get
 * no-op stubs when hotplug is disabled. */
#ifndef MODULE
extern int register_cpu_notifier(struct notifier_block *nb);
extern int __register_cpu_notifier(struct notifier_block *nb);
#else
static inline int register_cpu_notifier(struct notifier_block *nb)
{
	return 0;
}

static inline int __register_cpu_notifier(struct notifier_block *nb)
{
	return 0;
}
#endif

static inline void unregister_cpu_notifier(struct notifier_block *nb)
{
}

static inline void __unregister_cpu_notifier(struct notifier_block *nb)
{
}
#endif
void smpboot_thread_init(void);
int cpu_up(unsigned int cpu);
/* Invoked on the incoming CPU to run the CPU_STARTING notifier chain. */
void notify_cpu_starting(unsigned int cpu);
extern void cpu_maps_update_begin(void);
extern void cpu_maps_update_done(void);

/* Notifier registration is serialized by the same lock as the CPU maps. */
#define cpu_notifier_register_begin	cpu_maps_update_begin
#define cpu_notifier_register_done	cpu_maps_update_done
#else /* CONFIG_SMP */
|
|
|
|
/*
 * UP (!CONFIG_SMP) variants: with a single, always-online CPU there are
 * no hotplug events, so notifier registration and the maps-update hooks
 * all collapse to no-ops.  The macros still evaluate fn once to keep
 * "unused" warnings away.
 */
#define cpu_notifier(fn, pri)		do { (void)(fn); } while (0)
#define __cpu_notifier(fn, pri)		do { (void)(fn); } while (0)

/* Repeated forward declaration (also made earlier in this header) so this
 * section stands on its own. */
struct notifier_block;

static inline int register_cpu_notifier(struct notifier_block *nb) { return 0; }
static inline int __register_cpu_notifier(struct notifier_block *nb) { return 0; }
static inline void unregister_cpu_notifier(struct notifier_block *nb) { }
static inline void __unregister_cpu_notifier(struct notifier_block *nb) { }

static inline void cpu_maps_update_begin(void) { }
static inline void cpu_maps_update_done(void) { }

static inline void cpu_notifier_register_begin(void) { }
static inline void cpu_notifier_register_done(void) { }

static inline void smpboot_thread_init(void) { }
#endif /* CONFIG_SMP */
|
|
/* The bus that CPU devices are registered on (drivers/base/cpu.c). */
extern struct bus_type cpu_subsys;

#ifdef CONFIG_HOTPLUG_CPU
/* Stop CPUs going up and down. */

extern void cpu_hotplug_begin(void);
extern void cpu_hotplug_done(void);
/* Reader-side lock against CPU hotplug; may nest, may sleep. */
extern void get_online_cpus(void);
extern void put_online_cpus(void);
extern void cpu_hotplug_disable(void);
extern void cpu_hotplug_enable(void);
/* hotcpu_* are straight aliases of the cpu_* notifier interfaces. */
#define hotcpu_notifier(fn, pri)	cpu_notifier(fn, pri)
#define __hotcpu_notifier(fn, pri)	__cpu_notifier(fn, pri)
#define register_hotcpu_notifier(nb)	register_cpu_notifier(nb)
#define __register_hotcpu_notifier(nb)	__register_cpu_notifier(nb)
#define unregister_hotcpu_notifier(nb)	unregister_cpu_notifier(nb)
#define __unregister_hotcpu_notifier(nb)	__unregister_cpu_notifier(nb)
void clear_tasks_mm_cpumask(int cpu);
int cpu_down(unsigned int cpu);
#else /* CONFIG_HOTPLUG_CPU */

/* Hotplug disabled: locking and notifier registration become no-ops. */
static inline void cpu_hotplug_begin(void) {}
static inline void cpu_hotplug_done(void) {}
#define get_online_cpus()	do { } while (0)
#define put_online_cpus()	do { } while (0)
#define cpu_hotplug_disable()	do { } while (0)
#define cpu_hotplug_enable()	do { } while (0)
#define hotcpu_notifier(fn, pri)	do { (void)(fn); } while (0)
#define __hotcpu_notifier(fn, pri)	do { (void)(fn); } while (0)
/* These aren't inline functions due to a GCC bug. */
/* (Statement expressions keep the call-site types: int 0 for register,
 * void for unregister, with nb evaluated exactly once.) */
#define register_hotcpu_notifier(nb)	({ (void)(nb); 0; })
#define __register_hotcpu_notifier(nb)	({ (void)(nb); 0; })
#define unregister_hotcpu_notifier(nb)	({ (void)(nb); })
#define __unregister_hotcpu_notifier(nb)	({ (void)(nb); })
#endif /* CONFIG_HOTPLUG_CPU */
#ifdef CONFIG_PM_SLEEP_SMP
/* Suspend path: take every non-boot CPU offline, then bring them back. */
extern int disable_nonboot_cpus(void);
extern void enable_nonboot_cpus(void);
#else /* !CONFIG_PM_SLEEP_SMP */
/* No SMP suspend support: nothing to take down or bring back up. */
static inline int disable_nonboot_cpus(void)
{
	return 0;
}

static inline void enable_nonboot_cpus(void)
{
}
#endif /* !CONFIG_PM_SLEEP_SMP */
/* Coarse hotplug state handed to cpu_startup_entry(). */
enum cpuhp_state {
	CPUHP_OFFLINE,
	CPUHP_ONLINE,
};

/* Entry point into the idle loop for a CPU in the given hotplug state. */
void cpu_startup_entry(enum cpuhp_state state);

void cpu_idle_poll_ctrl(bool enable);

/* Architecture hooks around the generic idle loop. */
void arch_cpu_idle(void);
void arch_cpu_idle_prepare(void);
void arch_cpu_idle_enter(void);
void arch_cpu_idle_exit(void);
void arch_cpu_idle_dead(void);

DECLARE_PER_CPU(bool, cpu_dead_idle);

/* CPU bringup/teardown state reporting (see kernel/smpboot.c). */
int cpu_report_state(int cpu);
int cpu_check_up_prepare(int cpu);
void cpu_set_state_online(int cpu);
#ifdef CONFIG_HOTPLUG_CPU
bool cpu_wait_death(unsigned int cpu, int seconds);
bool cpu_report_death(void);
#endif /* #ifdef CONFIG_HOTPLUG_CPU */

/* Events delivered on the idle notifier chain below. */
#define IDLE_START 1
#define IDLE_END 2

void idle_notifier_register(struct notifier_block *n);
void idle_notifier_unregister(struct notifier_block *n);
void idle_notifier_call_chain(unsigned long val);

#endif /* _LINUX_CPU_H_ */