* 4.9/tmp-e6b0c64f:
Linux 4.9.41
ASoC: dpcm: Avoid putting stream state to STOP when FE stream is paused
ASoC: Intel: Skylake: Release FW ctx in cleanup
scsi: bfa: Increase requested firmware version to 3.2.5.1
scsi: snic: Return error code on memory allocation failure
scsi: fnic: Avoid sending reset to firmware when another reset is in progress
HID: ignore Petzl USB headlamp
ASoC: Intel: bytcr-rt5640: fix settings in internal clock mode
perf/x86: Set pmu->module in Intel PMU modules
x86/platform/intel-mid: Rename 'spidev' to 'mrfld_spidev'
ALSA: usb-audio: test EP_FLAG_RUNNING at urb completion
ARCv2: IRQ: Call entry/exit functions for chained handlers in MCIP
sh_eth: enable RX descriptor word 0 shift on SH7734
ASoC: fsl_ssi: set fifo watermark to more reliable value
net: usb: asix_devices: add .reset_resume for USB PM
nvmem: imx-ocotp: Fix wrong register size
arm64: mm: fix show_pte KERN_CONT fallout
vfio-pci: Handle error from pci_iomap
video: fbdev: cobalt_lcdfb: Handle return NULL error from devm_ioremap
perf symbols: Robustify reading of build-id from sysfs
perf tools: Install tools/lib/traceevent plugins with install-bin
xfrm: Don't use sk_family for socket policy lookups
tools lib traceevent: Fix prev/next_prio for deadline tasks
Xen: ARM: Zero reserved fields of xatp before making hypervisor call
Btrfs: adjust outstanding_extents counter properly when dio write is split
benet: stricter vxlan offloading check in be_features_check
Btrfs: fix lockdep warning about log_mutex
Btrfs: use down_read_nested to make lockdep silent
usb: gadget: Fix copy/pasted error message
ACPI / scan: Prefer devices without _HID/_CID for _ADR matching
ARM: s3c2410_defconfig: Fix invalid values for NF_CT_PROTO_*
perf probe: Fix to get correct modname from elf header
ARM64: zynqmp: Fix i2c node's compatible string
ARM64: zynqmp: Fix W=1 dtc 1.4 warnings
usb: dwc3: omap: fix race of pm runtime with irq handler in probe
dmaengine: ti-dma-crossbar: Add some 'of_node_put()' in error path.
l2tp: consider '::' as wildcard address in l2tp_ip6 socket lookup
dmaengine: ioatdma: workaround SKX ioatdma version
dmaengine: ioatdma: Add Skylake PCI Dev ID
openrisc: Add _text symbol to fix ksym build error
irqchip/mxs: Enable SKIP_SET_WAKE and MASK_ON_SUSPEND
ASoC: nau8825: fix invalid configuration in Pre-Scalar of FLL
spi: dw: Make debugfs name unique between instances
ASoC: tlv320aic3x: Mark the RESET register as volatile
irqchip/keystone: Fix "scheduling while atomic" on rt
vfio-pci: use 32-bit comparisons for register address for gcc-4.5
drm/msm: Verify that MSM_SUBMIT_BO_FLAGS are set
drm/msm: Put back the vaddr in submit_reloc()
drm/msm: Ensure that the hardware write pointer is valid
net/mlx4_core: Fix raw qp flow steering rules under SRIOV
net/mlx4: Remove BUG_ON from ICM allocation routine
net/mlx4_core: Use-after-free causes a resource leak in flow-steering detach
ipv6: Should use consistent conditional judgement for ip6 fragment between __ip6_append_data and ip6_finish_output
net/mlx5: Disable RoCE on the e-switch management port under switchdev mode
ARM: dts: n900: Mark eMMC slot with no-sdio and no-sd flags
ARM: dts: am57xx-idk: Put USB2 port in peripheral mode
dt-bindings: input: Specify the interrupt number of TPS65217 power button
dt-bindings: power/supply: Update TPS65217 properties
ARM: omap2+: fixing wrong strcat for Non-NULL terminated string
r8169: add support for RTL8168 series add-on card.
x86/mce/AMD: Make the init code more robust
device-dax: fix sysfs duplicate warnings
net: skb_needs_check() accepts CHECKSUM_NONE for tx
pstore: Use dynamic spinlock initializer
pstore: Correctly initialize spinlock and flags
pstore: Allow prz to control need for locking
v4l: s5c73m3: fix negation operator
dentry name snapshots
ipmi/watchdog: fix watchdog timeout set on reboot
RDMA/uverbs: Fix the check for port number
sched/cgroup: Move sched_online_group() back into css_online() to fix crash
mailbox: handle empty message in tx_tick
mailbox: skip complete wait event if timer expired
mailbox: always wait in mbox_send_message for blocking Tx mode
wil6210: fix deadlock when using fw_no_recovery option
ath10k: fix null deref on wmi-tlv when trying spectral scan
isdn/i4l: fix buffer overflow
isdn: Fix a sleep-in-atomic bug
net: phy: Do not perform software reset for Generic PHY
nfc: fdp: fix NULL pointer dereference
nfc: Fix hangup of RC-S380* in port100_send_ack()
smp/hotplug: Replace BUG_ON and react useful
smp/hotplug: Move unparking of percpu threads to the control CPU
drm: rcar-du: Simplify and fix probe error handling
Staging: comedi: comedi_fops: Avoid orphaned proc entry
Revert "powerpc/numa: Fix percpu allocations to be NUMA aware"
KVM: PPC: Book3S HV: Save/restore host values of debug registers
KVM: PPC: Book3S HV: Restore critical SPRs to host values on guest exit
drm/nouveau/bar/gf100: fix access to upper half of BAR2
drm/nouveau/disp/nv50-: bump max chans to 21
drm/vmwgfx: Fix gcc-7.1.1 warning
md/raid5: add thread_group worker async_tx_issue_pending_all
KVM: PPC: Book3S HV: Enable TM before accessing TM registers
crypto: authencesn - Fix digest_null crash
NFSv4.1: Fix a race where CB_NOTIFY_LOCK fails to wake a waiter
NFS: invalidate file size when taking a lock.
powerpc/pseries: Fix of_node_put() underflow during reconfig remove
parisc: Suspend lockup detectors before system halt
parisc: Extend disabled preemption in copy_user_page
parisc: Prevent TLB speculation on flushed pages on CPUs that only support equivalent aliases
ALSA: hda - Add missing NVIDIA GPU codec IDs to patch table
ALSA: fm801: Initialize chip after IRQ handler is registered
jfs: Don't clear SGID when inheriting ACLs
net: reduce skb_warn_bad_offload() noise
pstore: Make spinlock per zone instead of global
af_key: Add lock to key dump
ANDROID: sched/fair: Add a backup_cpu to find_best_target
ANDROID: sched/fair: Try to estimate possible idle states.
ANDROID: sched/fair: Sync task util before EAS wakeup
ANDROID: Revert "sched/fair: ensure utilization signals are synchronized before use"
ANDROID: sched/fair: kick nohz idle balance for misfit task
ANDROID: sched/fair: Update signals of nohz cpus if we are going idle
ANDROID: events: add tracepoint for find_best_target
ANDROID: sched/fair: streamline find_best_target heuristics
UPSTREAM: cpufreq: schedutil: Trace frequency only if it has changed
UPSTREAM: cpufreq: schedutil: Avoid reducing frequency of busy CPUs prematurely
UPSTREAM: cpufreq: schedutil: Refactor sugov_next_freq_shared()
UPSTREAM: cpufreq: schedutil: Pass sg_policy to get_next_freq()
UPSTREAM: cpufreq: schedutil: Rectify comment in sugov_irq_work() function
UPSTREAM: cpufreq: schedutil: irq-work and mutex are only used in slow path
UPSTREAM: cpufreq: schedutil: enable fast switch earlier
UPSTREAM: cpufreq: schedutil: Avoid indented labels
ANDROID: sched/{fair,tune}: simplify fair.c code
ANDROID: FIXUP: sched/tune: update accouting before CPU capacity
ANDROID: sched: walt: fix window misalignment when HZ=300
ANDROID: sched/fair: Remove remnants of commit 608d49484e
ANDROID: schedstats/eas: guard properly to avoid breaking non-smp schedstats users
ANDROID: sched/tune: don't use schedtune before it is ready
ANDROID: sched/fair: use SCHED_CAPACITY_SCALE for energy normalization
ANDROID: sched/{fair,tune}: use reciprocal_value to compute boost margin
ANDROID: sched/tune: Initialize raw_spin_lock in boosted_groups
ANDROID: sched/tune: report when SchedTune has not been initialized
ANDROID: sched/tune: fix sched_energy_diff tracepoint
ANDROID: sched/tune: increase group count to 5
ANDROID: cpufreq/schedutil: use boosted_cpu_util for PELT to match WALT
ANDROID: sched/fair: Fix sched_group_energy() to support per-cpu capacity states
ANDROID: sched/fair: discount task contribution to find CPU with lowest utilization
ANDROID: sched/fair: ensure utilization signals are synchronized before use
ANDROID: sched/fair: remove task util from own cpu when placing waking task
ANDROID: trace:sched: Make util_avg in load_avg trace reflect PELT/WALT as used
ANDROID: sched/fair: Add eas (& cas) specific rq, sd and task stats
ANDROID: sched/core: Fix PELT jump to max OPP upon util increase
ANDROID: sched: EAS & 'single cpu per cluster'/cpu hotplug interoperability
UPSTREAM: sched/core: Fix group_entity's share update
UPSTREAM: sched/fair: Propagate asynchrous detach
UPSTREAM: sched/fair: Propagate load during synchronous attach/detach
UPSTREAM: sched/fair: Factorize attach/detach entity
ANDROID: sched/fair: Simplify idle_idx handling in select_idle_sibling()
ANDROID: sched/fair: refactor find_best_target() for simplicity
ANDROID: sched/fair: Change cpu iteration order in find_best_target()
ANDROID: sched/core: Add first cpu w/ max/min orig capacity to root domain
ANDROID: sched/core: Remove remnants of commit fd5c98da1a42
ANDROID: sched: Remove sysctl_sched_is_big_little
ANDROID: sched/fair: Code !is_big_little path into select_energy_cpu_brute()
ANDROID: EAS: sched/fair: Re-integrate 'honor sync wakeups' into wakeup path
ANDROID: Fixup!: sched/fair.c: Set SchedTune specific struct energy_env.task
ANDROID: sched/fair: Energy-aware wake-up task placement
ANDROID: sched/fair: Add energy_diff dead-zone margin
ANDROID: arm64: Set SD_ASYM_CPUCAPACITY sched_domain flag on DIE level
UPSTREAM: sched/fair: Fix incorrect comment for capacity_margin
UPSTREAM: sched/fair: Avoid pulling tasks from non-overloaded higher capacity groups
UPSTREAM: sched/fair: Add per-CPU min capacity to sched_group_capacity
UPSTREAM: sched/fair: Consider spare capacity in find_idlest_group()
UPSTREAM: sched/fair: Compute task/cpu utilization at wake-up correctly
ANDROID: Partial Revert: "ANDROID: sched: Add cpu capacity awareness to wakeup balancing"
ANDROID: sched/fair: Decommission energy_aware_wake_cpu()
ANDROID: Revert "WIP: sched: Consider spare cpu capacity at task wake-up"
FROM-LIST: cpufreq: schedutil: Redefine the rate_limit_us tunable
ANDROID: cpufreq: schedutil: add up/down frequency transition rate limits
ANDROID: trace/sched: add rq utilization signal for WALT
ANDROID: sched/cpufreq: make schedutil use WALT signal
ANDROID: sched: cpufreq: use rt_avg as estimate of required RT CPU capacity
UPSTREAM: cpufreq: schedutil: move slow path from workqueue to SCHED_FIFO task
ANDROID: sched/cpufreq: fix tunables for schedfreq governor
cpufreq: interactive governor drops bits in time calculation
DEBUG: sched/fair: Fix sched_load_avg_cpu events for task_groups
DEBUG: sched/fair: Fix missing sched_load_avg_cpu events
sched: Consider misfit tasks when load-balancing
ANDROID: binder: Don't BUG_ON(!spin_is_locked()).
Conflicts:
drivers/cpufreq/cpufreq_interactive.c
include/trace/events/sched.h
kernel/cpu.c
kernel/sched/cpufreq_schedutil.c
kernel/sched/debug.c
kernel/sched/fair.c
kernel/sched/tune.c
kernel/sched/walt.c
kernel/sched/walt.h
Change-Id: I04f9e2c5cc6c638742472465080eaa0473f1c799
Signed-off-by: Kyle Yan <kyan@codeaurora.org>
/*
 * Tick related global functions
 */
#ifndef _LINUX_TICK_H
#define _LINUX_TICK_H

#include <linux/clockchips.h>
#include <linux/irqflags.h>
#include <linux/percpu.h>
#include <linux/context_tracking_state.h>
#include <linux/cpumask.h>
#include <linux/sched.h>

#ifdef CONFIG_GENERIC_CLOCKEVENTS
extern void __init tick_init(void);
/* Should be core only, but ARM BL switcher requires it */
extern void tick_suspend_local(void);
/* Should be core only, but XEN resume magic and ARM BL switcher require it */
extern void tick_resume_local(void);
extern void tick_handover_do_timer(void);
extern void tick_cleanup_dead_cpu(int cpu);
#else /* CONFIG_GENERIC_CLOCKEVENTS */
static inline void tick_init(void) { }
static inline void tick_suspend_local(void) { }
static inline void tick_resume_local(void) { }
static inline void tick_handover_do_timer(void) { }
static inline void tick_cleanup_dead_cpu(int cpu) { }
#endif /* !CONFIG_GENERIC_CLOCKEVENTS */

extern u64 jiffy_to_ktime_ns(u64 *now, u64 *jiffy_ktime_ns);

#if defined(CONFIG_GENERIC_CLOCKEVENTS) && defined(CONFIG_SUSPEND)
extern void tick_freeze(void);
extern void tick_unfreeze(void);
#else
static inline void tick_freeze(void) { }
static inline void tick_unfreeze(void) { }
#endif

#ifdef CONFIG_TICK_ONESHOT
extern void tick_irq_enter(void);
# ifndef arch_needs_cpu
#  define arch_needs_cpu() (0)
# endif
#else
static inline void tick_irq_enter(void) { }
#endif

#if defined(CONFIG_GENERIC_CLOCKEVENTS_BROADCAST) && defined(CONFIG_TICK_ONESHOT)
extern void hotplug_cpu__broadcast_tick_pull(int dead_cpu);
#else
static inline void hotplug_cpu__broadcast_tick_pull(int dead_cpu) { }
#endif

enum tick_broadcast_mode {
	TICK_BROADCAST_OFF,
	TICK_BROADCAST_ON,
	TICK_BROADCAST_FORCE,
};

enum tick_broadcast_state {
	TICK_BROADCAST_EXIT,
	TICK_BROADCAST_ENTER,
};

#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
extern void tick_broadcast_control(enum tick_broadcast_mode mode);
#else
static inline void tick_broadcast_control(enum tick_broadcast_mode mode) { }
#endif /* BROADCAST */

#ifdef CONFIG_GENERIC_CLOCKEVENTS
extern int tick_broadcast_oneshot_control(enum tick_broadcast_state state);
#else
static inline int tick_broadcast_oneshot_control(enum tick_broadcast_state state)
{
	return 0;
}
#endif

static inline void tick_broadcast_enable(void)
{
	tick_broadcast_control(TICK_BROADCAST_ON);
}
static inline void tick_broadcast_disable(void)
{
	tick_broadcast_control(TICK_BROADCAST_OFF);
}
static inline void tick_broadcast_force(void)
{
	tick_broadcast_control(TICK_BROADCAST_FORCE);
}
static inline int tick_broadcast_enter(void)
{
	return tick_broadcast_oneshot_control(TICK_BROADCAST_ENTER);
}
static inline void tick_broadcast_exit(void)
{
	tick_broadcast_oneshot_control(TICK_BROADCAST_EXIT);
}

enum tick_dep_bits {
	TICK_DEP_BIT_POSIX_TIMER	= 0,
	TICK_DEP_BIT_PERF_EVENTS	= 1,
	TICK_DEP_BIT_SCHED		= 2,
	TICK_DEP_BIT_CLOCK_UNSTABLE	= 3
};

#define TICK_DEP_MASK_NONE		0
#define TICK_DEP_MASK_POSIX_TIMER	(1 << TICK_DEP_BIT_POSIX_TIMER)
#define TICK_DEP_MASK_PERF_EVENTS	(1 << TICK_DEP_BIT_PERF_EVENTS)
#define TICK_DEP_MASK_SCHED		(1 << TICK_DEP_BIT_SCHED)
#define TICK_DEP_MASK_CLOCK_UNSTABLE	(1 << TICK_DEP_BIT_CLOCK_UNSTABLE)

#ifdef CONFIG_NO_HZ_COMMON
extern bool tick_nohz_enabled;
extern int tick_nohz_tick_stopped(void);
extern void tick_nohz_idle_enter(void);
extern void tick_nohz_idle_exit(void);
extern void tick_nohz_irq_exit(void);
extern ktime_t tick_nohz_get_sleep_length(void);
extern unsigned long tick_nohz_get_idle_calls(void);
extern u64 get_cpu_idle_time_us(int cpu, u64 *last_update_time);
extern u64 get_cpu_iowait_time_us(int cpu, u64 *last_update_time);
#else /* !CONFIG_NO_HZ_COMMON */
#define tick_nohz_enabled (0)
static inline int tick_nohz_tick_stopped(void) { return 0; }
static inline void tick_nohz_idle_enter(void) { }
static inline void tick_nohz_idle_exit(void) { }

static inline ktime_t tick_nohz_get_sleep_length(void)
{
	ktime_t len = { .tv64 = NSEC_PER_SEC/HZ };

	return len;
}
static inline u64 get_cpu_idle_time_us(int cpu, u64 *unused) { return -1; }
static inline u64 get_cpu_iowait_time_us(int cpu, u64 *unused) { return -1; }
#endif /* !CONFIG_NO_HZ_COMMON */

#ifdef CONFIG_NO_HZ_FULL
extern bool tick_nohz_full_running;
extern cpumask_var_t tick_nohz_full_mask;
extern cpumask_var_t housekeeping_mask;

static inline bool tick_nohz_full_enabled(void)
{
	if (!context_tracking_is_enabled())
		return false;

	return tick_nohz_full_running;
}

static inline bool tick_nohz_full_cpu(int cpu)
{
	if (!tick_nohz_full_enabled())
		return false;

	return cpumask_test_cpu(cpu, tick_nohz_full_mask);
}

static inline void tick_nohz_full_add_cpus_to(struct cpumask *mask)
{
	if (tick_nohz_full_enabled())
		cpumask_or(mask, mask, tick_nohz_full_mask);
}

static inline int housekeeping_any_cpu(void)
{
	return cpumask_any_and(housekeeping_mask, cpu_online_mask);
}

extern void tick_nohz_dep_set(enum tick_dep_bits bit);
extern void tick_nohz_dep_clear(enum tick_dep_bits bit);
extern void tick_nohz_dep_set_cpu(int cpu, enum tick_dep_bits bit);
extern void tick_nohz_dep_clear_cpu(int cpu, enum tick_dep_bits bit);
extern void tick_nohz_dep_set_task(struct task_struct *tsk,
				   enum tick_dep_bits bit);
extern void tick_nohz_dep_clear_task(struct task_struct *tsk,
				     enum tick_dep_bits bit);
extern void tick_nohz_dep_set_signal(struct signal_struct *signal,
				     enum tick_dep_bits bit);
extern void tick_nohz_dep_clear_signal(struct signal_struct *signal,
				       enum tick_dep_bits bit);

/*
 * The below are tick_nohz_[set,clear]_dep() wrappers that optimize off-cases
 * on top of static keys.
 */
static inline void tick_dep_set(enum tick_dep_bits bit)
{
	if (tick_nohz_full_enabled())
		tick_nohz_dep_set(bit);
}

static inline void tick_dep_clear(enum tick_dep_bits bit)
{
	if (tick_nohz_full_enabled())
		tick_nohz_dep_clear(bit);
}

static inline void tick_dep_set_cpu(int cpu, enum tick_dep_bits bit)
{
	if (tick_nohz_full_cpu(cpu))
		tick_nohz_dep_set_cpu(cpu, bit);
}

static inline void tick_dep_clear_cpu(int cpu, enum tick_dep_bits bit)
{
	if (tick_nohz_full_cpu(cpu))
		tick_nohz_dep_clear_cpu(cpu, bit);
}

static inline void tick_dep_set_task(struct task_struct *tsk,
				     enum tick_dep_bits bit)
{
	if (tick_nohz_full_enabled())
		tick_nohz_dep_set_task(tsk, bit);
}
static inline void tick_dep_clear_task(struct task_struct *tsk,
				       enum tick_dep_bits bit)
{
	if (tick_nohz_full_enabled())
		tick_nohz_dep_clear_task(tsk, bit);
}
static inline void tick_dep_set_signal(struct signal_struct *signal,
				       enum tick_dep_bits bit)
{
	if (tick_nohz_full_enabled())
		tick_nohz_dep_set_signal(signal, bit);
}
static inline void tick_dep_clear_signal(struct signal_struct *signal,
					 enum tick_dep_bits bit)
{
	if (tick_nohz_full_enabled())
		tick_nohz_dep_clear_signal(signal, bit);
}

extern void tick_nohz_full_kick_cpu(int cpu);
extern void __tick_nohz_task_switch(void);
#else
static inline int housekeeping_any_cpu(void)
{
	cpumask_t available;
	int cpu;

	cpumask_andnot(&available, cpu_online_mask, cpu_isolated_mask);
	cpu = cpumask_any(&available);
	if (cpu >= nr_cpu_ids)
		cpu = smp_processor_id();

	return cpu;
}
static inline bool tick_nohz_full_enabled(void) { return false; }
static inline bool tick_nohz_full_cpu(int cpu) { return false; }
static inline void tick_nohz_full_add_cpus_to(struct cpumask *mask) { }

static inline void tick_dep_set(enum tick_dep_bits bit) { }
static inline void tick_dep_clear(enum tick_dep_bits bit) { }
static inline void tick_dep_set_cpu(int cpu, enum tick_dep_bits bit) { }
static inline void tick_dep_clear_cpu(int cpu, enum tick_dep_bits bit) { }
static inline void tick_dep_set_task(struct task_struct *tsk,
				     enum tick_dep_bits bit) { }
static inline void tick_dep_clear_task(struct task_struct *tsk,
				       enum tick_dep_bits bit) { }
static inline void tick_dep_set_signal(struct signal_struct *signal,
				       enum tick_dep_bits bit) { }
static inline void tick_dep_clear_signal(struct signal_struct *signal,
					 enum tick_dep_bits bit) { }

static inline void tick_nohz_full_kick_cpu(int cpu) { }
static inline void __tick_nohz_task_switch(void) { }
#endif

static inline const struct cpumask *housekeeping_cpumask(void)
{
#ifdef CONFIG_NO_HZ_FULL
	if (tick_nohz_full_enabled())
		return housekeeping_mask;
#endif
	return cpu_possible_mask;
}

static inline bool is_housekeeping_cpu(int cpu)
{
#ifdef CONFIG_NO_HZ_FULL
	if (tick_nohz_full_enabled())
		return cpumask_test_cpu(cpu, housekeeping_mask);
#endif
	return !cpu_isolated(cpu);
}

static inline void housekeeping_affine(struct task_struct *t)
{
#ifdef CONFIG_NO_HZ_FULL
	if (tick_nohz_full_enabled())
		set_cpus_allowed_ptr(t, housekeeping_mask);
#endif
}

static inline void tick_nohz_task_switch(void)
{
	if (tick_nohz_full_enabled())
		__tick_nohz_task_switch();
}

ktime_t *get_next_event_cpu(unsigned int cpu);
#endif
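The header above declares the tick-broadcast helpers (tick_broadcast_enter()/tick_broadcast_exit()) that a caller uses to hand the per-CPU tick over to the broadcast device before a deep sleep state and to take it back afterwards. As a minimal sketch of that usage pattern (not part of tick.h; enter_deep_idle() and enter_shallow_idle() are hypothetical placeholders standing in for a driver's own state-entry routines):

/*
 * Illustrative sketch only: bracket a deep idle state with the broadcast
 * helpers declared above. If tick_broadcast_enter() fails (returns
 * non-zero), the local tick cannot be handed off, so fall back to a
 * shallower state that keeps the local timer running.
 */
static int example_enter_deep_idle(void)
{
	int ret;

	if (tick_broadcast_enter())
		return enter_shallow_idle();	/* hypothetical fallback state */

	ret = enter_deep_idle();		/* hypothetical deep C-state entry */

	/* Reclaim the local tick once the CPU is awake again. */
	tick_broadcast_exit();

	return ret;
}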