Merge 4.9.248 into android-4.9-q
Linux 4.9.248
x86/uprobes: Do not use prefixes.nbytes when looping over prefixes.bytes
Input: i8042 - fix error return code in i8042_setup_aux()
i2c: qup: Fix error return code in qup_i2c_bam_schedule_desc()
gfs2: check for empty rgrp tree in gfs2_ri_update
* tracing: Fix userstacktrace option for instances
kernel/trace/trace.c
kernel/trace/trace.h
spi: bcm2835: Release the DMA channel if probe fails after dma_init
spi: bcm2835: Fix use-after-free on unbind
spi: bcm-qspi: Fix use-after-free on unbind
* spi: Introduce device-managed SPI controller allocation
drivers/spi/spi.c
include/linux/spi/spi.h
iommu/amd: Set DTE[IntTabLen] to represent 512 IRTEs
i2c: imx: Check for I2SR_IAL after every byte
i2c: imx: Fix reset of I2SR_IAL flag
cifs: fix potential use-after-free in cifs_echo_request()
ftrace: Fix updating FTRACE_FL_TRAMP
* tty: Fix ->session locking
drivers/tty/tty_io.c
include/linux/tty.h
ALSA: hda/generic: Add option to enforce preferred_dacs pairs
ALSA: hda/realtek - Add new codec supported for ALC897
* tty: Fix ->pgrp locking in tiocspgrp()
drivers/tty/tty_io.c
USB: serial: option: add support for Thales Cinterion EXS82
USB: serial: option: add Fibocom NL668 variants
USB: serial: ch341: sort device-id entries
USB: serial: ch341: add new Product ID for CH341A
USB: serial: kl5kusb105: fix memleak on open
* usb: gadget: f_fs: Use local copy of descriptors for userspace copy
drivers/usb/gadget/function/f_fs.c
* vlan: consolidate VLAN parsing code and limit max parsing depth
include/linux/if_vlan.h
include/net/inet_ecn.h
pinctrl: baytrail: Fix pin being driven low for a while on gpiod_get(..., GPIOD_OUT_HIGH)
pinctrl: baytrail: Replace WARN with dev_info_once when setting direct-irq pin to output
btrfs: sysfs: init devices outside of the chunk_mutex
RDMA/i40iw: Address an mmap handler exploit in i40iw
* spi: Fix controller unregister order harder
drivers/spi/spi.c
Input: i8042 - add ByteSpeed touchpad to noloop table
* Input: xpad - support Ardwiino Controllers
drivers/input/joystick/xpad.c
dt-bindings: net: correct interrupt flags in examples
net/mlx5: Fix wrong address reclaim when command interface is down
net: pasemi: fix error return code in pasemi_mac_open()
cxgb3: fix error return code in t3_sge_alloc_qset()
net/x25: prevent a couple of overflows
ibmvnic: Fix TX completion error handling
ibmvnic: Ensure that SCRQ entry reads are correctly ordered
netfilter: bridge: reset skb->pkt_type after NF_INET_POST_ROUTING traversal
* bonding: wait for sysfs kobject destruction before freeing struct slave
drivers/net/bonding/bond_main.c
drivers/net/bonding/bond_sysfs_slave.c
include/net/bonding.h
usbnet: ipheth: fix connectivity with iOS 14
rose: Fix Null pointer dereference in rose_send_frame()
net/af_iucv: set correct sk_protocol for child sockets
ANDROID: cuttlefish_defconfig: Disable CONFIG_KSM
Merge 4.9.247 into android-4.9-q
Linux 4.9.247
* USB: core: Fix regression in Hercules audio card
drivers/usb/core/quirks.c
* USB: core: add endpoint-blacklist quirk
drivers/usb/core/config.c
drivers/usb/core/quirks.c
drivers/usb/core/usb.h
include/linux/usb/quirks.h
* regulator: workaround self-referent regulators
drivers/regulator/core.c
* regulator: avoid resolve_supply() infinite recursion
drivers/regulator/core.c
x86/speculation: Fix prctl() when spectre_v2_user={seccomp,prctl},ibpb
usb: gadget: Fix memleak in gadgetfs_fill_super
* usb: gadget: f_midi: Fix memleak in f_midi_alloc
drivers/usb/gadget/function/f_midi.c
* USB: core: Change %pK for __user pointers to %px
drivers/usb/core/devio.c
perf probe: Fix to die_entrypc() returns error correctly
platform/x86: toshiba_acpi: Fix the wrong variable assignment
can: gs_usb: fix endianess problem with candleLight firmware
efivarfs: revert "fix memory leak in efivarfs_create()"
ibmvnic: fix NULL pointer dereference in ibmvic_reset_crq
net: ena: set initial DMA width to avoid intel iommu issue
nfc: s3fwrn5: use signed integer for parsing GPIO numbers
IB/mthca: fix return value of error branch in mthca_init_cq()
bnxt_en: Release PCI regions when DMA mask setup fails during probe.
video: hyperv_fb: Fix the cache type when mapping the VRAM
bnxt_en: fix error return code in bnxt_init_board()
* scsi: ufs: Fix race between shutdown and runtime resume flow
drivers/scsi/ufs/ufshcd.c
batman-adv: set .owner to THIS_MODULE
phy: tegra: xusb: Fix dangling pointer on probe failure
perf/x86: fix sysfs type mismatches
scsi: target: iscsi: Fix cmd abort fabric stop race
scsi: libiscsi: Fix NOP race condition
dmaengine: pl330: _prep_dma_memcpy: Fix wrong burst size
* proc: don't allow async path resolution of /proc/self components
fs/proc/self.c
x86/xen: don't unbind uninitialized lock_kicker_irq
dmaengine: xilinx_dma: use readl_poll_timeout_atomic variant
HID: hid-sensor-hub: Fix issue with devices with no report ID
Input: i8042 - allow insmod to succeed on devices without an i8042 controller
* HID: cypress: Support Varmilo Keyboards' media hotkeys
drivers/hid/hid-ids.h
ALSA: hda/hdmi: fix incorrect locking in hdmi_pcm_close
ALSA: hda/hdmi: Use single mutex unlock in error paths
* arm64: pgtable: Fix pte_accessible()
arch/arm64/include/asm/pgtable.h
btrfs: inode: Verify inode mode to avoid NULL pointer dereference
btrfs: tree-checker: Enhance chunk checker to validate chunk profile
* PCI: Add device even if driver attach failed
drivers/pci/bus.c
btrfs: fix lockdep splat when reading qgroup config on mount
mm/userfaultfd: do not access vma->vm_mm after calling handle_userfault()
perf event: Check ref_reloc_sym before using it
* BACKPORT: arm64: SW PAN: Point saved ttbr0 at the zero page when switching to init_mm
arch/arm64/include/asm/efi.h
arch/arm64/include/asm/mmu_context.h
Merge 4.9.246 into android-4.9-q
Linux 4.9.246
x86/microcode/intel: Check patch signature before saving microcode for early loading
s390/cpum_sf.c: fix file permission for cpum_sfb_size
mac80211: free sta in sta_info_insert_finish() on errors
mac80211: minstrel: fix tx status processing corner case
mac80211: minstrel: remove deferred sampling code
xtensa: disable preemption around cache alias management calls
* regulator: fix memory leak with repeated set_machine_constraints()
drivers/regulator/core.c
iio: accel: kxcjk1013: Replace is_smo8500_device with an acpi_type enum
* ext4: fix bogus warning in ext4_update_dx_flag()
fs/ext4/ext4.h
efivarfs: fix memory leak in efivarfs_create()
tty: serial: imx: keep console clocks always on
ALSA: mixart: Fix mutex deadlock
* ALSA: ctl: fix error path at adding user-defined element set
sound/core/control.c
powerpc/uaccess-flush: fix missing includes in kup-radix.h
* libfs: fix error cast of negative value in simple_attr_write()
fs/libfs.c
xfs: revert "xfs: fix rmap key and record comparison functions"
regulator: ti-abb: Fix array out of bound read access on the first transition
MIPS: Alchemy: Fix memleak in alchemy_clk_setup_cpu
can: m_can: m_can_handle_state_change(): fix state change
can: peak_usb: fix potential integer overflow on shift of a int
can: dev: can_restart(): post buffer from the right context
perf lock: Don't free "lock_seq_stat" if read_count isn't zero
ARM: dts: imx50-evk: Fix the chip select 1 IOMUX
arm: dts: imx6qdl-udoo: fix rgmii phy-mode for ksz9031 phy
MIPS: export has_transparent_hugepage() for modules
Input: adxl34x - clean up a data type in adxl34x_probe()
* vfs: remove lockdep bogosity in __sb_start_write
fs/super.c
* arm64: psci: Avoid printing in cpu_psci_cpu_die()
arch/arm64/kernel/psci.c
pinctrl: rockchip: enable gpio pclk for rockchip_gpio_to_irq
mlxsw: core: Use variable timeout for EMAD retries
net: ftgmac100: Fix crash when removing driver
tcp: only postpone PROBE_RTT if RTT is < current min_rtt estimate
net: usb: qmi_wwan: Set DTR quirk for MR400
sctp: change to hold/put transport for proto_unreach_timer
qlcnic: fix error return code in qlcnic_83xx_restart_hw()
net: x25: Increase refcnt of "struct x25_neigh" in x25_rx_call_request
net/mlx4_core: Fix init_hca fields offset
* netlabel: fix an uninitialized warning in netlbl_unlabel_staticlist()
net/netlabel/netlabel_unlabeled.c
* netlabel: fix our progress tracking in netlbl_unlabel_staticlist()
net/netlabel/netlabel_unlabeled.c
net: Have netpoll bring-up DSA management interface
* net: bridge: add missing counters to ndo_get_stats64 callback
net/bridge/br_device.c
net: b44: fix error return code in b44_init_one()
* inet_diag: Fix error path to cancel the meseage in inet_req_diag_fill()
net/ipv4/inet_diag.c
devlink: Add missing genlmsg_cancel() in devlink_nl_sb_port_pool_fill()
bnxt_en: read EEPROM A2h address using page 0
atm: nicstar: Unmap DMA on send error
* ah6: fix error return code in ah6_input()
net/ipv6/ah6.c
Merge 4.9.245 into android-4.9-q
Linux 4.9.245
ACPI: GED: fix -Wformat
KVM: x86: clflushopt should be treated as a no-op by emulation
mac80211: always wind down STA state
Input: sunkbd - avoid use-after-free in teardown paths
powerpc/8xx: Always fault when _PAGE_ACCESSED is not set
i2c: mux: pca954x: Add missing pca9546 definition to chip_desc
i2c: imx: Fix external abort on interrupt in exit paths
i2c: imx: use clk notifier for rate changes
powerpc/64s: flush L1D after user accesses
powerpc/uaccess: Evaluate macro arguments once, before user access is allowed
powerpc: Fix __clear_user() with KUAP enabled
powerpc: Implement user_access_begin and friends
powerpc: Add a framework for user access tracking
powerpc/64s: flush L1D on kernel entry
powerpc/64s: move some exception handlers out of line
powerpc/64s: Define MASKABLE_RELON_EXCEPTION_PSERIES_OOL
Linux 4.9.244
Convert trailing spaces and periods in path components
* ext4: fix leaking sysfs kobject after failed mount
fs/ext4/super.c
* reboot: fix overflow parsing reboot cpu number
kernel/reboot.c
* Revert "kernel/reboot.c: convert simple_strtoul to kstrtoint"
kernel/reboot.c
* perf/core: Fix race in the perf_mmap_close() function
kernel/events/core.c
xen/events: block rogue events for some time
xen/events: defer eoi in case of excessive number of events
xen/events: use a common cpu hotplug hook for event channels
xen/events: switch user event channels to lateeoi model
xen/pciback: use lateeoi irq binding
xen/scsiback: use lateeoi irq binding
xen/netback: use lateeoi irq binding
xen/blkback: use lateeoi irq binding
xen/events: add a new "late EOI" evtchn framework
xen/events: fix race in evtchn_fifo_unmask()
xen/events: add a proper barrier to 2-level uevent unmasking
xen/events: avoid removing an event channel while handling it
* perf/core: Fix a memory leak in perf_event_parse_addr_filter()
kernel/events/core.c
* perf/core: Fix crash when using HW tracing kernel filters
kernel/events/core.c
* perf/core: Fix bad use of igrab()
include/linux/perf_event.h
kernel/events/core.c
x86/speculation: Allow IBPB to be conditionally enabled on CPUs with always-on STIBP
* random32: make prandom_u32() output unpredictable
drivers/char/random.c
include/linux/prandom.h
kernel/time/timer.c
lib/random32.c
net: Update window_clamp if SOCK_RCVBUF is set
net/x25: Fix null-ptr-deref in x25_connect
net/af_iucv: fix null pointer dereference on shutdown
* IPv6: Set SIT tunnel hard_header_len to zero
net/ipv6/sit.c
* swiotlb: fix "x86: Don't panic if can not alloc buffer for swiotlb"
lib/swiotlb.c
pinctrl: amd: fix incorrect way to disable debounce filter
pinctrl: amd: use higher precision for 512 RtcClk
drm/gma500: Fix out-of-bounds access to struct drm_device.vblank[]
* don't dump the threads that had been already exiting when zapped.
kernel/exit.c
ocfs2: initialize ip_next_orphan
mei: protect mei_cl_mtu from null dereference
usb: cdc-acm: Add DISABLE_ECHO for Renesas USB Download mode
* ext4: unlock xattr_sem properly in ext4_inline_data_truncate()
fs/ext4/inline.c
* ext4: correctly report "not supported" for {usr,grp}jquota when !CONFIG_QUOTA
fs/ext4/super.c
* perf: Fix get_recursion_context()
kernel/events/internal.h
cosa: Add missing kfree in error path of cosa_write
* of/address: Fix of_node memory leak in of_dma_is_coherent
drivers/of/address.c
xfs: fix a missing unlock on error in xfs_fs_map_blocks
xfs: fix rmap key and record comparison functions
xfs: fix flags argument to rmap lookup when converting shared file rmaps
pinctrl: aspeed: Fix GPI only function problem.
iommu/amd: Increase interrupt remapping table limit to 512 entries
scsi: scsi_dh_alua: Avoid crash during alua_bus_detach()
* cfg80211: regulatory: Fix inconsistent format argument
net/wireless/reg.c
mac80211: fix use of skb payload instead of header
drm/amdgpu: perform srbm soft reset always on SDMA resume
scsi: hpsa: Fix memory leak in hpsa_init_one()
gfs2: check for live vs. read-only file system in gfs2_fitrim
gfs2: Free rd_bits later in gfs2_clear_rgrpd to fix use-after-free
usb: gadget: goku_udc: fix potential crashes in probe
ath9k_htc: Use appropriate rs_datalen type
geneve: add transport ports in route lookup for geneve
i40e: Memory leak in i40e_config_iwarp_qvlist
i40e: Fix of memory leak and integer truncation in i40e_virtchnl.c
i40e: Wrong truncation from u16 to u8
i40e: add num_vectors checker in iwarp handler
i40e: Fix a potential NULL pointer dereference
* pinctrl: devicetree: Avoid taking direct reference to device name string
drivers/pinctrl/devicetree.c
Btrfs: fix missing error return if writeback for extent buffer never started
xfs: flush new eof page on truncate to avoid post-eof corruption
can: peak_usb: peak_usb_get_ts_time(): fix timestamp wrapping
can: peak_usb: add range checking in decode operations
can: can_create_echo_skb(): fix echo skb generation: always use skb_clone()
can: dev: __can_get_echo_skb(): fix real payload length return value for RTR frames
can: dev: can_get_echo_skb(): prevent call to kfree_skb() in hard IRQ context
ALSA: hda: prevent undefined shift in snd_hdac_ext_bus_get_link()
perf tools: Add missing swap for ino_generation
* net: xfrm: fix a race condition during allocing spi
net/xfrm/xfrm_state.c
* genirq: Let GENERIC_IRQ_IPI select IRQ_DOMAIN_HIERARCHY
kernel/irq/Kconfig
btrfs: reschedule when cloning lots of extents
* time: Prevent undefined behaviour in timespec64_to_ns()
include/linux/time64.h
mm: mempolicy: fix potential pte_unmap_unlock pte error
gfs2: Wake up when sd_glock_disposal becomes zero
* ring-buffer: Fix recursion protection transitions between interrupt context
kernel/trace/ring_buffer.c
* regulator: defer probe when trying to get voltage from unresolved supply
drivers/regulator/core.c
UPSTREAM: thermal/drivers/hisi: Remove bogus const from function return type
* UPSTREAM: net/ipv6: don't reinitialize ndev->cnf.addr_gen_mode on new inet6_dev
net/ipv6/addrconf.c
UPSTREAM: tee: shm: fix use-after-free via temporarily dropped reference
UPSTREAM: Documentation: ip-sysctl.txt: document addr_gen_mode
UPSTREAM: net: crypto set sk to NULL when af_alg_release.
* UPSTREAM: ipv6: don't auto-add link-local address to lag ports
net/ipv6/addrconf.c
* UPSTREAM: ipv6: ndisc: RFC-ietf-6man-ra-pref64-09 is now published as RFC8781
include/net/ndisc.h
* UPSTREAM: binder: fix incorrect cmd to binder_stat_br
drivers/android/binder.c
* UPSTREAM: arm64: SW PAN: Update saved ttbr0 value on enter_lazy_tlb
arch/arm64/include/asm/mmu_context.h
UPSTREAM: staging: android: vsoc: fix copy_from_user overrun
Merge 4.9.243 into android-4.9-q
Linux 4.9.243
powercap: restrict energy meter to root access
Merge 4.9.242 into android-4.9-q
Linux 4.9.242
Revert "ARC: entry: fix potential EFA clobber when TIF_SYSCALL_TRACE"
ARC: stack unwinding: avoid indefinite looping
* USB: Add NO_LPM quirk for Kingston flash drive
drivers/usb/core/quirks.c
USB: serial: option: add Telit FN980 composition 0x1055
USB: serial: option: add LE910Cx compositions 0x1203, 0x1230, 0x1231
USB: serial: cyberjack: fix write-URB completion race
serial: txx9: add missing platform_driver_unregister() on error in serial_txx9_init
serial: 8250_mtk: Fix uart_get_baud_rate warning
* fork: fix copy_process(CLONE_PARENT) race with the exiting ->real_parent
kernel/fork.c
* vt: Disable KD_FONT_OP_COPY
drivers/tty/vt/vt.c
ACPI: NFIT: Fix comparison to '-ENXIO'
vsock: use ns_capable_noaudit() on socket create
* scsi: core: Don't start concurrent async scan on same host
drivers/scsi/scsi_scan.c
* of: Fix reserved-memory overlap detection
drivers/of/of_reserved_mem.c
x86/kexec: Use up-to-dated screen_info copy to fill boot params
ARM: dts: sun4i-a10: fix cpu_alert temperature
* tracing: Fix out of bounds write in get_trace_buf
kernel/trace/trace.c
* ftrace: Handle tracing when switching between context
kernel/trace/trace.h
* ftrace: Fix recursion check for NMI test
kernel/trace/trace.h
* kthread_worker: prevent queuing delayed work from timer_fn when it is being canceled
kernel/kthread.c
* ALSA: usb-audio: Add implicit feedback quirk for Qu-16
sound/usb/pcm.c
Fonts: Replace discarded const qualifier
gianfar: Account for Tx PTP timestamp in the skb headroom
gianfar: Replace skb_realloc_headroom with skb_cow_head for PTP
tipc: fix use-after-free in tipc_bcast_get_mode
xen/events: don't use chip_data for legacy IRQs
staging: octeon: Drop on uncorrectable alignment or FCS error
staging: octeon: repair "fixed-link" support
staging: comedi: cb_pcidas: Allow 2-channel commands for AO subdevice
* KVM: arm64: Fix AArch32 handling of DBGD{CCINT,SCRext} and DBGVCR
arch/arm64/include/asm/kvm_host.h
* device property: Don't clear secondary pointer for shared primary firmware node
drivers/base/core.c
* device property: Keep secondary firmware node secondary by type
drivers/base/core.c
ARM: s3c24xx: fix missing system reset
ARM: samsung: fix PM debug build with DEBUG_LL but !MMU
hil/parisc: Disable HIL driver when it gets stuck
cachefiles: Handle readpage error correctly
* arm64: berlin: Select DW_APB_TIMER_OF
arch/arm64/Kconfig.platforms
* tty: make FONTX ioctl use the tty pointer they were actually passed
drivers/tty/vt/vt_ioctl.c
rtc: rx8010: don't modify the global rtc ops
vringh: fix __vringh_iov() when riov and wiov are different
* ring-buffer: Return 0 on success from ring_buffer_resize()
kernel/trace/ring_buffer.c
9P: Cast to loff_t before multiplying
libceph: clear con->out_msg on Policy::stateful_server faults
ceph: promote to unsigned long long before shifting
ia64: fix build error with !COREDUMP
ubi: check kthread_should_stop() after the setting of task state
ubifs: dent: Fix some potential memory leaks while iterating entries
powerpc/powernv/elog: Fix race while processing OPAL error log event.
powerpc: Warn about use of smt_snooze_delay
iio:gyro:itg3200: Fix timestamp alignment and prevent data leak.
iio:adc:ti-adc12138 Fix alignment issue with timestamp
iio:light:si1145: Fix timestamp alignment and prevent data leak.
dmaengine: dma-jz4780: Fix race in jz4780_dma_tx_status
* vt: keyboard, extend func_buf_lock to readers
drivers/tty/vt/keyboard.c
* vt: keyboard, simplify vt_kdgkbsent
drivers/tty/vt/keyboard.c
usb: host: fsl-mph-dr-of: check return of dma_set_mask()
* usb: dwc3: core: don't trigger runtime pm when remove driver
drivers/usb/dwc3/core.c
* usb: dwc3: core: add phy cleanup for probe error handling
drivers/usb/dwc3/core.c
btrfs: fix use-after-free on readahead extent after failure to create it
btrfs: cleanup cow block on error
btrfs: reschedule if necessary when logging directory items
scsi: mptfusion: Fix null pointer dereferences in mptscsih_remove()
w1: mxc_w1: Fix timeout resolution problem leading to bus error
acpi-cpufreq: Honor _PSD table setting on new AMD CPUs
ACPI: debug: don't allow debugging when ACPI is disabled
ACPI: video: use ACPI backlight for HP 635 Notebook
ACPI / extlog: Check for RDMSR failure
NFS: fix nfs_path in case of a rename retry
* fs: Don't invalidate page buffers in block_write_full_page()
fs/buffer.c
leds: bcm6328, bcm6358: use devres LED registering function
perf/x86/amd/ibs: Fix raw sample data accumulation
perf/x86/amd/ibs: Don't include randomized bits in get_ibs_op_count()
md/raid5: fix oops during stripe resizing
ARM: dts: s5pv210: remove dedicated 'audio-subsystem' node
ARM: dts: s5pv210: move PMU node out of clock controller
ARM: dts: s5pv210: remove DMA controller bus node name to fix dtschema warnings
memory: emif: Remove bogus debugfs error handling
gfs2: add validation checks for size of superblock
* ext4: Detect already used quota file early
fs/ext4/super.c
drivers: watchdog: rdc321x_wdt: Fix race condition bugs
net: 9p: initialize sun_server.sun_path to have addr's value only when addr is valid
clk: ti: clockdomain: fix static checker warning
md/bitmap: md_bitmap_get_counter returns wrong blocks
power: supply: test_power: add missing newlines when printing parameters by sysfs
bus/fsl_mc: Do not rely on caller to provide non NULL mc_io
drivers/net/wan/hdlc_fr: Correctly handle special skb->protocol values
* arm64/mm: return cpu_all_mask when node is NUMA_NO_NODE
arch/arm64/include/asm/numa.h
USB: adutux: fix debugging
cpufreq: sti-cpufreq: add stih418 support
* kgdb: Make "kgdbcon" work properly with "kgdb_earlycon"
kernel/debug/debug_core.c
* printk: reduce LOG_BUF_SHIFT range for H8300
init/Kconfig
mmc: via-sdmmc: Fix data race bug
media: tw5864: check status of tw5864_frameinterval_get
ath10k: fix VHT NSS calculation when STBC is enabled
video: fbdev: pvr2fb: initialize variables
xfs: fix realtime bitmap/summary file truncation when growing rt volume
ARM: 8997/2: hw_breakpoint: Handle inexact watchpoint addresses
um: change sigio_spinlock to a mutex
* f2fs: fix to check segment boundary during SIT page readahead
fs/f2fs/checkpoint.c
* f2fs: add trace exit in exception path
fs/f2fs/checkpoint.c
sparc64: remove mm_cpumask clearing to fix kthread_use_mm race
powerpc/powernv/smp: Fix spurious DBG() warning
mlxsw: core: Fix use-after-free in mlxsw_emad_trans_finish()
* fscrypt: use EEXIST when file already uses different policy
fs/crypto/policy.c
* fscrypto: move ioctl processing more fully into common code
fs/crypto/policy.c
fs/ext4/ext4.h
fs/ext4/ioctl.c
fs/f2fs/f2fs.h
fs/f2fs/file.c
* fscrypt: return -EXDEV for incompatible rename or link into encrypted dir
fs/crypto/policy.c
fs/ext4/namei.c
fs/f2fs/namei.c
ata: sata_rcar: Fix DMA boundary mask
mtd: lpddr: Fix bad logic in print_drs_error
p54: avoid accessing the data mapped to streaming DMA
* fuse: fix page dereference after free
fs/fuse/dev.c
arch/x86/amd/ibs: Fix re-arming IBS Fetch
tipc: fix memory leak caused by tipc_buf_append()
ravb: Fix bit fields checking in ravb_hwtstamp_get()
efivarfs: Replace invalid slashes with exclamation marks in dentries.
powerpc/powernv/opal-dump : Use IRQ_HANDLED instead of numbers in interrupt handler
* scripts/setlocalversion: make git describe output more reliable
scripts/setlocalversion
SUNRPC: ECONNREFUSED should cause a rebind.
* ANDROID: Temporarily disable XFRM_USER_COMPAT filtering
net/xfrm/xfrm_state.c
net/xfrm/xfrm_user.c
* BACKPORT: xfrm/compat: Translate 32-bit user_policy from sockptr
include/net/xfrm.h
net/xfrm/xfrm_state.c
* BACKPORT: xfrm/compat: Add 32=>64-bit messages translator
include/net/xfrm.h
net/xfrm/Kconfig
net/xfrm/xfrm_user.c
* UPSTREAM: xfrm/compat: Attach xfrm dumps to 64=>32 bit translator
net/xfrm/xfrm_user.c
* BACKPORT: xfrm/compat: Add 64=>32-bit messages translator
include/net/xfrm.h
net/xfrm/xfrm_user.c
* BACKPORT: xfrm: Provide API to register translator module
include/net/xfrm.h
net/xfrm/Kconfig
net/xfrm/Makefile
net/xfrm/xfrm_state.c
* UPSTREAM: mm/sl[uo]b: export __kmalloc_track(_node)_caller
mm/slub.c
ANDROID: Publish uncompressed Image on aarch64
* ANDROID: Makefile: append BUILD_NUMBER to version string when defined
Makefile
Change-Id: I345c9bde484cf008679253982f61b2a833527c3e
Signed-off-by: Lucas Wei <lucaswei@google.com>
kernel/time/timer.c:
/*
 *  linux/kernel/timer.c
 *
 *  Kernel internal timers
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  1997-01-28  Modified by Finn Arne Gangstad to make timers scale better.
 *
 *  1997-09-10  Updated NTP code according to technical memorandum Jan '96
 *              "A Kernel Model for Precision Timekeeping" by Dave Mills
 *  1998-12-24  Fixed a xtime SMP race (we need the xtime_lock rw spinlock to
 *              serialize accesses to xtime/lost_ticks).
 *                              Copyright (C) 1998  Andrea Arcangeli
 *  1999-03-10  Improved NTP compatibility by Ulrich Windl
 *  2002-05-31  Move sys_sysinfo here and make its locking sane, Robert Love
 *  2000-10-05  Implemented scalable SMP per-CPU timer handling.
 *                              Copyright (C) 2000, 2001, 2002  Ingo Molnar
 *              Designed by David S. Miller, Alexey Kuznetsov and Ingo Molnar
 */

#include <linux/kernel_stat.h>
#include <linux/export.h>
#include <linux/interrupt.h>
#include <linux/percpu.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/pid_namespace.h>
#include <linux/notifier.h>
#include <linux/thread_info.h>
#include <linux/time.h>
#include <linux/jiffies.h>
#include <linux/posix-timers.h>
#include <linux/cpu.h>
#include <linux/syscalls.h>
#include <linux/delay.h>
#include <linux/tick.h>
#include <linux/kallsyms.h>
#include <linux/irq_work.h>
#include <linux/sched.h>
#include <linux/sched/sysctl.h>
#include <linux/slab.h>
#include <linux/compat.h>
#include <linux/random.h>

#include <asm/uaccess.h>
#include <asm/unistd.h>
#include <asm/div64.h>
#include <asm/timex.h>
#include <asm/io.h>

#include "tick-internal.h"

#define CREATE_TRACE_POINTS
#include <trace/events/timer.h>

__visible u64 jiffies_64 __cacheline_aligned_in_smp = INITIAL_JIFFIES;

EXPORT_SYMBOL(jiffies_64);

/*
 * The timer wheel has LVL_DEPTH array levels. Each level provides an array of
 * LVL_SIZE buckets. Each level is driven by its own clock and therefore each
 * level has a different granularity.
 *
 * The level granularity is:            LVL_CLK_DIV ^ lvl
 * The level clock frequency is:        HZ / (LVL_CLK_DIV ^ level)
 *
 * The array level of a newly armed timer depends on the relative expiry
 * time. The farther the expiry time is away the higher the array level and
 * therefore the granularity becomes.
 *
 * Contrary to the original timer wheel implementation, which aims for 'exact'
 * expiry of the timers, this implementation removes the need for recascading
 * the timers into the lower array levels. The previous 'classic' timer wheel
 * implementation of the kernel already violated the 'exact' expiry by adding
 * slack to the expiry time to provide batched expiration. The granularity
 * levels provide implicit batching.
 *
 * This is an optimization of the original timer wheel implementation for the
 * majority of the timer wheel use cases: timeouts. The vast majority of
 * timeout timers (networking, disk I/O ...) are canceled before expiry. If
 * the timeout expires it indicates that normal operation is disturbed, so it
 * does not matter much whether the timeout comes with a slight delay.
 *
 * The only exception to this are networking timers with a small expiry
 * time. They rely on the granularity. Those fit into the first wheel level,
 * which has HZ granularity.
 *
 * We don't have cascading anymore. Timers with an expiry time above the
 * capacity of the last wheel level are force expired at the maximum timeout
 * value of the last wheel level. From data sampling we know that the maximum
 * value observed is 5 days (network connection tracking), so this should not
 * be an issue.
 *
 * The currently chosen array constant values are a good compromise between
 * array size and granularity.
 *
 * This results in the following granularity and range levels:
 *
 * HZ 1000 steps
 * Level Offset  Granularity            Range
 *  0      0         1 ms                0 ms -         63 ms
 *  1     64         8 ms               64 ms -        511 ms
 *  2    128        64 ms              512 ms -       4095 ms (512ms - ~4s)
 *  3    192       512 ms             4096 ms -      32767 ms (~4s - ~32s)
 *  4    256      4096 ms (~4s)      32768 ms -     262143 ms (~32s - ~4m)
 *  5    320     32768 ms (~32s)    262144 ms -    2097151 ms (~4m - ~34m)
 *  6    384    262144 ms (~4m)    2097152 ms -   16777215 ms (~34m - ~4h)
 *  7    448   2097152 ms (~34m)  16777216 ms -  134217727 ms (~4h - ~1d)
 *  8    512  16777216 ms (~4h)  134217728 ms - 1073741822 ms (~1d - ~12d)
 *
 * HZ  300
 * Level Offset  Granularity            Range
 *  0      0         3 ms                0 ms -        210 ms
 *  1     64        26 ms              213 ms -       1703 ms (213ms - ~1s)
 *  2    128       213 ms             1706 ms -      13650 ms (~1s - ~13s)
 *  3    192      1706 ms (~1s)      13653 ms -     109223 ms (~13s - ~1m)
 *  4    256     13653 ms (~13s)    109226 ms -     873810 ms (~1m - ~14m)
 *  5    320    109226 ms (~1m)     873813 ms -    6990503 ms (~14m - ~1h)
 *  6    384    873813 ms (~14m)   6990506 ms -   55924050 ms (~1h - ~15h)
 *  7    448   6990506 ms (~1h)   55924053 ms -  447392423 ms (~15h - ~5d)
 *  8    512  55924053 ms (~15h) 447392426 ms - 3579139406 ms (~5d - ~41d)
 *
 * HZ  250
 * Level Offset  Granularity            Range
 *  0      0         4 ms                0 ms -        255 ms
 *  1     64        32 ms              256 ms -       2047 ms (256ms - ~2s)
 *  2    128       256 ms             2048 ms -      16383 ms (~2s - ~16s)
 *  3    192      2048 ms (~2s)      16384 ms -     131071 ms (~16s - ~2m)
 *  4    256     16384 ms (~16s)    131072 ms -    1048575 ms (~2m - ~17m)
 *  5    320    131072 ms (~2m)    1048576 ms -    8388607 ms (~17m - ~2h)
 *  6    384   1048576 ms (~17m)   8388608 ms -   67108863 ms (~2h - ~18h)
 *  7    448   8388608 ms (~2h)   67108864 ms -  536870911 ms (~18h - ~6d)
 *  8    512  67108864 ms (~18h) 536870912 ms - 4294967288 ms (~6d - ~49d)
 *
 * HZ  100
 * Level Offset  Granularity            Range
 *  0      0        10 ms                0 ms -        630 ms
 *  1     64        80 ms              640 ms -       5110 ms (640ms - ~5s)
 *  2    128       640 ms             5120 ms -      40950 ms (~5s - ~40s)
 *  3    192      5120 ms (~5s)      40960 ms -     327670 ms (~40s - ~5m)
 *  4    256     40960 ms (~40s)    327680 ms -    2621430 ms (~5m - ~43m)
 *  5    320    327680 ms (~5m)    2621440 ms -   20971510 ms (~43m - ~5h)
 *  6    384   2621440 ms (~43m)  20971520 ms -  167772150 ms (~5h - ~1d)
 *  7    448  20971520 ms (~5h)  167772160 ms - 1342177270 ms (~1d - ~15d)
 */

/* Clock divisor for the next level */
#define LVL_CLK_SHIFT   3
#define LVL_CLK_DIV     (1UL << LVL_CLK_SHIFT)
#define LVL_CLK_MASK    (LVL_CLK_DIV - 1)
#define LVL_SHIFT(n)    ((n) * LVL_CLK_SHIFT)
#define LVL_GRAN(n)     (1UL << LVL_SHIFT(n))

/*
 * The time start value for each level to select the bucket at enqueue
 * time.
 */
#define LVL_START(n)    ((LVL_SIZE - 1) << (((n) - 1) * LVL_CLK_SHIFT))

/* Size of each clock level */
#define LVL_BITS        6
#define LVL_SIZE        (1UL << LVL_BITS)
#define LVL_MASK        (LVL_SIZE - 1)
#define LVL_OFFS(n)     ((n) * LVL_SIZE)

/* Level depth */
#if HZ > 100
# define LVL_DEPTH      9
# else
# define LVL_DEPTH      8
#endif

/* The cutoff (max. capacity of the wheel) */
#define WHEEL_TIMEOUT_CUTOFF    (LVL_START(LVL_DEPTH))
#define WHEEL_TIMEOUT_MAX       (WHEEL_TIMEOUT_CUTOFF - LVL_GRAN(LVL_DEPTH - 1))

/*
 * The resulting wheel size. If NOHZ is configured we allocate two
 * wheels so we have a separate storage for the deferrable timers.
 */
#define WHEEL_SIZE      (LVL_SIZE * LVL_DEPTH)

#ifdef CONFIG_NO_HZ_COMMON
# define NR_BASES       2
# define BASE_STD       0
# define BASE_DEF       1
#else
# define NR_BASES       1
# define BASE_STD       0
# define BASE_DEF       0
#endif

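/*
 * Worked example (editor's addition, not part of the original file): with
 * LVL_CLK_SHIFT = 3 and LVL_BITS = 6, the constants above give, for HZ=1000:
 *
 *      LVL_GRAN(2)  = 1UL << (2 * 3)      = 64 jiffies (64 ms)
 *      LVL_START(2) = 63 << ((2 - 1) * 3) = 504 jiffies
 *
 * so a timer armed with a delta of at least ~504 jiffies lands in level 2,
 * where every bucket spans 64 ms, approximately matching the HZ 1000 table
 * above.
 */
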
struct timer_base {
        spinlock_t              lock;
        struct timer_list       *running_timer;
        unsigned long           clk;
        unsigned long           next_expiry;
        unsigned int            cpu;
        bool                    migration_enabled;
        bool                    nohz_active;
        bool                    is_idle;
        bool                    must_forward_clk;
        DECLARE_BITMAP(pending_map, WHEEL_SIZE);
        struct hlist_head       vectors[WHEEL_SIZE];
} ____cacheline_aligned;

static DEFINE_PER_CPU(struct timer_base, timer_bases[NR_BASES]);
struct timer_base timer_base_deferrable;
static atomic_t deferrable_pending;

#if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ_COMMON)
unsigned int sysctl_timer_migration = 1;

void timers_update_migration(bool update_nohz)
{
        bool on = sysctl_timer_migration && tick_nohz_active;
        unsigned int cpu;

        /* Avoid the loop, if nothing to update */
        if (this_cpu_read(timer_bases[BASE_STD].migration_enabled) == on)
                return;

        for_each_possible_cpu(cpu) {
                per_cpu(timer_bases[BASE_STD].migration_enabled, cpu) = on;
                per_cpu(timer_bases[BASE_DEF].migration_enabled, cpu) = on;
                per_cpu(hrtimer_bases.migration_enabled, cpu) = on;
                if (!update_nohz)
                        continue;
                per_cpu(timer_bases[BASE_STD].nohz_active, cpu) = true;
                per_cpu(timer_bases[BASE_DEF].nohz_active, cpu) = true;
                per_cpu(hrtimer_bases.nohz_active, cpu) = true;
        }

        timer_base_deferrable.migration_enabled = on;
        timer_base_deferrable.nohz_active = true;
}

int timer_migration_handler(struct ctl_table *table, int write,
                            void __user *buffer, size_t *lenp,
                            loff_t *ppos)
{
        static DEFINE_MUTEX(mutex);
        int ret;

        mutex_lock(&mutex);
        ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
        if (!ret && write)
                timers_update_migration(false);
        mutex_unlock(&mutex);
        return ret;
}
#endif

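/*
 * Usage note (editor's addition, not part of the original file):
 * timer_migration_handler() backs the kernel.timer_migration sysctl, so
 * migration can be toggled at run time, e.g.:
 *
 *      echo 0 > /proc/sys/kernel/timer_migration
 *
 * which makes get_target_base() below keep non-pinned timers on the local
 * CPU instead of pushing them to a busy CPU while this one is idle.
 */
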
static unsigned long round_jiffies_common(unsigned long j, int cpu,
                bool force_up)
{
        int rem;
        unsigned long original = j;

        /*
         * We don't want all cpus firing their timers at once hitting the
         * same lock or cachelines, so we skew each extra cpu with an extra
         * 3 jiffies. This 3 jiffies came originally from the mm/ code which
         * already did this.
         * The skew is done by adding 3*cpunr, then rounding, then
         * subtracting this extra offset again.
         */
        j += cpu * 3;

        rem = j % HZ;

        /*
         * If the target jiffy is just after a whole second (which can happen
         * due to delays of the timer irq, long irq off times etc etc) then
         * we should round down to the whole second, not up. Use 1/4th second
         * as cutoff for this rounding as an extreme upper bound for this.
         * But never round down if @force_up is set.
         */
        if (rem < HZ/4 && !force_up) /* round down */
                j = j - rem;
        else /* round up */
                j = j - rem + HZ;

        /* now that we have rounded, subtract the extra skew again */
        j -= cpu * 3;

        /*
         * Make sure j is still in the future. Otherwise return the
         * unmodified value.
         */
        return time_is_after_jiffies(j) ? j : original;
}

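/*
 * Usage sketch (editor's illustration, not part of the original file):
 * a driver rearming a housekeeping timer roughly every 5 seconds can batch
 * its wakeup with other whole-second timers; "my_timer" is hypothetical:
 *
 *      mod_timer(&my_timer, round_jiffies(jiffies + 5 * HZ));
 *
 * With HZ=1000 on cpu 2, round_jiffies_common() adds the 6-jiffy skew,
 * rounds to a whole second (down only when the remainder is below HZ/4 and
 * force_up is not set), and then subtracts the skew again.
 */
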
/**
 * __round_jiffies - function to round jiffies to a full second
 * @j: the time in (absolute) jiffies that should be rounded
 * @cpu: the processor number on which the timeout will happen
 *
 * __round_jiffies() rounds an absolute time in the future (in jiffies)
 * up or down to (approximately) full seconds. This is useful for timers
 * for which the exact time they fire does not matter too much, as long as
 * they fire approximately every X seconds.
 *
 * By rounding these timers to whole seconds, all such timers will fire
 * at the same time, rather than at various times spread out. The goal
 * of this is to have the CPU wake up less, which saves power.
 *
 * The exact rounding is skewed for each processor to avoid all
 * processors firing at the exact same time, which could lead
 * to lock contention or spurious cache line bouncing.
 *
 * The return value is the rounded version of the @j parameter.
 */
unsigned long __round_jiffies(unsigned long j, int cpu)
{
        return round_jiffies_common(j, cpu, false);
}
EXPORT_SYMBOL_GPL(__round_jiffies);

/**
 * __round_jiffies_relative - function to round jiffies to a full second
 * @j: the time in (relative) jiffies that should be rounded
 * @cpu: the processor number on which the timeout will happen
 *
 * __round_jiffies_relative() rounds a time delta in the future (in jiffies)
 * up or down to (approximately) full seconds. This is useful for timers
 * for which the exact time they fire does not matter too much, as long as
 * they fire approximately every X seconds.
 *
 * By rounding these timers to whole seconds, all such timers will fire
 * at the same time, rather than at various times spread out. The goal
 * of this is to have the CPU wake up less, which saves power.
 *
 * The exact rounding is skewed for each processor to avoid all
 * processors firing at the exact same time, which could lead
 * to lock contention or spurious cache line bouncing.
 *
 * The return value is the rounded version of the @j parameter.
 */
unsigned long __round_jiffies_relative(unsigned long j, int cpu)
{
        unsigned long j0 = jiffies;

        /* Use j0 because jiffies might change while we run */
        return round_jiffies_common(j + j0, cpu, false) - j0;
}
EXPORT_SYMBOL_GPL(__round_jiffies_relative);

/**
 * round_jiffies - function to round jiffies to a full second
 * @j: the time in (absolute) jiffies that should be rounded
 *
 * round_jiffies() rounds an absolute time in the future (in jiffies)
 * up or down to (approximately) full seconds. This is useful for timers
 * for which the exact time they fire does not matter too much, as long as
 * they fire approximately every X seconds.
 *
 * By rounding these timers to whole seconds, all such timers will fire
 * at the same time, rather than at various times spread out. The goal
 * of this is to have the CPU wake up less, which saves power.
 *
 * The return value is the rounded version of the @j parameter.
 */
unsigned long round_jiffies(unsigned long j)
{
        return round_jiffies_common(j, raw_smp_processor_id(), false);
}
EXPORT_SYMBOL_GPL(round_jiffies);

/**
 * round_jiffies_relative - function to round jiffies to a full second
 * @j: the time in (relative) jiffies that should be rounded
 *
 * round_jiffies_relative() rounds a time delta in the future (in jiffies)
 * up or down to (approximately) full seconds. This is useful for timers
 * for which the exact time they fire does not matter too much, as long as
 * they fire approximately every X seconds.
 *
 * By rounding these timers to whole seconds, all such timers will fire
 * at the same time, rather than at various times spread out. The goal
 * of this is to have the CPU wake up less, which saves power.
 *
 * The return value is the rounded version of the @j parameter.
 */
unsigned long round_jiffies_relative(unsigned long j)
{
        return __round_jiffies_relative(j, raw_smp_processor_id());
}
EXPORT_SYMBOL_GPL(round_jiffies_relative);

/**
 * __round_jiffies_up - function to round jiffies up to a full second
 * @j: the time in (absolute) jiffies that should be rounded
 * @cpu: the processor number on which the timeout will happen
 *
 * This is the same as __round_jiffies() except that it will never
 * round down. This is useful for timeouts for which the exact time
 * of firing does not matter too much, as long as they don't fire too
 * early.
 */
unsigned long __round_jiffies_up(unsigned long j, int cpu)
{
        return round_jiffies_common(j, cpu, true);
}
EXPORT_SYMBOL_GPL(__round_jiffies_up);

/**
 * __round_jiffies_up_relative - function to round jiffies up to a full second
 * @j: the time in (relative) jiffies that should be rounded
 * @cpu: the processor number on which the timeout will happen
 *
 * This is the same as __round_jiffies_relative() except that it will never
 * round down. This is useful for timeouts for which the exact time
 * of firing does not matter too much, as long as they don't fire too
 * early.
 */
unsigned long __round_jiffies_up_relative(unsigned long j, int cpu)
{
        unsigned long j0 = jiffies;

        /* Use j0 because jiffies might change while we run */
        return round_jiffies_common(j + j0, cpu, true) - j0;
}
EXPORT_SYMBOL_GPL(__round_jiffies_up_relative);

/**
 * round_jiffies_up - function to round jiffies up to a full second
 * @j: the time in (absolute) jiffies that should be rounded
 *
 * This is the same as round_jiffies() except that it will never
 * round down. This is useful for timeouts for which the exact time
 * of firing does not matter too much, as long as they don't fire too
 * early.
 */
unsigned long round_jiffies_up(unsigned long j)
{
        return round_jiffies_common(j, raw_smp_processor_id(), true);
}
EXPORT_SYMBOL_GPL(round_jiffies_up);

/**
 * round_jiffies_up_relative - function to round jiffies up to a full second
 * @j: the time in (relative) jiffies that should be rounded
 *
 * This is the same as round_jiffies_relative() except that it will never
 * round down. This is useful for timeouts for which the exact time
 * of firing does not matter too much, as long as they don't fire too
 * early.
 */
unsigned long round_jiffies_up_relative(unsigned long j)
{
        return __round_jiffies_up_relative(j, raw_smp_processor_id());
}
EXPORT_SYMBOL_GPL(round_jiffies_up_relative);


static inline unsigned int timer_get_idx(struct timer_list *timer)
{
        return (timer->flags & TIMER_ARRAYMASK) >> TIMER_ARRAYSHIFT;
}

static inline void timer_set_idx(struct timer_list *timer, unsigned int idx)
{
        timer->flags = (timer->flags & ~TIMER_ARRAYMASK) |
                        idx << TIMER_ARRAYSHIFT;
}

/*
 * Helper function to calculate the array index for a given expiry
 * time.
 */
static inline unsigned calc_index(unsigned expires, unsigned lvl)
{
        if (expires & ~(UINT_MAX << LVL_SHIFT(lvl)))
                expires = (expires + LVL_GRAN(lvl)) >> LVL_SHIFT(lvl);
        else
                expires = expires >> LVL_SHIFT(lvl);

        return LVL_OFFS(lvl) + (expires & LVL_MASK);
}

static inline unsigned int calc_index_min_granularity(unsigned int expires)
{
        return LVL_OFFS(0) + ((expires >> LVL_SHIFT(0)) & LVL_MASK);
}

static int calc_wheel_index(unsigned long expires, unsigned long clk)
{
        unsigned long delta = expires - clk;
        unsigned int idx;

        if (delta < LVL_START(1)) {
                idx = calc_index_min_granularity(expires);
        } else if (delta < LVL_START(2)) {
                idx = calc_index(expires, 1);
        } else if (delta < LVL_START(3)) {
                idx = calc_index(expires, 2);
        } else if (delta < LVL_START(4)) {
                idx = calc_index(expires, 3);
        } else if (delta < LVL_START(5)) {
                idx = calc_index(expires, 4);
        } else if (delta < LVL_START(6)) {
                idx = calc_index(expires, 5);
        } else if (delta < LVL_START(7)) {
                idx = calc_index(expires, 6);
        } else if (LVL_DEPTH > 8 && delta < LVL_START(8)) {
                idx = calc_index(expires, 7);
        } else if ((long) delta < 0) {
                idx = clk & LVL_MASK;
        } else {
                /*
                 * Force expire obscenely large timeouts to expire at the
                 * capacity limit of the wheel.
                 */
                if (delta >= WHEEL_TIMEOUT_CUTOFF)
                        expires = clk + WHEEL_TIMEOUT_MAX;

                idx = calc_index(expires, LVL_DEPTH - 1);
        }
        return idx;
}

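/*
 * Worked example (editor's addition, not part of the original file):
 * for base->clk = 1000 and expires = 1100, delta = 100, which is at least
 * LVL_START(1) (63) but below LVL_START(2) (504), so level 1 is chosen.
 * In calc_index(1100, 1) the low LVL_SHIFT(1) bits of 1100 are non-zero,
 * so the expiry is rounded up: (1100 + 8) >> 3 = 138, giving the bucket
 * LVL_OFFS(1) + (138 & LVL_MASK) = 64 + 10 = 74.
 */
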
/*
 * Enqueue the timer into the hash bucket, mark it pending in
 * the bitmap and store the index in the timer flags.
 */
static void enqueue_timer(struct timer_base *base, struct timer_list *timer,
                          unsigned int idx)
{
        hlist_add_head(&timer->entry, base->vectors + idx);
        __set_bit(idx, base->pending_map);
        timer_set_idx(timer, idx);
}

static void
__internal_add_timer(struct timer_base *base, struct timer_list *timer)
{
        unsigned int idx;

        idx = calc_wheel_index(timer->expires, base->clk);
        enqueue_timer(base, timer, idx);
}

static void
trigger_dyntick_cpu(struct timer_base *base, struct timer_list *timer)
{
        if (!IS_ENABLED(CONFIG_NO_HZ_COMMON) || !base->nohz_active)
                return;

        /*
         * TODO: This wants some optimizing similar to the code below, but we
         * will do that when we switch from push to pull for deferrable timers.
         */
        if (timer->flags & TIMER_DEFERRABLE) {
                if (tick_nohz_full_cpu(base->cpu))
                        wake_up_nohz_cpu(base->cpu);
                return;
        }

        /*
         * We might have to IPI the remote CPU if the base is idle and the
         * timer is not deferrable. If the other CPU is on the way to idle
         * then it can't set base->is_idle as we hold the base lock:
         */
        if (!base->is_idle)
                return;

        /* Check whether this is the new first expiring timer: */
        if (time_after_eq(timer->expires, base->next_expiry))
                return;

        /*
         * Set the next expiry time and kick the CPU so it can reevaluate the
         * wheel:
         */
        base->next_expiry = timer->expires;
        wake_up_nohz_cpu(base->cpu);
}

static void
internal_add_timer(struct timer_base *base, struct timer_list *timer)
{
        __internal_add_timer(base, timer);
        trigger_dyntick_cpu(base, timer);
}

#ifdef CONFIG_DEBUG_OBJECTS_TIMERS

static struct debug_obj_descr timer_debug_descr;

static void *timer_debug_hint(void *addr)
{
        return ((struct timer_list *) addr)->function;
}

static bool timer_is_static_object(void *addr)
{
        struct timer_list *timer = addr;

        return (timer->entry.pprev == NULL &&
                timer->entry.next == TIMER_ENTRY_STATIC);
}

/*
 * fixup_init is called when:
 * - an active object is initialized
 */
static bool timer_fixup_init(void *addr, enum debug_obj_state state)
{
        struct timer_list *timer = addr;

        switch (state) {
        case ODEBUG_STATE_ACTIVE:
                del_timer_sync(timer);
                debug_object_init(timer, &timer_debug_descr);
                return true;
        default:
                return false;
        }
}

/* Stub timer callback for improperly used timers. */
static void stub_timer(unsigned long data)
{
        WARN_ON(1);
}

/*
 * fixup_activate is called when:
 * - an active object is activated
 * - an unknown non-static object is activated
 */
static bool timer_fixup_activate(void *addr, enum debug_obj_state state)
{
        struct timer_list *timer = addr;

        switch (state) {
        case ODEBUG_STATE_NOTAVAILABLE:
                setup_timer(timer, stub_timer, 0);
                return true;

        case ODEBUG_STATE_ACTIVE:
                WARN_ON(1);

        default:
                return false;
        }
}

/*
 * fixup_free is called when:
 * - an active object is freed
 */
static bool timer_fixup_free(void *addr, enum debug_obj_state state)
{
        struct timer_list *timer = addr;

        switch (state) {
        case ODEBUG_STATE_ACTIVE:
                del_timer_sync(timer);
                debug_object_free(timer, &timer_debug_descr);
                return true;
        default:
                return false;
        }
}

/*
 * fixup_assert_init is called when:
 * - an untracked/uninit-ed object is found
 */
static bool timer_fixup_assert_init(void *addr, enum debug_obj_state state)
{
        struct timer_list *timer = addr;

        switch (state) {
        case ODEBUG_STATE_NOTAVAILABLE:
                setup_timer(timer, stub_timer, 0);
                return true;
        default:
                return false;
        }
}

static struct debug_obj_descr timer_debug_descr = {
        .name                   = "timer_list",
        .debug_hint             = timer_debug_hint,
        .is_static_object       = timer_is_static_object,
        .fixup_init             = timer_fixup_init,
        .fixup_activate         = timer_fixup_activate,
        .fixup_free             = timer_fixup_free,
        .fixup_assert_init      = timer_fixup_assert_init,
};

static inline void debug_timer_init(struct timer_list *timer)
{
        debug_object_init(timer, &timer_debug_descr);
}

static inline void debug_timer_activate(struct timer_list *timer)
{
        debug_object_activate(timer, &timer_debug_descr);
}

static inline void debug_timer_deactivate(struct timer_list *timer)
{
        debug_object_deactivate(timer, &timer_debug_descr);
}

static inline void debug_timer_free(struct timer_list *timer)
{
        debug_object_free(timer, &timer_debug_descr);
}

static inline void debug_timer_assert_init(struct timer_list *timer)
{
        debug_object_assert_init(timer, &timer_debug_descr);
}

static void do_init_timer(struct timer_list *timer, unsigned int flags,
                          const char *name, struct lock_class_key *key);

void init_timer_on_stack_key(struct timer_list *timer, unsigned int flags,
                             const char *name, struct lock_class_key *key)
{
        debug_object_init_on_stack(timer, &timer_debug_descr);
        do_init_timer(timer, flags, name, key);
}
EXPORT_SYMBOL_GPL(init_timer_on_stack_key);

void destroy_timer_on_stack(struct timer_list *timer)
{
        debug_object_free(timer, &timer_debug_descr);
}
EXPORT_SYMBOL_GPL(destroy_timer_on_stack);

#else
static inline void debug_timer_init(struct timer_list *timer) { }
static inline void debug_timer_activate(struct timer_list *timer) { }
static inline void debug_timer_deactivate(struct timer_list *timer) { }
static inline void debug_timer_assert_init(struct timer_list *timer) { }
#endif

static inline void debug_init(struct timer_list *timer)
{
        debug_timer_init(timer);
        trace_timer_init(timer);
}

static inline void
debug_activate(struct timer_list *timer, unsigned long expires)
{
        debug_timer_activate(timer);
        trace_timer_start(timer, expires, timer->flags);
}

static inline void debug_deactivate(struct timer_list *timer)
{
        debug_timer_deactivate(timer);
        trace_timer_cancel(timer);
}

static inline void debug_assert_init(struct timer_list *timer)
{
        debug_timer_assert_init(timer);
}

static void do_init_timer(struct timer_list *timer, unsigned int flags,
                          const char *name, struct lock_class_key *key)
{
        timer->entry.pprev = NULL;
        timer->flags = flags | raw_smp_processor_id();
        lockdep_init_map(&timer->lockdep_map, name, key, 0);
}

/**
 * init_timer_key - initialize a timer
 * @timer: the timer to be initialized
 * @flags: timer flags
 * @name: name of the timer
 * @key: lockdep class key of the fake lock used for tracking timer
 *       sync lock dependencies
 *
 * init_timer_key() must be done to a timer prior to calling *any* of the
 * other timer functions.
 */
void init_timer_key(struct timer_list *timer, unsigned int flags,
                    const char *name, struct lock_class_key *key)
{
        debug_init(timer);
        do_init_timer(timer, flags, name, key);
}
EXPORT_SYMBOL(init_timer_key);

static inline void detach_timer(struct timer_list *timer, bool clear_pending)
{
        struct hlist_node *entry = &timer->entry;

        debug_deactivate(timer);

        __hlist_del(entry);
        if (clear_pending)
                entry->pprev = NULL;
        entry->next = LIST_POISON2;
}

static int detach_if_pending(struct timer_list *timer, struct timer_base *base,
                             bool clear_pending)
{
        unsigned idx = timer_get_idx(timer);

        if (!timer_pending(timer))
                return 0;

        if (hlist_is_singular_node(&timer->entry, base->vectors + idx))
                __clear_bit(idx, base->pending_map);

        detach_timer(timer, clear_pending);
        return 1;
}

static inline struct timer_base *get_timer_cpu_base(u32 tflags, u32 cpu)
{
        struct timer_base *base = per_cpu_ptr(&timer_bases[BASE_STD], cpu);

        /*
         * If the timer is deferrable and NO_HZ_COMMON is set then we need
         * to use the deferrable base.
         */
        if (IS_ENABLED(CONFIG_NO_HZ_COMMON) && (tflags & TIMER_DEFERRABLE)) {
                base = &timer_base_deferrable;
                if (tflags & TIMER_PINNED)
                        base = per_cpu_ptr(&timer_bases[BASE_DEF], cpu);
        }
        return base;
}

static inline struct timer_base *get_timer_this_cpu_base(u32 tflags)
{
        struct timer_base *base = this_cpu_ptr(&timer_bases[BASE_STD]);

        /*
         * If the timer is deferrable and NO_HZ_COMMON is set then we need
         * to use the deferrable base.
         */
        if (IS_ENABLED(CONFIG_NO_HZ_COMMON) && (tflags & TIMER_DEFERRABLE)) {
                base = &timer_base_deferrable;
                if (tflags & TIMER_PINNED)
                        base = this_cpu_ptr(&timer_bases[BASE_DEF]);
        }
        return base;
}

static inline struct timer_base *get_timer_base(u32 tflags)
{
        return get_timer_cpu_base(tflags, tflags & TIMER_CPUMASK);
}

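/*
 * Selection sketch (editor's addition, not part of the original file):
 * with NO_HZ_COMMON enabled, a TIMER_DEFERRABLE timer is queued on the
 * global timer_base_deferrable unless it is also TIMER_PINNED, in which
 * case it goes to the per-cpu BASE_DEF wheel of its CPU; all other timers
 * use the per-cpu BASE_STD wheel.
 */
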
#ifdef CONFIG_NO_HZ_COMMON
static inline struct timer_base *
get_target_base(struct timer_base *base, unsigned tflags)
{
#ifdef CONFIG_SMP
        if ((tflags & TIMER_PINNED) || !base->migration_enabled)
                return get_timer_this_cpu_base(tflags);
        return get_timer_cpu_base(tflags, get_nohz_timer_target());
#else
        return get_timer_this_cpu_base(tflags);
#endif
}

static inline void forward_timer_base(struct timer_base *base)
{
        unsigned long jnow;

        /*
         * We only forward the base when we are idle or have just come out of
         * idle (must_forward_clk logic), and have a delta between base clock
         * and jiffies. In the common case, run_timers will take care of it.
         */
        if (likely(!base->must_forward_clk))
                return;

        jnow = READ_ONCE(jiffies);
        base->must_forward_clk = base->is_idle;
        if ((long)(jnow - base->clk) < 2)
                return;

        /*
         * If the next expiry value is > jiffies, then we fast forward to
         * jiffies otherwise we forward to the next expiry value.
         */
        if (time_after(base->next_expiry, jnow))
                base->clk = jnow;
        else
                base->clk = base->next_expiry;
}
#else
static inline struct timer_base *
get_target_base(struct timer_base *base, unsigned tflags)
{
        return get_timer_this_cpu_base(tflags);
}

static inline void forward_timer_base(struct timer_base *base) { }
#endif


/*
|
|
* We are using hashed locking: Holding per_cpu(timer_bases[x]).lock means
|
|
* that all timers which are tied to this base are locked, and the base itself
|
|
* is locked too.
|
|
*
|
|
* So __run_timers/migrate_timers can safely modify all timers which could
|
|
* be found in the base->vectors array.
|
|
*
|
|
* When a timer is migrating then the TIMER_MIGRATING flag is set and we need
|
|
* to wait until the migration is done.
|
|
*/
|
|
static struct timer_base *lock_timer_base(struct timer_list *timer,
|
|
unsigned long *flags)
|
|
__acquires(timer->base->lock)
|
|
{
|
|
for (;;) {
|
|
struct timer_base *base;
|
|
u32 tf;
|
|
|
|
/*
|
|
* We need to use READ_ONCE() here, otherwise the compiler
|
|
* might re-read @tf between the check for TIMER_MIGRATING
|
|
* and spin_lock().
|
|
*/
|
|
tf = READ_ONCE(timer->flags);
|
|
|
|
if (!(tf & TIMER_MIGRATING)) {
|
|
base = get_timer_base(tf);
|
|
spin_lock_irqsave(&base->lock, *flags);
|
|
if (timer->flags == tf)
|
|
return base;
|
|
spin_unlock_irqrestore(&base->lock, *flags);
|
|
}
|
|
cpu_relax();
|
|
}
|
|
}
|
|
|
|
static inline int
|
|
__mod_timer(struct timer_list *timer, unsigned long expires, bool pending_only)
|
|
{
|
|
struct timer_base *base, *new_base;
|
|
unsigned int idx = UINT_MAX;
|
|
unsigned long clk = 0, flags;
|
|
int ret = 0;
|
|
|
|
BUG_ON(!timer->function);
|
|
|
|
/*
|
|
* This is a common optimization triggered by the networking code - if
|
|
* the timer is re-modified to have the same timeout or ends up in the
|
|
* same array bucket then just return:
|
|
*/
|
|
if (timer_pending(timer)) {
|
|
/*
|
|
* The downside of this optimization is that it can result in
|
|
* larger granularity than you would get from adding a new
|
|
* timer with this expiry.
|
|
*/
|
|
if (timer->expires == expires)
|
|
return 1;
|
|
|
|
/*
|
|
* We lock timer base and calculate the bucket index right
|
|
* here. If the timer ends up in the same bucket, then we
|
|
* just update the expiry time and avoid the whole
|
|
* dequeue/enqueue dance.
|
|
*/
|
|
base = lock_timer_base(timer, &flags);
|
|
forward_timer_base(base);
|
|
|
|
clk = base->clk;
|
|
idx = calc_wheel_index(expires, clk);
|
|
|
|
/*
|
|
* Retrieve and compare the array index of the pending
|
|
* timer. If it matches set the expiry to the new value so a
|
|
* subsequent call will exit in the expires check above.
|
|
*/
|
|
if (idx == timer_get_idx(timer)) {
|
|
timer->expires = expires;
|
|
ret = 1;
|
|
goto out_unlock;
|
|
}
|
|
} else {
|
|
base = lock_timer_base(timer, &flags);
|
|
forward_timer_base(base);
|
|
}
|
|
|
|
ret = detach_if_pending(timer, base, false);
|
|
if (!ret && pending_only)
|
|
goto out_unlock;
|
|
|
|
new_base = get_target_base(base, timer->flags);
|
|
|
|
if (base != new_base) {
|
|
/*
|
|
* We are trying to schedule the timer on the new base.
|
|
* However we can't change timer's base while it is running,
|
|
* otherwise del_timer_sync() can't detect that the timer's
|
|
* handler yet has not finished. This also guarantees that the
|
|
* timer is serialized wrt itself.
|
|
*/
|
|
if (likely(base->running_timer != timer)) {
|
|
/* See the comment in lock_timer_base() */
|
|
timer->flags |= TIMER_MIGRATING;
|
|
|
|
spin_unlock(&base->lock);
|
|
base = new_base;
|
|
spin_lock(&base->lock);
|
|
WRITE_ONCE(timer->flags,
|
|
(timer->flags & ~TIMER_BASEMASK) | base->cpu);
|
|
forward_timer_base(base);
|
|
}
|
|
}
|
|
|
|
debug_activate(timer, expires);
|
|
|
|
timer->expires = expires;
|
|
/*
|
|
* If 'idx' was calculated above and the base time did not advance
|
|
* between calculating 'idx' and possibly switching the base, only
|
|
* enqueue_timer() and trigger_dyntick_cpu() is required. Otherwise
|
|
* we need to (re)calculate the wheel index via
|
|
* internal_add_timer().
|
|
*/
|
|
if (idx != UINT_MAX && clk == base->clk) {
|
|
enqueue_timer(base, timer, idx);
|
|
trigger_dyntick_cpu(base, timer);
|
|
} else {
|
|
internal_add_timer(base, timer);
|
|
}
|
|
|
|
out_unlock:
|
|
spin_unlock_irqrestore(&base->lock, flags);
|
|
|
|
return ret;
|
|
}

/**
 * mod_timer_pending - modify a pending timer's timeout
 * @timer: the pending timer to be modified
 * @expires: new timeout in jiffies
 *
 * mod_timer_pending() is the same for pending timers as mod_timer(),
 * but will not re-activate and modify already deleted timers.
 *
 * It is useful for unserialized use of timers.
 */
int mod_timer_pending(struct timer_list *timer, unsigned long expires)
{
	return __mod_timer(timer, expires, true);
}
EXPORT_SYMBOL(mod_timer_pending);

/**
 * mod_timer - modify a timer's timeout
 * @timer: the timer to be modified
 * @expires: new timeout in jiffies
 *
 * mod_timer() is a more efficient way to update the expire field of an
 * active timer (if the timer is inactive it will be activated).
 *
 * mod_timer(timer, expires) is equivalent to:
 *
 *     del_timer(timer); timer->expires = expires; add_timer(timer);
 *
 * Note that if there are multiple unserialized concurrent users of the
 * same timer, then mod_timer() is the only safe way to modify the timeout,
 * since add_timer() cannot modify an already running timer.
 *
 * The function returns whether it has modified a pending timer or not.
 * (i.e. mod_timer() of an inactive timer returns 0, mod_timer() of an
 * active timer returns 1.)
 */
int mod_timer(struct timer_list *timer, unsigned long expires)
{
	return __mod_timer(timer, expires, false);
}
EXPORT_SYMBOL(mod_timer);
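
/*
 * Editor's note: an illustrative usage sketch, not part of the kernel
 * source. It shows the typical mod_timer() rearm pattern against the
 * 4.9-era timer API above; "my_dev", "my_timeout_fn" and "my_dev_kick"
 * are hypothetical names, and the sketch assumes
 * setup_timer(&dev->watchdog, my_timeout_fn, (unsigned long)dev) ran
 * at device init.
 */
#include <linux/jiffies.h>
#include <linux/timer.h>

struct my_dev {
	struct timer_list watchdog;		/* hypothetical per-device timer */
};

static void my_timeout_fn(unsigned long data)
{
	struct my_dev *dev = (struct my_dev *)data;

	/* Handle the timeout, then rearm for roughly one second out. */
	mod_timer(&dev->watchdog, jiffies + HZ);
}

static void my_dev_kick(struct my_dev *dev)
{
	/*
	 * mod_timer() activates an inactive timer or moves a pending one;
	 * calling it again with an unchanged expiry returns 1 early via
	 * the optimization in __mod_timer() above.
	 */
	mod_timer(&dev->watchdog, jiffies + HZ);
}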

/**
 * add_timer - start a timer
 * @timer: the timer to be added
 *
 * The kernel will do a ->function(->data) callback from the
 * timer interrupt at the ->expires point in the future. The
 * current time is 'jiffies'.
 *
 * The timer's ->expires, ->function (and if the handler uses it, ->data)
 * fields must be set prior to calling this function.
 *
 * Timers with an ->expires field in the past will be executed in the next
 * timer tick.
 */
void add_timer(struct timer_list *timer)
{
	BUG_ON(timer_pending(timer));
	mod_timer(timer, timer->expires);
}
EXPORT_SYMBOL(add_timer);
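
/*
 * Editor's note: an illustrative sketch, not part of the kernel source.
 * add_timer() requires ->function (and ->data, if used) plus ->expires
 * to be set first, e.g. via setup_timer() from the 4.9 API; "my_cb" and
 * "my_start_once" are hypothetical names.
 */
#include <linux/jiffies.h>
#include <linux/printk.h>
#include <linux/timer.h>

static void my_cb(unsigned long cookie)
{
	pr_info("timer fired, cookie=%lu\n", cookie);
}

static struct timer_list my_timer;

static void my_start_once(void)
{
	setup_timer(&my_timer, my_cb, 0UL);
	my_timer.expires = jiffies + 2 * HZ;	/* fire in roughly 2 seconds */
	add_timer(&my_timer);			/* BUG_ON()s if already pending */
}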

/**
 * add_timer_on - start a timer on a particular CPU
 * @timer: the timer to be added
 * @cpu: the CPU to start it on
 *
 * This is not very scalable on SMP. Double adds are not possible.
 */
void add_timer_on(struct timer_list *timer, int cpu)
{
	struct timer_base *new_base, *base;
	unsigned long flags;

	BUG_ON(timer_pending(timer) || !timer->function);

	new_base = get_timer_cpu_base(timer->flags, cpu);

	/*
	 * If @timer was on a different CPU, it should be migrated with the
	 * old base locked to prevent other operations proceeding with the
	 * wrong base locked. See lock_timer_base().
	 */
	base = lock_timer_base(timer, &flags);
	if (base != new_base) {
		timer->flags |= TIMER_MIGRATING;

		spin_unlock(&base->lock);
		base = new_base;
		spin_lock(&base->lock);
		WRITE_ONCE(timer->flags,
			   (timer->flags & ~TIMER_BASEMASK) | cpu);
	}
	forward_timer_base(base);

	debug_activate(timer, timer->expires);
	internal_add_timer(base, timer);
	spin_unlock_irqrestore(&base->lock, flags);
}
EXPORT_SYMBOL_GPL(add_timer_on);
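
/*
 * Editor's note: an illustrative sketch, not part of the kernel source.
 * It pins a one-shot timer to a particular CPU; double adds are not
 * allowed, as the kernel-doc above states. "my_start_on_cpu" is a
 * hypothetical name and assumes the timer was prepared with
 * setup_timer() and is not currently pending.
 */
#include <linux/jiffies.h>
#include <linux/timer.h>

static void my_start_on_cpu(struct timer_list *t, int cpu)
{
	t->expires = jiffies + HZ;	/* fire in about one second */
	add_timer_on(t, cpu);		/* queued on @cpu's timer base */
}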

/**
 * del_timer - deactivate a timer.
 * @timer: the timer to be deactivated
 *
 * del_timer() deactivates a timer - this works on both active and inactive
 * timers.
 *
 * The function returns whether it has deactivated a pending timer or not.
 * (i.e. del_timer() of an inactive timer returns 0, del_timer() of an
 * active timer returns 1.)
 */
int del_timer(struct timer_list *timer)
{
	struct timer_base *base;
	unsigned long flags;
	int ret = 0;

	debug_assert_init(timer);

	if (timer_pending(timer)) {
		base = lock_timer_base(timer, &flags);
		ret = detach_if_pending(timer, base, true);
		spin_unlock_irqrestore(&base->lock, flags);
	}

	return ret;
}
EXPORT_SYMBOL(del_timer);

/**
 * try_to_del_timer_sync - Try to deactivate a timer
 * @timer: the timer to be deactivated
 *
 * This function tries to deactivate a timer. Upon successful (ret >= 0)
 * exit the timer is not queued and the handler is not running on any CPU.
 */
int try_to_del_timer_sync(struct timer_list *timer)
{
	struct timer_base *base;
	unsigned long flags;
	int ret = -1;

	debug_assert_init(timer);

	base = lock_timer_base(timer, &flags);

	if (base->running_timer != timer)
		ret = detach_if_pending(timer, base, true);

	spin_unlock_irqrestore(&base->lock, flags);

	return ret;
}
EXPORT_SYMBOL(try_to_del_timer_sync);
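
/*
 * Editor's note: an illustrative sketch, not part of the kernel source.
 * try_to_del_timer_sync() suits callers that must not spin waiting for
 * the handler (e.g. they hold a lock the handler also takes); a negative
 * return means the handler is running and the caller should back off and
 * retry later. "my_stop_nowait" is a hypothetical name.
 */
#include <linux/timer.h>

static bool my_stop_nowait(struct timer_list *t)
{
	/* ret >= 0: the timer is now neither queued nor running anywhere */
	return try_to_del_timer_sync(t) >= 0;
}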

#ifdef CONFIG_SMP
/**
 * del_timer_sync - deactivate a timer and wait for the handler to finish.
 * @timer: the timer to be deactivated
 *
 * This function only differs from del_timer() on SMP: besides deactivating
 * the timer it also makes sure the handler has finished executing on other
 * CPUs.
 *
 * Synchronization rules: Callers must prevent restarting of the timer,
 * otherwise this function is meaningless. It must not be called from
 * interrupt contexts unless the timer is an irqsafe one. The caller must
 * not hold locks which would prevent completion of the timer's
 * handler. The timer's handler must not call add_timer_on(). Upon exit the
 * timer is not queued and the handler is not running on any CPU.
 *
 * Note: For !irqsafe timers, you must not hold locks that are held in
 * interrupt context while calling this function. Even if the lock has
 * nothing to do with the timer in question. Here's why:
 *
 *    CPU0                             CPU1
 *    ----                             ----
 *                                     <SOFTIRQ>
 *                                       call_timer_fn();
 *                                       base->running_timer = mytimer;
 *    spin_lock_irq(somelock);
 *                                     <IRQ>
 *                                       spin_lock(somelock);
 *    del_timer_sync(mytimer);
 *    while (base->running_timer == mytimer);
 *
 * Now del_timer_sync() will never return and never release somelock.
 * The interrupt on the other CPU is waiting to grab somelock but
 * it has interrupted the softirq that CPU0 is waiting to finish.
 *
 * The function returns whether it has deactivated a pending timer or not.
 */
int del_timer_sync(struct timer_list *timer)
{
#ifdef CONFIG_LOCKDEP
	unsigned long flags;

	/*
	 * If lockdep gives a backtrace here, please reference
	 * the synchronization rules above.
	 */
	local_irq_save(flags);
	lock_map_acquire(&timer->lockdep_map);
	lock_map_release(&timer->lockdep_map);
	local_irq_restore(flags);
#endif
	/*
	 * Don't use it in hardirq context, because it
	 * could lead to a deadlock.
	 */
	WARN_ON(in_irq() && !(timer->flags & TIMER_IRQSAFE));
	for (;;) {
		int ret = try_to_del_timer_sync(timer);
		if (ret >= 0)
			return ret;
		udelay(1);
	}
}
EXPORT_SYMBOL(del_timer_sync);
#endif
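
/*
 * Editor's note: an illustrative sketch, not part of the kernel source.
 * It shows the teardown order implied by the synchronization rules
 * above: stop whatever re-arms the timer first, then del_timer_sync(),
 * after which the handler cannot be running and the containing object
 * may be freed. "my_dev_shutdown" reuses the hypothetical struct my_dev
 * from the mod_timer() sketch earlier.
 */
static void my_dev_shutdown(struct my_dev *dev)
{
	/* 1. Ensure no path can re-arm dev->watchdog from here on. */

	/*
	 * 2. Wait out a possibly running handler. Safe here because we
	 *    hold no locks that my_timeout_fn() takes and are not in
	 *    hardirq context.
	 */
	del_timer_sync(&dev->watchdog);

	/* 3. Now freeing @dev (kfree() or similar) is safe. */
}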

static void call_timer_fn(struct timer_list *timer, void (*fn)(unsigned long),
			  unsigned long data)
{
	int count = preempt_count();

#ifdef CONFIG_LOCKDEP
	/*
	 * It is permissible to free the timer from inside the
	 * function that is called from it, this we need to take into
	 * account for lockdep too. To avoid bogus "held lock freed"
	 * warnings as well as problems when looking into
	 * timer->lockdep_map, make a copy and use that here.
	 */
	struct lockdep_map lockdep_map;

	lockdep_copy_map(&lockdep_map, &timer->lockdep_map);
#endif
	/*
	 * Couple the lock chain with the lock chain at
	 * del_timer_sync() by acquiring the lock_map around the fn()
	 * call here and in del_timer_sync().
	 */
	lock_map_acquire(&lockdep_map);

	trace_timer_expire_entry(timer);
	fn(data);
	trace_timer_expire_exit(timer);

	lock_map_release(&lockdep_map);

	if (count != preempt_count()) {
		WARN_ONCE(1, "timer: %pF preempt leak: %08x -> %08x\n",
			  fn, count, preempt_count());
		/*
		 * Restore the preempt count. That gives us a decent
		 * chance to survive and extract information. If the
		 * callback kept a lock held, bad luck, but not worse
		 * than the BUG() we had.
		 */
		preempt_count_set(count);
	}
}

static void expire_timers(struct timer_base *base, struct hlist_head *head)
{
	while (!hlist_empty(head)) {
		struct timer_list *timer;
		void (*fn)(unsigned long);
		unsigned long data;

		timer = hlist_entry(head->first, struct timer_list, entry);

		base->running_timer = timer;
		detach_timer(timer, true);

		fn = timer->function;
		data = timer->data;

		if (timer->flags & TIMER_IRQSAFE) {
			spin_unlock(&base->lock);
			call_timer_fn(timer, fn, data);
			spin_lock(&base->lock);
		} else {
			spin_unlock_irq(&base->lock);
			call_timer_fn(timer, fn, data);
			spin_lock_irq(&base->lock);
		}
	}
}

static int __collect_expired_timers(struct timer_base *base,
				    struct hlist_head *heads)
{
	unsigned long clk = base->clk;
	struct hlist_head *vec;
	int i, levels = 0;
	unsigned int idx;

	for (i = 0; i < LVL_DEPTH; i++) {
		idx = (clk & LVL_MASK) + i * LVL_SIZE;

		if (__test_and_clear_bit(idx, base->pending_map)) {
			vec = base->vectors + idx;
			hlist_move_list(vec, heads++);
			levels++;
		}
		/* Is it time to look at the next level? */
		if (clk & LVL_CLK_MASK)
			break;
		/* Shift clock for the next level granularity */
		clk >>= LVL_CLK_SHIFT;
	}
	return levels;
}

#ifdef CONFIG_NO_HZ_COMMON
/*
 * Find the next pending bucket of a level. Search from level start (@offset)
 * + @clk upwards and if nothing there, search from start of the level
 * (@offset) up to @offset + clk.
 */
static int next_pending_bucket(struct timer_base *base, unsigned offset,
			       unsigned int clk, int lvl)
{
	unsigned int pos_up = -1, pos_down, start = offset + clk;
	unsigned end = offset + LVL_SIZE;
	unsigned int pos;

	pos = find_next_bit(base->pending_map, end, start);
	if (pos < end)
		pos_up = pos - start;

	pos = find_next_bit(base->pending_map, start, offset);
	pos_down = pos < start ? pos + LVL_SIZE - start : -1;
	if (((pos_up + (u64)base->clk) << LVL_SHIFT(lvl)) >
	    ((pos_down + (u64)base->clk) << LVL_SHIFT(lvl)))
		return pos_down;
	return pos_up;
}

/*
 * Search the first expiring timer in the various clock levels. Caller must
 * hold base->lock.
 */
static unsigned long __next_timer_interrupt(struct timer_base *base)
{
	unsigned long clk, next, adj;
	unsigned lvl, offset = 0;

	next = base->clk + NEXT_TIMER_MAX_DELTA;
	clk = base->clk;
	for (lvl = 0; lvl < LVL_DEPTH; lvl++, offset += LVL_SIZE) {
		int pos = next_pending_bucket(base, offset, clk & LVL_MASK,
					      lvl);

		if (pos >= 0) {
			unsigned long tmp = clk + (unsigned long) pos;

			tmp <<= LVL_SHIFT(lvl);
			if (time_before(tmp, next))
				next = tmp;
		}
		/*
		 * Clock for the next level. If the current level clock lower
		 * bits are zero, we look at the next level as is. If not we
		 * need to advance it by one because that's going to be the
		 * next expiring bucket in that level. base->clk is the next
		 * expiring jiffie. So in case of:
		 *
		 * LVL5 LVL4 LVL3 LVL2 LVL1 LVL0
		 *  0    0    0    0    0    0
		 *
		 * we have to look at all levels @index 0. With
		 *
		 * LVL5 LVL4 LVL3 LVL2 LVL1 LVL0
		 *  0    0    0    0    0    2
		 *
		 * LVL0 has the next expiring bucket @index 2. The upper
		 * levels have the next expiring bucket @index 1.
		 *
		 * In case that the propagation wraps the next level the same
		 * rules apply:
		 *
		 * LVL5 LVL4 LVL3 LVL2 LVL1 LVL0
		 *  0    0    0    0    F    2
		 *
		 * So after looking at LVL0 we get:
		 *
		 * LVL5 LVL4 LVL3 LVL2 LVL1
		 *  0    0    0    1    0
		 *
		 * So no propagation from LVL1 to LVL2 because that happened
		 * with the add already, but then we need to propagate further
		 * from LVL2 to LVL3.
		 *
		 * So the simple check whether the lower bits of the current
		 * level are 0 or not is sufficient for all cases.
		 */
		adj = clk & LVL_CLK_MASK ? 1 : 0;
		clk >>= LVL_CLK_SHIFT;
		clk += adj;
	}
	return next;
}

/*
 * Check, if the next hrtimer event is before the next timer wheel
 * event:
 */
static u64 cmp_next_hrtimer_event(u64 basem, u64 expires)
{
	u64 nextevt = hrtimer_get_next_event();

	/*
	 * If high resolution timers are enabled
	 * hrtimer_get_next_event() returns KTIME_MAX.
	 */
	if (expires <= nextevt)
		return expires;

	/*
	 * If the next timer is already expired, return the tick base
	 * time so the tick is fired immediately.
	 */
	if (nextevt <= basem)
		return basem;

	/*
	 * Round up to the next jiffie. High resolution timers are
	 * off, so the hrtimers are expired in the tick and we need to
	 * make sure that this tick really expires the timer to avoid
	 * a ping pong of the nohz stop code.
	 *
	 * Use DIV_ROUND_UP_ULL to prevent gcc calling __divdi3
	 */
	return DIV_ROUND_UP_ULL(nextevt, TICK_NSEC) * TICK_NSEC;
}


#ifdef CONFIG_SMP
/*
 * check_pending_deferrable_timers - Check for unbound deferrable timer expiry
 * @cpu - Current CPU
 *
 * The function checks whether any global deferrable pending timers
 * have expired or not. This function does not check cpu bound
 * deferrable pending timers expiry.
 *
 * The function returns true when a cpu unbound deferrable timer is expired.
 */
bool check_pending_deferrable_timers(int cpu)
{
	if (cpu == tick_do_timer_cpu ||
	    tick_do_timer_cpu == TICK_DO_TIMER_NONE) {
		if (time_after_eq(jiffies, timer_base_deferrable.clk)
		    && !atomic_cmpxchg(&deferrable_pending, 0, 1)) {
			return true;
		}
	}
	return false;
}
#endif

/**
 * get_next_timer_interrupt - return the time (clock mono) of the next timer
 * @basej: base time jiffies
 * @basem: base time clock monotonic
 *
 * Returns the tick aligned clock monotonic time of the next pending
 * timer or KTIME_MAX if no timer is pending.
 */
u64 get_next_timer_interrupt(unsigned long basej, u64 basem)
{
	struct timer_base *base = this_cpu_ptr(&timer_bases[BASE_STD]);
	u64 expires = KTIME_MAX;
	unsigned long nextevt;
	bool is_max_delta;

	/*
	 * Pretend that there is no timer pending if the cpu is offline.
	 * Possible pending timers will be migrated later to an active cpu.
	 */
	if (cpu_is_offline(smp_processor_id()))
		return expires;

	spin_lock(&base->lock);
	nextevt = __next_timer_interrupt(base);
	is_max_delta = (nextevt == base->clk + NEXT_TIMER_MAX_DELTA);
	base->next_expiry = nextevt;
	/*
	 * We have a fresh next event. Check whether we can forward the
	 * base. We can only do that when @basej is past base->clk
	 * otherwise we might rewind base->clk.
	 */
	if (time_after(basej, base->clk)) {
		if (time_after(nextevt, basej))
			base->clk = basej;
		else if (time_after(nextevt, base->clk))
			base->clk = nextevt;
	}

	if (time_before_eq(nextevt, basej)) {
		expires = basem;
		base->is_idle = false;
	} else {
		if (!is_max_delta)
			expires = basem + (u64)(nextevt - basej) * TICK_NSEC;
		/*
		 * If we expect to sleep more than a tick, mark the base idle.
		 * Also the tick is stopped so any added timer must forward
		 * the base clk itself to keep granularity small. This idle
		 * logic is only maintained for the BASE_STD base, deferrable
		 * timers may still see large granularity skew (by design).
		 */
		if ((expires - basem) > TICK_NSEC) {
			base->must_forward_clk = true;
			base->is_idle = true;
		}
	}
	spin_unlock(&base->lock);

	return cmp_next_hrtimer_event(basem, expires);
}

/**
 * timer_clear_idle - Clear the idle state of the timer base
 *
 * Called with interrupts disabled
 */
void timer_clear_idle(void)
{
	struct timer_base *base = this_cpu_ptr(&timer_bases[BASE_STD]);

	/*
	 * We do this unlocked. The worst outcome is a remote enqueue sending
	 * a pointless IPI, but taking the lock would just make the window for
	 * sending the IPI a few instructions smaller for the cost of taking
	 * the lock in the exit from idle path.
	 */
	base->is_idle = false;
}

static int collect_expired_timers(struct timer_base *base,
				  struct hlist_head *heads)
{
	unsigned long now = READ_ONCE(jiffies);

	/*
	 * NOHZ optimization. After a long idle sleep we need to forward the
	 * base to current jiffies. Avoid a loop by searching the bitfield for
	 * the next expiring timer.
	 */
	if ((long)(now - base->clk) > 2) {
		unsigned long next = __next_timer_interrupt(base);

		/*
		 * If the next timer is ahead of time forward to current
		 * jiffies, otherwise forward to the next expiry time:
		 */
		if (time_after(next, now)) {
			/* The call site will increment clock! */
			base->clk = now - 1;
			return 0;
		}
		base->clk = next;
	}
	return __collect_expired_timers(base, heads);
}
#else
static inline int collect_expired_timers(struct timer_base *base,
					 struct hlist_head *heads)
{
	return __collect_expired_timers(base, heads);
}
#endif

/*
 * Called from the timer interrupt handler to charge one tick to the current
 * process. user_tick is 1 if the tick is user time, 0 for system.
 */
void update_process_times(int user_tick)
{
	struct task_struct *p = current;

	/* Note: this timer irq context must be accounted for as well. */
	account_process_tick(p, user_tick);
	run_local_timers();
	rcu_check_callbacks(user_tick);
#ifdef CONFIG_IRQ_WORK
	if (in_irq())
		irq_work_tick();
#endif
	scheduler_tick();
	run_posix_cpu_timers(p);
}

/**
 * __run_timers - run all expired timers (if any) on this CPU.
 * @base: the timer vector to be processed.
 */
static inline void __run_timers(struct timer_base *base)
{
	struct hlist_head heads[LVL_DEPTH];
	int levels;

	if (!time_after_eq(jiffies, base->clk))
		return;

	spin_lock_irq(&base->lock);

	/*
	 * timer_base::must_forward_clk must be cleared before running
	 * timers so that any timer functions that call mod_timer() will
	 * not try to forward the base. Idle tracking / clock forwarding
	 * logic is only used with BASE_STD timers.
	 *
	 * The must_forward_clk flag is cleared unconditionally also for
	 * the deferrable base. The deferrable base is not affected by idle
	 * tracking and never forwarded, so clearing the flag is a NOOP.
	 *
	 * The fact that the deferrable base is never forwarded can cause
	 * large variations in granularity for deferrable timers, but they
	 * can be deferred for long periods due to idle anyway.
	 */
	base->must_forward_clk = false;

	while (time_after_eq(jiffies, base->clk)) {

		levels = collect_expired_timers(base, heads);
		base->clk++;

		while (levels--)
			expire_timers(base, heads + levels);
	}
	base->running_timer = NULL;
	spin_unlock_irq(&base->lock);
}

/*
 * This function runs timers and the timer-tq in bottom half context.
 */
static __latent_entropy void run_timer_softirq(struct softirq_action *h)
{
	struct timer_base *base = this_cpu_ptr(&timer_bases[BASE_STD]);

	__run_timers(base);
	if (IS_ENABLED(CONFIG_NO_HZ_COMMON))
		__run_timers(this_cpu_ptr(&timer_bases[BASE_DEF]));

	if ((atomic_cmpxchg(&deferrable_pending, 1, 0) &&
	     tick_do_timer_cpu == TICK_DO_TIMER_NONE) ||
	    tick_do_timer_cpu == smp_processor_id())
		__run_timers(&timer_base_deferrable);
}

/*
 * Called by the local, per-CPU timer interrupt on SMP.
 */
void run_local_timers(void)
{
	struct timer_base *base = this_cpu_ptr(&timer_bases[BASE_STD]);

	hrtimer_run_queues();
	/* Raise the softirq only if required. */
	if (time_before(jiffies, base->clk)) {
		if (!IS_ENABLED(CONFIG_NO_HZ_COMMON))
			return;
		/* CPU is awake, so check the deferrable base. */
		base++;
		if (time_before(jiffies, base->clk))
			return;
	}
	raise_softirq(TIMER_SOFTIRQ);
}

#ifdef __ARCH_WANT_SYS_ALARM

/*
 * For backwards compatibility? This can be done in libc so Alpha
 * and all newer ports shouldn't need it.
 */
SYSCALL_DEFINE1(alarm, unsigned int, seconds)
{
	return alarm_setitimer(seconds);
}

#endif

static void process_timeout(unsigned long __data)
{
	wake_up_process((struct task_struct *)__data);
}

/**
 * schedule_timeout - sleep until timeout
 * @timeout: timeout value in jiffies
 *
 * Make the current task sleep until @timeout jiffies have
 * elapsed. The routine will return immediately unless
 * the current task state has been set (see set_current_state()).
 *
 * You can set the task state as follows -
 *
 * %TASK_UNINTERRUPTIBLE - at least @timeout jiffies are guaranteed to
 * pass before the routine returns. The routine will return 0.
 *
 * %TASK_INTERRUPTIBLE - the routine may return early if a signal is
 * delivered to the current task. In this case the remaining time
 * in jiffies will be returned, or 0 if the timer expired in time.
 *
 * The current task state is guaranteed to be TASK_RUNNING when this
 * routine returns.
 *
 * Specifying a @timeout value of %MAX_SCHEDULE_TIMEOUT will schedule
 * the CPU away without a bound on the timeout. In this case the return
 * value will be %MAX_SCHEDULE_TIMEOUT.
 *
 * In all cases the return value is guaranteed to be non-negative.
 */
signed long __sched schedule_timeout(signed long timeout)
{
	struct timer_list timer;
	unsigned long expire;

	switch (timeout) {
	case MAX_SCHEDULE_TIMEOUT:
		/*
		 * These two special cases are useful to be comfortable
		 * in the caller. Nothing more. We could take
		 * MAX_SCHEDULE_TIMEOUT from one of the negative values
		 * but I'd like to return a valid offset (>= 0) to allow
		 * the caller to do everything it wants with the retval.
		 */
		schedule();
		goto out;
	default:
		/*
		 * Another bit of PARANOID. Note that the retval will be
		 * 0 since no piece of kernel is supposed to do a check
		 * for a negative retval of schedule_timeout() (since it
		 * should never happen anyway). You just have the printk()
		 * that will tell you if something has gone wrong and where.
		 */
		if (timeout < 0) {
			printk(KERN_ERR "schedule_timeout: wrong timeout "
				"value %lx\n", timeout);
			dump_stack();
			current->state = TASK_RUNNING;
			goto out;
		}
	}

	expire = timeout + jiffies;

	setup_timer_on_stack(&timer, process_timeout, (unsigned long)current);
	__mod_timer(&timer, expire, false);
	schedule();
	del_singleshot_timer_sync(&timer);

	/* Remove the timer from the object tracker */
	destroy_timer_on_stack(&timer);

	timeout = expire - jiffies;

out:
	return timeout < 0 ? 0 : timeout;
}
EXPORT_SYMBOL(schedule_timeout);
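
/*
 * Editor's note: an illustrative sketch, not part of the kernel source.
 * It shows the set_current_state() + schedule_timeout() idiom from the
 * kernel-doc above; without setting a sleeping task state first,
 * schedule_timeout() returns without waiting. "my_wait_half_second" is
 * a hypothetical name.
 */
#include <linux/jiffies.h>
#include <linux/sched.h>

static long my_wait_half_second(void)
{
	set_current_state(TASK_INTERRUPTIBLE);
	/* 0 on full expiry; remaining jiffies if a signal woke us early */
	return schedule_timeout(HZ / 2);
}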

/*
 * We can use __set_current_state() here because schedule_timeout() calls
 * schedule() unconditionally.
 */
signed long __sched schedule_timeout_interruptible(signed long timeout)
{
	__set_current_state(TASK_INTERRUPTIBLE);
	return schedule_timeout(timeout);
}
EXPORT_SYMBOL(schedule_timeout_interruptible);

signed long __sched schedule_timeout_killable(signed long timeout)
{
	__set_current_state(TASK_KILLABLE);
	return schedule_timeout(timeout);
}
EXPORT_SYMBOL(schedule_timeout_killable);

signed long __sched schedule_timeout_uninterruptible(signed long timeout)
{
	__set_current_state(TASK_UNINTERRUPTIBLE);
	return schedule_timeout(timeout);
}
EXPORT_SYMBOL(schedule_timeout_uninterruptible);

/*
 * Like schedule_timeout_uninterruptible(), except this task will not
 * contribute to load average.
 */
signed long __sched schedule_timeout_idle(signed long timeout)
{
	__set_current_state(TASK_IDLE);
	return schedule_timeout(timeout);
}
EXPORT_SYMBOL(schedule_timeout_idle);

#ifdef CONFIG_HOTPLUG_CPU
static void migrate_timer_list(struct timer_base *new_base,
			       struct hlist_head *head, bool remove_pinned)
{
	struct timer_list *timer;
	int cpu = new_base->cpu;
	struct hlist_node *n;
	int is_pinned;

	hlist_for_each_entry_safe(timer, n, head, entry) {
		is_pinned = timer->flags & TIMER_PINNED;
		if (!remove_pinned && is_pinned)
			continue;

		detach_if_pending(timer, get_timer_base(timer->flags), false);
		timer->flags = (timer->flags & ~TIMER_BASEMASK) | cpu;
		internal_add_timer(new_base, timer);
	}
}

int timers_prepare_cpu(unsigned int cpu)
{
	struct timer_base *base;
	int b;

	for (b = 0; b < NR_BASES; b++) {
		base = per_cpu_ptr(&timer_bases[b], cpu);
		base->clk = jiffies;
		base->next_expiry = base->clk + NEXT_TIMER_MAX_DELTA;
		base->is_idle = false;
		base->must_forward_clk = true;
	}
	return 0;
}

static void __migrate_timers(unsigned int cpu, bool remove_pinned)
{
	struct timer_base *old_base;
	struct timer_base *new_base;
	unsigned long flags;
	int b, i;

	for (b = 0; b < NR_BASES; b++) {
		old_base = per_cpu_ptr(&timer_bases[b], cpu);
		new_base = get_cpu_ptr(&timer_bases[b]);
		/*
		 * The caller is globally serialized and nobody else
		 * takes two locks at once, deadlock is not possible.
		 */
		spin_lock_irqsave(&new_base->lock, flags);
		spin_lock_nested(&old_base->lock, SINGLE_DEPTH_NESTING);

		/*
		 * The current CPU's base clock might be stale. Update it
		 * before moving the timers over.
		 */
		forward_timer_base(new_base);

		if (!cpu_online(cpu))
			BUG_ON(old_base->running_timer);

		for (i = 0; i < WHEEL_SIZE; i++)
			migrate_timer_list(new_base, old_base->vectors + i,
					   remove_pinned);

		spin_unlock(&old_base->lock);
		spin_unlock_irqrestore(&new_base->lock, flags);
		put_cpu_ptr(&timer_bases);
	}
}

int timers_dead_cpu(unsigned int cpu)
{
	BUG_ON(cpu_online(cpu));
	__migrate_timers(cpu, true);
	return 0;
}

void timer_quiesce_cpu(void *cpup)
{
	__migrate_timers(*(unsigned int *)cpup, false);
}

#endif /* CONFIG_HOTPLUG_CPU */

static void __init init_timer_cpu(int cpu)
{
	struct timer_base *base;
	int i;

	for (i = 0; i < NR_BASES; i++) {
		base = per_cpu_ptr(&timer_bases[i], cpu);
		base->cpu = cpu;
		spin_lock_init(&base->lock);
		base->clk = jiffies;
	}
}

static inline void init_timer_deferrable_global(void)
{
	timer_base_deferrable.cpu = nr_cpu_ids;
	spin_lock_init(&timer_base_deferrable.lock);
	timer_base_deferrable.clk = jiffies;
}

static void __init init_timer_cpus(void)
{
	int cpu;

	init_timer_deferrable_global();

	for_each_possible_cpu(cpu)
		init_timer_cpu(cpu);
}

void __init init_timers(void)
{
	init_timer_cpus();
	open_softirq(TIMER_SOFTIRQ, run_timer_softirq);
}

/**
 * msleep - sleep safely even with waitqueue interruptions
 * @msecs: Time in milliseconds to sleep for
 */
void msleep(unsigned int msecs)
{
	unsigned long timeout = msecs_to_jiffies(msecs) + 1;

	while (timeout)
		timeout = schedule_timeout_uninterruptible(timeout);
}

EXPORT_SYMBOL(msleep);

/**
 * msleep_interruptible - sleep waiting for signals
 * @msecs: Time in milliseconds to sleep for
 */
unsigned long msleep_interruptible(unsigned int msecs)
{
	unsigned long timeout = msecs_to_jiffies(msecs) + 1;

	while (timeout && !signal_pending(current))
		timeout = schedule_timeout_interruptible(timeout);
	return jiffies_to_msecs(timeout);
}

EXPORT_SYMBOL(msleep_interruptible);

static void __sched do_usleep_range(unsigned long min, unsigned long max)
{
	ktime_t kmin;
	u64 delta;

	kmin = ktime_set(0, min * NSEC_PER_USEC);
	delta = (u64)(max - min) * NSEC_PER_USEC;
	schedule_hrtimeout_range(&kmin, delta, HRTIMER_MODE_REL);
}

/**
 * usleep_range - Sleep for an approximate time
 * @min: Minimum time in usecs to sleep
 * @max: Maximum time in usecs to sleep
 *
 * In non-atomic context where the exact wakeup time is flexible, use
 * usleep_range() instead of udelay(). The sleep improves responsiveness
 * by avoiding the CPU-hogging busy-wait of udelay(), and the range reduces
 * power usage by allowing hrtimers to take advantage of an already-
 * scheduled interrupt instead of scheduling a new one just for this sleep.
 */
void __sched usleep_range(unsigned long min, unsigned long max)
{
	__set_current_state(TASK_UNINTERRUPTIBLE);
	do_usleep_range(min, max);
}
EXPORT_SYMBOL(usleep_range);
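
/*
 * Editor's note: an illustrative sketch, not part of the kernel source.
 * It contrasts the delay/sleep primitives by time scale, loosely
 * following Documentation/timers/timers-howto.txt; the thresholds in
 * the comments are conventional guidance, not hard API limits, and
 * "my_settle_delays" is a hypothetical name.
 */
#include <linux/delay.h>

static void my_settle_delays(void)
{
	udelay(5);		/* microseconds: busy-wait, atomic-safe */
	usleep_range(100, 200);	/* us-to-ms scale: hrtimer-backed sleep */
	msleep(20);		/* >= ~10-20 ms: jiffy-granularity sleep */
}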