Lucas Wei a7836adcc8 Merge android-4.9-q (4.9.264) into android-msm-pixel-4.9-lts
Merge 4.9.264 into android-4.9-q
Linux 4.9.264
    xen-blkback: don't leak persistent grants from xen_blkbk_map()
    mac80211: fix double free in ibss_leave
    net: qrtr: fix a kernel-infoleak in qrtr_recvmsg()
    net: sched: validate stab values
  * can: dev: Move device back to init netns on owning netns delete
      include/net/rtnetlink.h
      net/core/dev.c
  * futex: Handle transient "ownerless" rtmutex state correctly
      kernel/futex.c
  * futex: Fix incorrect should_fail_futex() handling
      kernel/futex.c
  * futex: Prevent robust futex exit race
      kernel/futex.c
  * arm64: futex: Bound number of LDXR/STXR loops in FUTEX_WAKE_OP
      arch/arm64/include/asm/futex.h
  * locking/futex: Allow low-level atomic operations to return -EAGAIN
      kernel/futex.c
  * futex: Fix (possible) missed wakeup
      kernel/futex.c
  * futex: Handle early deadlock return correctly
      kernel/futex.c
      kernel/locking/rtmutex.c
  * futex,rt_mutex: Fix rt_mutex_cleanup_proxy_lock()
      kernel/locking/rtmutex.c
  * futex: Avoid freeing an active timer
      kernel/futex.c
  * futex: Drop hb->lock before enqueueing on the rtmutex
      kernel/futex.c
      kernel/locking/rtmutex.c
      kernel/locking/rtmutex_common.h
  * futex: Rework futex_lock_pi() to use rt_mutex_*_proxy_lock()
      kernel/futex.c
      kernel/locking/rtmutex.c
      kernel/locking/rtmutex_common.h
  * futex,rt_mutex: Introduce rt_mutex_init_waiter()
      kernel/futex.c
      kernel/locking/rtmutex.c
      kernel/locking/rtmutex_common.h
  * futex: Use smp_store_release() in mark_wake_futex()
      kernel/futex.c
  * idr: add ida_is_empty
      drivers/usb/gadget/function/f_hid.c
      include/linux/idr.h
    perf auxtrace: Fix auxtrace queue conflict
    ACPI: scan: Use unique number for instance_no
    ACPI: scan: Rearrange memory allocation in acpi_device_add()
    RDMA/cxgb4: Fix adapter LE hash errors while destroying ipv6 listening server
    net: cdc-phonet: fix data-interface release on probe failure
    mac80211: fix rate mask reset
    can: m_can: m_can_do_rx_poll(): fix extraneous msg loss warning
    can: c_can: move runtime PM enable/disable to c_can_platform
    can: c_can_pci: c_can_pci_remove(): fix use-after-free
    net/qlcnic: Fix a use after free in qlcnic_83xx_get_minidump_template
    e1000e: Fix error handling in e1000_set_d0_lplu_state_82571
    e1000e: add rtnl_lock() to e1000_reset_task
    net: dsa: bcm_sf2: Qualify phydev->dev_flags based on port
  * macvlan: macvlan_count_rx() needs to be aware of preemption
      include/linux/if_macvlan.h
    bus: omap_l3_noc: mark l3 irqs as IRQF_NO_THREAD
    arm64: dts: ls1043a: mark crypto engine dma coherent
    squashfs: fix xattr id and id lookup sanity checks
  * squashfs: fix inode lookup sanity checks
      fs/squashfs/squashfs_fs.h
    x86/tlb: Flush global mappings when KAISER is disabled
    ia64: fix ptrace(PTRACE_SYSCALL_INFO_EXIT) sign
    ia64: fix ia64_syscall_get_set_arguments() for break-based syscalls
    nfs: we don't support removing system.nfs4_acl
  * u64_stats,lockdep: Fix u64_stats_init() vs lockdep
      include/linux/u64_stats_sync.h
    atm: idt77252: fix null-ptr-dereference
    atm: uPD98402: fix incorrect allocation
    net: wan: fix error return code of uhdlc_init()
    NFS: Correct size calculation for create reply length
  * nfs: fix PNFS_FLEXFILE_LAYOUT Kconfig default
      fs/nfs/Kconfig
    sun/niu: fix wrong RXMAC_BC_FRM_CNT_COUNT count
    net: tehuti: fix error return code in bdx_probe()
    ixgbe: Fix memleak in ixgbe_configure_clsu32
    atm: lanai: dont run lanai_dev_close if not open
    atm: eni: dont release is never initialized
    powerpc/4xx: Fix build errors from mfdcr()
    net: fec: ptp: avoid register access when ipg clock is disabled
    ANDROID: Make vsock virtio packet buff size configurable
    Merge 4.9.263 into android-4.9-q
Linux 4.9.263
  * genirq: Disable interrupts for force threaded handlers
      kernel/irq/manage.c
  * ext4: fix potential error in ext4_do_update_inode
      fs/ext4/inode.c
  * ext4: find old entry again if failed to rename whiteout
      fs/ext4/namei.c
    x86: Introduce TS_COMPAT_RESTART to fix get_nr_restart_syscall()
    x86: Move TS_COMPAT back to asm/thread_info.h
  * kernel, fs: Introduce and use set_restart_fn() and arch_set_restart_data()
      fs/select.c
      include/linux/thread_info.h
      kernel/futex.c
      kernel/time/alarmtimer.c
      kernel/time/hrtimer.c
      kernel/time/posix-cpu-timers.c
    x86/ioapic: Ignore IRQ2 again
    perf/x86/intel: Fix a crash caused by zero PEBS status
    PCI: rpadlpar: Fix potential drc_name corruption in store functions
    iio: adis16400: Fix an error code in adis16400_initial_setup()
  * usb: gadget: configfs: Fix KASAN use-after-free
      drivers/usb/gadget/configfs.c
  * USB: replace hardcode maximum usb string length by definition
      drivers/usb/gadget/composite.c
      drivers/usb/gadget/configfs.c
      drivers/usb/gadget/usbstring.c
      include/uapi/linux/usb/ch9.h
    scsi: lpfc: Fix some error codes in debugfs
    net/qrtr: fix __netdev_alloc_skb call
    sunrpc: fix refcount leak for rpc auth modules
    svcrdma: disable timeouts on rdma backchannel
    NFSD: Repair misuse of sv_lock in 5.10.16-rt30.
    nvmet: don't check iosqes,iocqes for discovery controllers
    btrfs: fix race when cloning extent buffer during rewind of an old root
    ixgbe: prevent ptp_rx_hang from running when in FILTER_ALL mode
    ixgbe: check for Tx timestamp timeouts during watchdog
    net: dsa: b53: Support setting learning on port
  * ext4: check journal inode extents more carefully
      fs/ext4/block_validity.c
      fs/ext4/ext4.h
      fs/ext4/extents.c
      fs/ext4/indirect.c
      fs/ext4/inode.c
      fs/ext4/mballoc.c
  * ext4: don't allow overlapping system zones
      fs/ext4/block_validity.c
  * ext4: handle error of ext4_setup_system_zone() on remount
      fs/ext4/super.c
    Merge 4.9.262 into android-4.9-q
  * FROMGIT: configfs: fix a use-after-free in __configfs_open_file
      fs/configfs/file.c
Linux 4.9.262
    xen/events: avoid handling the same event on two cpus at the same time
    xen/events: don't unmask an event channel when an eoi is pending
    xen/events: reset affinity of 2-level event when tearing it down
    iio: imu: adis16400: fix memory leak
    iio: imu: adis16400: release allocated memory on failure
    KVM: arm64: Fix exclusive limit for IPA size
    hwmon: (lm90) Fix max6658 sporadic wrong temperature reading
    binfmt_misc: fix possible deadlock in bm_register_write
    powerpc/64s: Fix instruction encoding for lis in ppc_function_entry()
    alpha: switch __copy_user() and __do_clean_user() to normal calling conventions
    alpha: Package string routines together
    alpha: make short build log available for division routines
    alpha: merge build rules of division routines
    alpha: add $(src)/ rather than $(obj)/ to make source file path
  * configfs: fix a use-after-free in __configfs_open_file
      fs/configfs/file.c
    block: rsxx: fix error return code of rsxx_pci_probe()
    NFSv4.2: fix return value of _nfs4_get_security_label()
    sh_eth: fix TRSCER mask for R7S72100
    staging: comedi: pcl818: Fix endian problem for AI command data
    staging: comedi: pcl711: Fix endian problem for AI command data
    staging: comedi: me4000: Fix endian problem for AI command data
    staging: comedi: dmm32at: Fix endian problem for AI command data
    staging: comedi: das800: Fix endian problem for AI command data
    staging: comedi: das6402: Fix endian problem for AI command data
    staging: comedi: adv_pci1710: Fix endian problem for AI command data
    staging: comedi: addi_apci_1500: Fix endian problem for command sample
    staging: comedi: addi_apci_1032: Fix endian problem for COS sample
    staging: rtl8192e: Fix possible buffer overflow in _rtl92e_wx_set_scan
    staging: rtl8712: Fix possible buffer overflow in r8712_sitesurvey_cmd
    staging: ks7010: prevent buffer overflow in ks_wlan_set_scan()
    staging: rtl8188eu: fix potential memory corruption in rtw_check_beacon_data()
    staging: rtl8712: unterminated string leads to read overflow
    staging: rtl8188eu: prevent ->ssid overflow in rtw_wx_set_scan()
    staging: rtl8192u: fix ->ssid overflow in r8192_wx_set_scan()
    usbip: fix vhci_hcd attach_store() races leading to gpf
    usbip: fix stub_dev usbip_sockfd_store() races leading to gpf
    usbip: fix vudc to check for stream socket
    usbip: fix vhci_hcd to check for stream socket
    usbip: fix stub_dev to check for stream socket
    USB: serial: cp210x: add some more GE USB IDs
    USB: serial: cp210x: add ID for Acuity Brands nLight Air Adapter
    USB: serial: ch341: add new Product ID
    USB: serial: io_edgeport: fix memory leak in edge_startup
  * xhci: Improve detection of device initiated wake signal.
      drivers/usb/host/xhci.c
    usb: renesas_usbhs: Clear PIPECFG for re-enabling pipe with other EPNUM
    usb: gadget: f_uac2: always increase endpoint max_packet_size by one audio slot
    Goodix Fingerprint device is not a modem
    scripts/recordmcount.{c,pl}: support -ffunction-sections .text.* section names
  * mmc: core: Fix partition switch time for eMMC
      drivers/mmc/core/mmc.c
    s390/dasd: fix hanging DASD driver unbind
  * ALSA: usb-audio: Fix "cannot get freq eq" errors on Dell AE515 sound bar
      sound/usb/quirks.c
    ALSA: hda: Avoid spurious unsol event handling during S3/S4
    ALSA: hda/hdmi: Cancel pending works before suspend
    scsi: libiscsi: Fix iscsi_prep_scsi_cmd_pdu() error handling
    s390/smp: __smp_rescan_cpus() - move cpumask away from stack
    PCI: xgene-msi: Fix race in installing chained irq handler
    powerpc/perf: Record counter overflow always if SAMPLE_IP is unset
    mmc: mediatek: fix race condition between msdc_request_timeout and irq
    mmc: mxs-mmc: Fix a resource leak in an error handling path in 'mxs_mmc_probe()'
    udf: fix silent AED tagLocation corruption
    media: usbtv: Fix deadlock on suspend
    net: davicom: Fix regulator not turned off on driver removal
    net: davicom: Fix regulator not turned off on failed probe
    net: lapbether: Remove netif_start_queue / netif_stop_queue
  * net: sched: avoid duplicates in classes dump
      net/sched/sch_api.c
    net/mlx4_en: update moderation when config reset
  * Revert "mm, slub: consider rest of partial list if acquire_slab() fails"
      mm/slub.c
    cifs: return proper error code in statfs(2)
  * netfilter: x_tables: gpf inside xt_find_revision()
      net/netfilter/x_tables.c
    can: flexcan: enable RX FIFO after FRZ/HALT valid
    can: flexcan: assert FRZ bit in flexcan_chip_freeze()
    can: skb: can_skb_set_owner(): fix ref counting if socket was closed before setting skb ownership
    net: avoid infinite loop in mpls_gso_segment when mpls_hlen == 0
  * net: Fix gro aggregation for udp encaps with zero csum
      net/ipv4/udp_offload.c
    ath9k: fix transmitting to stations in dynamic SMPS mode
    ethernet: alx: fix order of calls on resume
    uapi: nfnetlink_cthelper.h: fix userspace compilation error
    Merge 4.9.261 into android-4.9-q
Linux 4.9.261
    misc: eeprom_93xx46: Add quirk to support Microchip 93LC46B eeprom
  * PCI: Add function 1 DMA alias quirk for Marvell 9215 SATA controller
      drivers/pci/quirks.c
    platform/x86: acer-wmi: Add new force_caps module parameter
    iommu/amd: Fix sleeping in atomic in increase_address_space()
  * dm table: fix DAX iterate_devices based device capability checks
      drivers/md/dm-table.c
  * dm table: fix iterate_devices based device capability checks
      drivers/md/dm-table.c
    rsxx: Return -EFAULT if copy_to_user() fails
    ALSA: ctxfi: cthw20k2: fix mask on conf to allow 4 bits
    usbip: tools: fix build error for multiple definition
    btrfs: fix raid6 qstripe kmap
    btrfs: raid56: simplify tracking of Q stripe presence
    Merge 4.9.260 into android-4.9-q
Linux 4.9.260
  * media: v4l: ioctl: Fix memory leak in video_usercopy
      drivers/media/v4l2-core/v4l2-ioctl.c
  * swap: fix swapfile read/write offset
      mm/page_io.c
      mm/swapfile.c
  * zsmalloc: account the number of compacted pages correctly
      drivers/block/zram/zram_drv.c
      include/linux/zsmalloc.h
      mm/zsmalloc.c
    xen-netback: respect gnttab_map_refs()'s return value
    Xen/gnttab: handle p2m update errors on a per-slot basis
    scsi: iscsi: Verify lengths on passthrough PDUs
    scsi: iscsi: Ensure sysfs attributes are limited to PAGE_SIZE
  * sysfs: Add sysfs_emit and sysfs_emit_at to format sysfs output
      fs/sysfs/file.c
      include/linux/sysfs.h
    scsi: iscsi: Restrict sessions and handles to admin capabilities
    media: uvcvideo: Allow entities with no pads
    staging: most: sound: add sanity check for function argument
  * Bluetooth: Fix null pointer dereference in amp_read_loc_assoc_final_data
      net/bluetooth/amp.c
    x86/build: Treat R_386_PLT32 relocation as R_386_PC32
    ath10k: fix wmi mgmt tx queue full due to race condition
    pktgen: fix misuse of BUG_ON() in pktgen_thread_worker()
    wlcore: Fix command execute failure 19 for wl12xx
  * vt/consolemap: do font sum unsigned
      drivers/tty/vt/consolemap.c
    x86/reboot: Add Zotac ZBOX CI327 nano PCI reboot quirk
    staging: fwserial: Fix error handling in fwserial_create
    mm/hugetlb.c: fix unnecessary address expansion of pmd sharing
  * net: fix up truesize of cloned skb in skb_prepare_for_shift()
      net/core/skbuff.c
  * smackfs: restrict bytes count in smackfs write functions
      security/smack/smackfs.c
    xfs: Fix assert failure in xfs_setattr_size()
    JFS: more checks for invalid superblock
  * arm64: Use correct ll/sc atomic constraints
      arch/arm64/include/asm/atomic_ll_sc.h
  * arm64: cmpxchg: Use "K" instead of "L" for ll/sc immediate constraint
      arch/arm64/include/asm/atomic_ll_sc.h
  * arm64: Avoid redundant type conversions in xchg() and cmpxchg()
      arch/arm64/include/asm/atomic_ll_sc.h
      arch/arm64/include/asm/cmpxchg.h
  * arm64: Remove redundant mov from LL/SC cmpxchg
      arch/arm64/include/asm/atomic_ll_sc.h
    printk: fix deadlock when kernel panic
    hugetlb: fix update_and_free_page contig page struct assumption
  * scripts: set proper OpenSSL include dir also for sign-file
      scripts/Makefile
  * scripts: use pkg-config to locate libcrypto
      scripts/Makefile
    arm: kprobes: Allow to handle reentered kprobe on single-stepping
    net: usb: qmi_wwan: support ZTE P685M modem
  * futex: Don't enable IRQs unconditionally in put_pi_state()
      kernel/futex.c
  * futex: Fix more put_pi_state() vs. exit_pi_state_list() races
      kernel/futex.c
  * futex: Fix pi_state->owner serialization
      kernel/futex.c
  * futex: Futex_unlock_pi() determinism
      kernel/futex.c
  * futex: Pull rt_mutex_futex_unlock() out from under hb->lock
      kernel/futex.c
  * futex: Cleanup refcounting
      kernel/futex.c
  * futex: Cleanup variable names for futex_top_waiter()
      kernel/futex.c
    Merge 4.9.259 into android-4.9-q
Linux 4.9.259
    dm era: Update in-core bitset after committing the metadata
  * net: icmp: pass zeroed opts from icmp{,v6}_ndo_send before sending
      include/linux/icmpv6.h
      include/linux/ipv6.h
      include/net/icmp.h
      net/ipv4/icmp.c
      net/ipv6/icmp.c
      net/ipv6/ip6_icmp.c
  * ipv6: silence compilation warning for non-IPV6 builds
      include/linux/icmpv6.h
  * ipv6: icmp6: avoid indirect call for icmpv6_send()
      include/linux/icmpv6.h
      net/ipv6/icmp.c
      net/ipv6/ip6_icmp.c
    sunvnet: use icmp_ndo_send helper
    gtp: use icmp_ndo_send helper
  * icmp: allow icmpv6_ndo_send to work with CONFIG_IPV6=n
      include/linux/icmpv6.h
  * icmp: introduce helper for nat'd source address in network device context
      include/linux/icmpv6.h
      include/net/icmp.h
      net/ipv4/icmp.c
      net/ipv6/ip6_icmp.c
  * futex: fix dead code in attach_to_pi_owner()
      kernel/futex.c
  * futex: Fix OWNER_DEAD fixup
      kernel/futex.c
    dm era: only resize metadata in preresume
    dm era: Reinitialize bitset cache before digesting a new writeset
    dm era: Use correct value size in equality function of writeset tree
    dm era: Fix bitset memory leaks
    dm era: Verify the data block size hasn't changed
    dm era: Recover committed writeset after crash
    gfs2: Don't skip dlm unlock if glock has an lvb
    sparc32: fix a user-triggerable oops in clear_user()
  * f2fs: fix out-of-repair __setattr_copy()
      fs/f2fs/file.c
    gpio: pcf857x: Fix missing first interrupt
    mmc: sdhci-esdhc-imx: fix kernel panic when remove module
  * module: Ignore _GLOBAL_OFFSET_TABLE_ when warning for undefined symbols
      kernel/module.c
    libnvdimm/dimm: Avoid race between probe and available_slots_show()
    usb: renesas_usbhs: Clear pipe running flag in usbhs_pkt_pop()
    mm: hugetlb: fix a race between freeing and dissolving the page
    mtd: spi-nor: hisi-sfc: Put child node np on error path
    floppy: reintroduce O_NDELAY fix
    x86/reboot: Force all cpus to exit VMX root if VMX is supported
    staging: rtl8188eu: Add Edimax EW-7811UN V2 to device table
    drivers/misc/vmw_vmci: restrict too big queue size in qp_host_alloc_queue
  * seccomp: Add missing return in non-void function
      kernel/seccomp.c
    btrfs: fix extent buffer leak on failure to copy root
    btrfs: fix reloc root leak with 0 ref reloc roots on recovery
    btrfs: abort the transaction if we fail to inc ref in btrfs_copy_root
    KEYS: trusted: Fix migratable=1 failing
  * usb: dwc3: gadget: Fix dep->interval for fullspeed interrupt
      drivers/usb/dwc3/gadget.c
  * usb: dwc3: gadget: Fix setting of DEPCFG.bInterval_m1
      drivers/usb/dwc3/gadget.c
    USB: serial: mos7720: fix error code in mos7720_write()
    USB: serial: mos7840: fix error code in mos7840_write()
    usb: musb: Fix runtime PM race in musb_queue_resume_work
    USB: serial: option: update interface mapping for ZTE P685M
    Input: i8042 - add ASUS Zenbook Flip to noselftest list
    Input: joydev - prevent potential read overflow in ioctl
  * Input: xpad - add support for PowerA Enhanced Wired Controller for Xbox Series X|S
      drivers/input/joystick/xpad.c
    Input: raydium_ts_i2c - do not send zero length
    ACPI: configfs: add missing check after configfs_register_default_group()
  * blk-settings: align max_sectors on "logical_block_size" boundary
      block/blk-settings.c
  * scsi: bnx2fc: Fix Kconfig warning & CNIC build errors
      drivers/scsi/bnx2fc/Kconfig
    i2c: brcmstb: Fix brcmstd_send_i2c_cmd condition
  * arm64: Add missing ISB after invalidating TLB in __primary_switch
      arch/arm64/kernel/head.S
    mm/hugetlb: fix potential double free in hugetlb_register_node() error path
  * mm/memory.c: fix potential pte_unmap_unlock pte error
      mm/memory.c
    ocfs2: fix a use after free on error
    net/mlx4_core: Add missed mlx4_free_cmd_mailbox()
    i40e: Fix flow for IPv6 next header (extension header)
    drm/msm/dsi: Correct io_start for MSM8994 (20nm PHY)
  * PCI: Align checking of syscall user config accessors
      drivers/pci/syscall.c
    VMCI: Use set_page_dirty_lock() when unregistering guest memory
    pwm: rockchip: rockchip_pwm_probe(): Remove superfluous clk_unprepare()
    misc: eeprom_93xx46: Add module alias to avoid breaking support for non device tree users
    misc: eeprom_93xx46: Fix module alias to enable module autoprobe
    sparc64: only select COMPAT_BINFMT_ELF if BINFMT_ELF is set
    Input: elo - fix an error code in elo_connect()
    perf test: Fix unaligned access in sample parsing test
    perf intel-pt: Fix missing CYC processing in PSB
    spi: pxa2xx: Fix the controller numbering for Wildcat Point
    powerpc/pseries/dlpar: handle ibm, configure-connector delay status
    mfd: wm831x-auxadc: Prevent use after free in wm831x_auxadc_read_irq()
    RDMA/rxe: Fix coding error in rxe_recv.c
    perf tools: Fix DSO filtering when not finding a map for a sampled address
  * tracepoint: Do not fail unregistering a probe due to memory failure
      kernel/tracepoint.c
  * amba: Fix resource leak for drivers without .remove
      drivers/amba/bus.c
    ARM: 9046/1: decompressor: Do not clear SCTLR.nTLSMD for ARMv7+ cores
    mmc: usdhi6rol0: Fix a resource leak in the error handling path of the probe
    powerpc/47x: Disable 256k page size
    IB/umad: Return EIO in case of when device disassociated
    isofs: release buffer head before return
    regulator: axp20x: Fix reference cout leak
    clocksource/drivers/mxs_timer: Add missing semicolon when DEBUG is defined
    power: reset: at91-sama5d2_shdwc: fix wkupdbc mask
  * of/fdt: Make sure no-map does not remove already reserved regions
      drivers/of/fdt.c
  * fdt: Properly handle "no-map" field in the memory region
      drivers/of/fdt.c
    dmaengine: fsldma: Fix a resource leak in an error handling path of the probe function
    dmaengine: fsldma: Fix a resource leak in the remove function
  * HID: core: detect and skip invalid inputs to snto32()
      drivers/hid/hid-core.c
    spi: cadence-quadspi: Abort read if dummy cycles required are too many
    clk: meson: clk-pll: fix initializing the old rate (fallback) for a PLL
    jffs2: fix use after free in jffs2_sum_write_data()
    fs/jfs: fix potential integer overflow on shift of a int
    crypto: ecdh_helper - Ensure 'len >= secret.len' in decode_key()
    btrfs: clarify error returns values in __load_free_space_cache
    ata: ahci_brcm: Add back regulators management
    media: uvcvideo: Accept invalid bFormatIndex and bFrameIndex values
    media: pxa_camera: declare variable when DEBUG is defined
    media: cx25821: Fix a bug when reallocating some dma memory
    media: qm1d1c0042: fix error return code in qm1d1c0042_init()
    media: lmedm04: Fix misuse of comma
    ASoC: cs42l56: fix up error handling in probe
    media: tm6000: Fix memleak in tm6000_start_stream
    media: media/pci: Fix memleak in empress_init
    media: vsp1: Fix an error handling path in the probe function
    MIPS: lantiq: Explicitly compare LTQ_EBU_PCC_ISTAT against 0
    MIPS: c-r4k: Fix section mismatch for loongson2_sc_init
    gma500: clean up error handling in init
    drm/gma500: Fix error return code in psb_driver_load()
  * fbdev: aty: SPARC64 requires FB_ATY_CT
      drivers/video/fbdev/Kconfig
    b43: N-PHY: Fix the update of coef for the PHY revision >= 3case
    mac80211: fix potential overflow when multiplying to u32 integers
    xen/netback: fix spurious event detection for common event case
    bnxt_en: reverse order of TX disable and carrier off
    ARM: s3c: fix fiq for clang IAS
    arm64: dts: msm8916: Fix reserved and rfsa nodes unit address
    usb: dwc2: Make "trimming xfer length" a debug message
    usb: dwc2: Abort transaction after errors with unknown reason
    usb: dwc2: Do not update data length if it is 0 on inbound transfers
    ARM: dts: Configure missing thermal interrupt for 4430
  * Bluetooth: Put HCI device if inquiry procedure interrupts
      net/bluetooth/hci_core.c
  * Bluetooth: drop HCI device reference before return
      net/bluetooth/a2mp.c
    arm64: dts: exynos: correct PMIC interrupt trigger level on Espresso
    ARM: dts: exynos: correct PMIC interrupt trigger level on Arndale Octa
    ARM: dts: exynos: correct PMIC interrupt trigger level on Spring
  * Bluetooth: Fix initializing response id after clearing struct
      net/bluetooth/a2mp.c
    mm, thp: make do_huge_pmd_wp_page() lock page for testing mapcount
  * random: fix the RNDRESEEDCRNG ioctl
      drivers/char/random.c
    MIPS: vmlinux.lds.S: add missing PAGE_ALIGNED_DATA() section
  * kdb: Make memory allocations more robust
      kernel/debug/kdb/kdb_private.h
    scripts/recordmcount.pl: support big endian for ARCH sh
    cifs: Set CIFS_MOUNT_USE_PREFIX_PATH flag on setting cifs_sb->prepath.
    NET: usb: qmi_wwan: Adding support for Cinterion MV31
    arm64: tegra: Add power-domain for Tegra210 HDA
    igb: Remove incorrect "unexpected SYS WRAP" log message
    ntfs: check for valid standard information attribute
  * usb: quirks: add quirk to start video capture on ELMO L-12F document camera reliable
      drivers/usb/core/quirks.c
  * HID: make arrays usage and value to be the same
      drivers/hid/hid-core.c

Bug: 184596728
Change-Id: I50e867d3e8d2d62938b73e54b8d6e614bb3f2d1b
Signed-off-by: Lucas Wei <lucaswei@google.com>
2021-04-26 16:09:53 +08:00


/*
* linux/kernel/hrtimer.c
*
* Copyright(C) 2005-2006, Thomas Gleixner <tglx@linutronix.de>
* Copyright(C) 2005-2007, Red Hat, Inc., Ingo Molnar
* Copyright(C) 2006-2007 Timesys Corp., Thomas Gleixner
*
* High-resolution kernel timers
*
* In contrast to the low-resolution timeout API implemented in
* kernel/timer.c, hrtimers provide finer resolution and accuracy
* depending on system configuration and capabilities.
*
* These timers are currently used for:
* - itimers
* - POSIX timers
* - nanosleep
* - precise in-kernel timing
*
* Started by: Thomas Gleixner and Ingo Molnar
*
* Credits:
* based on kernel/timer.c
*
* Help, testing, suggestions, bugfixes, improvements were
* provided by:
*
* George Anzinger, Andrew Morton, Steven Rostedt, Roman Zippel
* et al.
*
* For licensing details see kernel-base/COPYING
*/
#include <linux/cpu.h>
#include <linux/delay.h>
#include <linux/export.h>
#include <linux/percpu.h>
#include <linux/hrtimer.h>
#include <linux/notifier.h>
#include <linux/syscalls.h>
#include <linux/kallsyms.h>
#include <linux/interrupt.h>
#include <linux/tick.h>
#include <linux/seq_file.h>
#include <linux/err.h>
#include <linux/debugobjects.h>
#include <linux/sched.h>
#include <linux/sched/sysctl.h>
#include <linux/sched/rt.h>
#include <linux/sched/deadline.h>
#include <linux/timer.h>
#include <linux/freezer.h>
#include <asm/uaccess.h>
#include <trace/events/timer.h>
#include "tick-internal.h"
/*
* The timer bases:
*
* There are more clockids than hrtimer bases. Thus, we index
* into the timer bases by the hrtimer_base_type enum. When trying
* to reach a base using a clockid, hrtimer_clockid_to_base()
* is used to convert from clockid to the proper hrtimer_base_type.
*/
DEFINE_PER_CPU(struct hrtimer_cpu_base, hrtimer_bases) =
{
.lock = __RAW_SPIN_LOCK_UNLOCKED(hrtimer_bases.lock),
.seq = SEQCNT_ZERO(hrtimer_bases.seq),
.clock_base =
{
{
.index = HRTIMER_BASE_MONOTONIC,
.clockid = CLOCK_MONOTONIC,
.get_time = &ktime_get,
},
{
.index = HRTIMER_BASE_REALTIME,
.clockid = CLOCK_REALTIME,
.get_time = &ktime_get_real,
},
{
.index = HRTIMER_BASE_BOOTTIME,
.clockid = CLOCK_BOOTTIME,
.get_time = &ktime_get_boottime,
},
{
.index = HRTIMER_BASE_TAI,
.clockid = CLOCK_TAI,
.get_time = &ktime_get_clocktai,
},
}
};
static const int hrtimer_clock_to_base_table[MAX_CLOCKS] = {
/* Make sure we catch unsupported clockids */
[0 ... MAX_CLOCKS - 1] = HRTIMER_MAX_CLOCK_BASES,
[CLOCK_REALTIME] = HRTIMER_BASE_REALTIME,
[CLOCK_MONOTONIC] = HRTIMER_BASE_MONOTONIC,
[CLOCK_BOOTTIME] = HRTIMER_BASE_BOOTTIME,
[CLOCK_TAI] = HRTIMER_BASE_TAI,
};
/*
* Functions and macros which are different for UP/SMP systems are kept in a
* single place
*/
#ifdef CONFIG_SMP
/*
* We require the migration_base for lock_hrtimer_base()/switch_hrtimer_base()
* such that hrtimer_callback_running() can unconditionally dereference
* timer->base->cpu_base
*/
static struct hrtimer_cpu_base migration_cpu_base = {
.seq = SEQCNT_ZERO(migration_cpu_base),
.clock_base = { { .cpu_base = &migration_cpu_base, }, },
};
#define migration_base migration_cpu_base.clock_base[0]
/*
* We are using hashed locking: holding per_cpu(hrtimer_bases)[n].lock
* means that all timers which are tied to this base via timer->base are
* locked, and the base itself is locked too.
*
* So __run_timers/migrate_timers can safely modify all timers which could
* be found on the lists/queues.
*
* When the timer's base is locked, and the timer is removed from the list, it is
* possible to set timer->base = &migration_base and drop the lock: the timer
* remains locked.
*/
static
struct hrtimer_clock_base *lock_hrtimer_base(const struct hrtimer *timer,
unsigned long *flags)
{
struct hrtimer_clock_base *base;
for (;;) {
base = timer->base;
if (likely(base != &migration_base)) {
raw_spin_lock_irqsave(&base->cpu_base->lock, *flags);
if (likely(base == timer->base))
return base;
/* The timer has migrated to another CPU: */
raw_spin_unlock_irqrestore(&base->cpu_base->lock, *flags);
}
cpu_relax();
}
}
/*
* With HIGHRES=y we do not migrate the timer when it is expiring
* before the next event on the target cpu because we cannot reprogram
* the target cpu hardware and we would cause it to fire late.
*
* Called with cpu_base->lock of target cpu held.
*/
static int
hrtimer_check_target(struct hrtimer *timer, struct hrtimer_clock_base *new_base)
{
#ifdef CONFIG_HIGH_RES_TIMERS
ktime_t expires;
if (!new_base->cpu_base->hres_active)
return 0;
expires = ktime_sub(hrtimer_get_expires(timer), new_base->offset);
return expires.tv64 <= new_base->cpu_base->expires_next.tv64;
#else
return 0;
#endif
}
#ifdef CONFIG_NO_HZ_COMMON
static inline
struct hrtimer_cpu_base *get_target_base(struct hrtimer_cpu_base *base,
int pinned)
{
if (pinned || !base->migration_enabled)
return base;
return &per_cpu(hrtimer_bases, get_nohz_timer_target());
}
#else
static inline
struct hrtimer_cpu_base *get_target_base(struct hrtimer_cpu_base *base,
int pinned)
{
return base;
}
#endif
/*
* We switch the timer base to a power-optimized CPU target if:
* - NO_HZ_COMMON is enabled
* - timer migration is enabled
* - the timer callback is not running
* - the timer is not the first expiring timer on the new target
*
* If one of the above requirements is not fulfilled we move the timer
* to the current CPU or leave it on the previously assigned CPU if
* the timer callback is currently running.
*/
static inline struct hrtimer_clock_base *
switch_hrtimer_base(struct hrtimer *timer, struct hrtimer_clock_base *base,
int pinned)
{
struct hrtimer_cpu_base *new_cpu_base, *this_cpu_base;
struct hrtimer_clock_base *new_base;
int basenum = base->index;
this_cpu_base = this_cpu_ptr(&hrtimer_bases);
new_cpu_base = get_target_base(this_cpu_base, pinned);
again:
new_base = &new_cpu_base->clock_base[basenum];
if (base != new_base) {
/*
* We are trying to move timer to new_base.
* However we can't change timer's base while it is running,
* so we keep it on the same CPU. No hassle vs. reprogramming
* the event source in the high resolution case. The softirq
* code will take care of this when the timer function has
* completed. There is no conflict as we hold the lock until
* the timer is enqueued.
*/
if (unlikely(hrtimer_callback_running(timer)))
return base;
/* See the comment in lock_hrtimer_base() */
timer->base = &migration_base;
raw_spin_unlock(&base->cpu_base->lock);
raw_spin_lock(&new_base->cpu_base->lock);
if (new_cpu_base != this_cpu_base &&
hrtimer_check_target(timer, new_base)) {
raw_spin_unlock(&new_base->cpu_base->lock);
raw_spin_lock(&base->cpu_base->lock);
new_cpu_base = this_cpu_base;
timer->base = base;
goto again;
}
timer->base = new_base;
} else {
if (new_cpu_base != this_cpu_base &&
hrtimer_check_target(timer, new_base)) {
new_cpu_base = this_cpu_base;
goto again;
}
}
return new_base;
}
#else /* !CONFIG_SMP */
static inline struct hrtimer_clock_base *
lock_hrtimer_base(const struct hrtimer *timer, unsigned long *flags)
{
struct hrtimer_clock_base *base = timer->base;
raw_spin_lock_irqsave(&base->cpu_base->lock, *flags);
return base;
}
# define switch_hrtimer_base(t, b, p) (b)
#endif /* CONFIG_SMP */
/*
* Functions for the union type storage format of ktime_t which are
* too large for inlining:
*/
#if BITS_PER_LONG < 64
/*
* Divide a ktime value by a nanosecond value
*/
s64 __ktime_divns(const ktime_t kt, s64 div)
{
int sft = 0;
s64 dclc;
u64 tmp;
dclc = ktime_to_ns(kt);
tmp = dclc < 0 ? -dclc : dclc;
/* Make sure the divisor is less than 2^32: */
while (div >> 32) {
sft++;
div >>= 1;
}
tmp >>= sft;
do_div(tmp, (unsigned long) div);
return dclc < 0 ? -tmp : tmp;
}
EXPORT_SYMBOL_GPL(__ktime_divns);
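/*
 * Worked example (illustrative): for div = 6 * NSEC_PER_SEC, which
 * exceeds 2^32, the loop above shifts once: sft = 1 and div becomes
 * 3 * NSEC_PER_SEC, which fits in 32 bits. tmp is shifted right by
 * the same amount, so do_div() computes (|kt| >> 1) / (div >> 1),
 * i.e. the original quotient (up to rounding in the discarded low
 * bit), using only a 64-by-32 bit division.
 */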
#endif /* BITS_PER_LONG < 64 */
/*
* Add two ktime values and do a safety check for overflow:
*/
ktime_t ktime_add_safe(const ktime_t lhs, const ktime_t rhs)
{
ktime_t res = ktime_add_unsafe(lhs, rhs);
/*
* We use KTIME_SEC_MAX here, the maximum timeout which we can
* return to user space in a timespec:
*/
if (res.tv64 < 0 || res.tv64 < lhs.tv64 || res.tv64 < rhs.tv64)
res = ktime_set(KTIME_SEC_MAX, 0);
return res;
}
EXPORT_SYMBOL_GPL(ktime_add_safe);
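/*
 * Example (illustrative): if lhs already sits near KTIME_MAX, adding
 * even a small rhs wraps res negative; the check above detects this
 * and clamps the result to KTIME_SEC_MAX seconds, the largest timeout
 * that can be returned to user space.
 */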
#ifdef CONFIG_DEBUG_OBJECTS_TIMERS
static struct debug_obj_descr hrtimer_debug_descr;
static void *hrtimer_debug_hint(void *addr)
{
return ((struct hrtimer *) addr)->function;
}
/*
* fixup_init is called when:
* - an active object is initialized
*/
static bool hrtimer_fixup_init(void *addr, enum debug_obj_state state)
{
struct hrtimer *timer = addr;
switch (state) {
case ODEBUG_STATE_ACTIVE:
hrtimer_cancel(timer);
debug_object_init(timer, &hrtimer_debug_descr);
return true;
default:
return false;
}
}
/*
* fixup_activate is called when:
* - an active object is activated
* - an unknown non-static object is activated
*/
static bool hrtimer_fixup_activate(void *addr, enum debug_obj_state state)
{
switch (state) {
case ODEBUG_STATE_ACTIVE:
WARN_ON(1);
/* fall through */
default:
return false;
}
}
/*
* fixup_free is called when:
* - an active object is freed
*/
static bool hrtimer_fixup_free(void *addr, enum debug_obj_state state)
{
struct hrtimer *timer = addr;
switch (state) {
case ODEBUG_STATE_ACTIVE:
hrtimer_cancel(timer);
debug_object_free(timer, &hrtimer_debug_descr);
return true;
default:
return false;
}
}
static struct debug_obj_descr hrtimer_debug_descr = {
.name = "hrtimer",
.debug_hint = hrtimer_debug_hint,
.fixup_init = hrtimer_fixup_init,
.fixup_activate = hrtimer_fixup_activate,
.fixup_free = hrtimer_fixup_free,
};
static inline void debug_hrtimer_init(struct hrtimer *timer)
{
debug_object_init(timer, &hrtimer_debug_descr);
}
static inline void debug_hrtimer_activate(struct hrtimer *timer)
{
debug_object_activate(timer, &hrtimer_debug_descr);
}
static inline void debug_hrtimer_deactivate(struct hrtimer *timer)
{
debug_object_deactivate(timer, &hrtimer_debug_descr);
}
static inline void debug_hrtimer_free(struct hrtimer *timer)
{
debug_object_free(timer, &hrtimer_debug_descr);
}
static void __hrtimer_init(struct hrtimer *timer, clockid_t clock_id,
enum hrtimer_mode mode);
void hrtimer_init_on_stack(struct hrtimer *timer, clockid_t clock_id,
enum hrtimer_mode mode)
{
debug_object_init_on_stack(timer, &hrtimer_debug_descr);
__hrtimer_init(timer, clock_id, mode);
}
EXPORT_SYMBOL_GPL(hrtimer_init_on_stack);
void destroy_hrtimer_on_stack(struct hrtimer *timer)
{
debug_object_free(timer, &hrtimer_debug_descr);
}
EXPORT_SYMBOL_GPL(destroy_hrtimer_on_stack);
#else
static inline void debug_hrtimer_init(struct hrtimer *timer) { }
static inline void debug_hrtimer_activate(struct hrtimer *timer) { }
static inline void debug_hrtimer_deactivate(struct hrtimer *timer) { }
#endif
static inline void
debug_init(struct hrtimer *timer, clockid_t clockid,
enum hrtimer_mode mode)
{
debug_hrtimer_init(timer);
trace_hrtimer_init(timer, clockid, mode);
}
static inline void debug_activate(struct hrtimer *timer)
{
debug_hrtimer_activate(timer);
trace_hrtimer_start(timer);
}
static inline void debug_deactivate(struct hrtimer *timer)
{
debug_hrtimer_deactivate(timer);
trace_hrtimer_cancel(timer);
}
#if defined(CONFIG_NO_HZ_COMMON) || defined(CONFIG_HIGH_RES_TIMERS)
static inline void hrtimer_update_next_timer(struct hrtimer_cpu_base *cpu_base,
struct hrtimer *timer)
{
#ifdef CONFIG_HIGH_RES_TIMERS
cpu_base->next_timer = timer;
#endif
}
static ktime_t __hrtimer_get_next_event(struct hrtimer_cpu_base *cpu_base)
{
struct hrtimer_clock_base *base = cpu_base->clock_base;
ktime_t expires, expires_next = { .tv64 = KTIME_MAX };
unsigned int active = cpu_base->active_bases;
hrtimer_update_next_timer(cpu_base, NULL);
for (; active; base++, active >>= 1) {
struct timerqueue_node *next;
struct hrtimer *timer;
if (!(active & 0x01))
continue;
next = timerqueue_getnext(&base->active);
timer = container_of(next, struct hrtimer, node);
expires = ktime_sub(hrtimer_get_expires(timer), base->offset);
if (expires.tv64 < expires_next.tv64) {
expires_next = expires;
hrtimer_update_next_timer(cpu_base, timer);
}
}
/*
* clock_was_set() might have changed base->offset of any of
* the clock bases so the result might be negative. Fix it up
* to prevent a false positive in clockevents_program_event().
*/
if (expires_next.tv64 < 0)
expires_next.tv64 = 0;
return expires_next;
}
#endif
static inline ktime_t hrtimer_update_base(struct hrtimer_cpu_base *base)
{
ktime_t *offs_real = &base->clock_base[HRTIMER_BASE_REALTIME].offset;
ktime_t *offs_boot = &base->clock_base[HRTIMER_BASE_BOOTTIME].offset;
ktime_t *offs_tai = &base->clock_base[HRTIMER_BASE_TAI].offset;
return ktime_get_update_offsets_now(&base->clock_was_set_seq,
offs_real, offs_boot, offs_tai);
}
/* High resolution timer related functions */
#ifdef CONFIG_HIGH_RES_TIMERS
/*
* High resolution timer enabled?
*/
static bool hrtimer_hres_enabled __read_mostly = true;
unsigned int hrtimer_resolution __read_mostly = LOW_RES_NSEC;
EXPORT_SYMBOL_GPL(hrtimer_resolution);
/*
* Enable / Disable high resolution mode
*/
static int __init setup_hrtimer_hres(char *str)
{
return (kstrtobool(str, &hrtimer_hres_enabled) == 0);
}
__setup("highres=", setup_hrtimer_hres);
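/*
 * Example: booting with "highres=off" on the kernel command line makes
 * kstrtobool() above clear hrtimer_hres_enabled, so the switch to high
 * resolution mode is never attempted.
 */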
/*
* hrtimer_is_hres_enabled - query whether high resolution mode is enabled
*/
static inline int hrtimer_is_hres_enabled(void)
{
return hrtimer_hres_enabled;
}
/*
* Is the high resolution mode active?
*/
static inline int __hrtimer_hres_active(struct hrtimer_cpu_base *cpu_base)
{
return cpu_base->hres_active;
}
static inline int hrtimer_hres_active(void)
{
return __hrtimer_hres_active(this_cpu_ptr(&hrtimer_bases));
}
/*
* Reprogram the event source, checking both queues for the
* next event.
* Called with interrupts disabled and base->lock held
*/
static void
hrtimer_force_reprogram(struct hrtimer_cpu_base *cpu_base, int skip_equal)
{
ktime_t expires_next;
if (!cpu_base->hres_active)
return;
expires_next = __hrtimer_get_next_event(cpu_base);
if (skip_equal && expires_next.tv64 == cpu_base->expires_next.tv64)
return;
cpu_base->expires_next.tv64 = expires_next.tv64;
/*
* If a hang was detected in the last timer interrupt then we
* leave the hang delay active in the hardware. We want the
* system to make progress. That also prevents the following
* scenario:
* T1 expires 50ms from now
* T2 expires 5s from now
*
* T1 is removed, so this code is called and would reprogram
* the hardware to 5s from now. Any hrtimer_start after that
* will not reprogram the hardware due to hang_detected being
* set. So we'd effectively block all timers until the T2 event
* fires.
*/
if (cpu_base->hang_detected)
return;
tick_program_event(cpu_base->expires_next, 1);
}
/*
* When a timer is enqueued and expires earlier than the already enqueued
* timers, we have to check whether it expires earlier than the timer for
* which the clock event device was armed.
*
* Called with interrupts disabled and base->cpu_base.lock held
*/
static void hrtimer_reprogram(struct hrtimer *timer,
struct hrtimer_clock_base *base)
{
struct hrtimer_cpu_base *cpu_base = this_cpu_ptr(&hrtimer_bases);
ktime_t expires = ktime_sub(hrtimer_get_expires(timer), base->offset);
WARN_ON_ONCE(hrtimer_get_expires_tv64(timer) < 0);
/*
* If the timer is not on the current cpu, we cannot reprogram
* the other cpus clock event device.
*/
if (base->cpu_base != cpu_base)
return;
/*
* If the hrtimer interrupt is running, then it will
* reevaluate the clock bases and reprogram the clock event
* device. The callbacks are always executed in hard interrupt
* context so we don't need an extra check for a running
* callback.
*/
if (cpu_base->in_hrtirq)
return;
/*
* CLOCK_REALTIME timer might be requested with an absolute
* expiry time which is less than base->offset. Set it to 0.
*/
if (expires.tv64 < 0)
expires.tv64 = 0;
if (expires.tv64 >= cpu_base->expires_next.tv64)
return;
/* Update the pointer to the next expiring timer */
cpu_base->next_timer = timer;
/*
* If a hang was detected in the last timer interrupt then we
* do not schedule a timer which is earlier than the expiry
* which we enforced in the hang detection. We want the system
* to make progress.
*/
if (cpu_base->hang_detected)
return;
/*
* Program the timer hardware. We enforce the expiry for
* events which are already in the past.
*/
cpu_base->expires_next = expires;
tick_program_event(expires, 1);
}
/*
* Initialize the high resolution related parts of cpu_base
*/
static inline void hrtimer_init_hres(struct hrtimer_cpu_base *base)
{
base->expires_next.tv64 = KTIME_MAX;
base->hang_detected = 0;
base->hres_active = 0;
base->next_timer = NULL;
}
/*
* retrigger_next_event() is called after the clock was set
*
* Called with interrupts disabled via on_each_cpu()
*/
static void retrigger_next_event(void *arg)
{
struct hrtimer_cpu_base *base = this_cpu_ptr(&hrtimer_bases);
if (!base->hres_active)
return;
raw_spin_lock(&base->lock);
hrtimer_update_base(base);
hrtimer_force_reprogram(base, 0);
raw_spin_unlock(&base->lock);
}
/*
* Switch to high resolution mode
*/
static void hrtimer_switch_to_hres(void)
{
struct hrtimer_cpu_base *base = this_cpu_ptr(&hrtimer_bases);
if (tick_init_highres()) {
printk(KERN_WARNING "Could not switch to high resolution "
"mode on CPU %d\n", base->cpu);
return;
}
base->hres_active = 1;
hrtimer_resolution = HIGH_RES_NSEC;
tick_setup_sched_timer();
/* "Retrigger" the interrupt to get things going */
retrigger_next_event(NULL);
}
static void clock_was_set_work(struct work_struct *work)
{
clock_was_set();
}
static DECLARE_WORK(hrtimer_work, clock_was_set_work);
/*
* Called from timekeeping and resume code to reprogram the hrtimer
* interrupt device on all cpus.
*/
void clock_was_set_delayed(void)
{
schedule_work(&hrtimer_work);
}
#else
static inline int __hrtimer_hres_active(struct hrtimer_cpu_base *b) { return 0; }
static inline int hrtimer_hres_active(void) { return 0; }
static inline int hrtimer_is_hres_enabled(void) { return 0; }
static inline void hrtimer_switch_to_hres(void) { }
static inline void
hrtimer_force_reprogram(struct hrtimer_cpu_base *base, int skip_equal) { }
static inline int hrtimer_reprogram(struct hrtimer *timer,
struct hrtimer_clock_base *base)
{
return 0;
}
static inline void hrtimer_init_hres(struct hrtimer_cpu_base *base) { }
static inline void retrigger_next_event(void *arg) { }
#endif /* CONFIG_HIGH_RES_TIMERS */
/*
* Clock realtime was set
*
* Change the offset of the realtime clock vs. the monotonic
* clock.
*
* We might have to reprogram the high resolution timer interrupt. On
* SMP we call the architecture specific code to retrigger _all_ high
* resolution timer interrupts. On UP we just disable interrupts and
* call the high resolution interrupt code.
*/
void clock_was_set(void)
{
#ifdef CONFIG_HIGH_RES_TIMERS
/* Retrigger the CPU local events everywhere */
on_each_cpu(retrigger_next_event, NULL, 1);
#endif
timerfd_clock_was_set();
}
/*
* During resume we might have to reprogram the high resolution timer
* interrupt on all online CPUs. However, all other CPUs will be
* stopped with interrupts disabled so the clock_was_set() call
* must be deferred.
*/
void hrtimers_resume(void)
{
WARN_ONCE(!irqs_disabled(),
"hrtimers_resume() called with IRQs enabled!");
/* Retrigger on the local CPU */
retrigger_next_event(NULL);
/* And schedule a retrigger for all others */
clock_was_set_delayed();
}
/*
* Counterpart to lock_hrtimer_base above:
*/
static inline
void unlock_hrtimer_base(const struct hrtimer *timer, unsigned long *flags)
{
raw_spin_unlock_irqrestore(&timer->base->cpu_base->lock, *flags);
}
/**
* hrtimer_forward - forward the timer expiry
* @timer: hrtimer to forward
* @now: forward past this time
* @interval: the interval to forward
*
* Forward the timer expiry so it will expire in the future.
* Returns the number of overruns.
*
* Can be safely called from the callback function of @timer. If
* called from other contexts @timer must neither be enqueued nor
* running the callback and the caller needs to take care of
* serialization.
*
* Note: This only updates the timer expiry value and does not requeue
* the timer.
*/
u64 hrtimer_forward(struct hrtimer *timer, ktime_t now, ktime_t interval)
{
u64 orun = 1;
ktime_t delta;
delta = ktime_sub(now, hrtimer_get_expires(timer));
if (delta.tv64 < 0)
return 0;
if (WARN_ON(timer->state & HRTIMER_STATE_ENQUEUED))
return 0;
if (interval.tv64 < hrtimer_resolution)
interval.tv64 = hrtimer_resolution;
if (unlikely(delta.tv64 >= interval.tv64)) {
s64 incr = ktime_to_ns(interval);
orun = ktime_divns(delta, incr);
hrtimer_add_expires_ns(timer, incr * orun);
if (hrtimer_get_expires_tv64(timer) > now.tv64)
return orun;
/*
* This (and the ktime_add() below) is the
* correction for exact:
*/
orun++;
}
hrtimer_add_expires(timer, interval);
return orun;
}
EXPORT_SYMBOL_GPL(hrtimer_forward);
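/*
 * Illustrative sketch (not part of this file): a periodic timer
 * implemented by forwarding the expiry from the callback, using the
 * hrtimer_forward_now() wrapper from <linux/hrtimer.h>. The names
 * my_timer_fn() and do_periodic_work() are hypothetical.
 */
#if 0
static enum hrtimer_restart my_timer_fn(struct hrtimer *t)
{
	/* Push the expiry past 'now' in 100ms steps; orun counts the
	 * periods that were missed if the callback ran late. */
	u64 orun = hrtimer_forward_now(t, ms_to_ktime(100));

	do_periodic_work(orun);
	return HRTIMER_RESTART;	/* requeue with the new expiry */
}
#endif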
/*
* enqueue_hrtimer - internal function to (re)start a timer
*
* The timer is inserted in expiry order. Insertion into the
* red black tree is O(log(n)). Must hold the base lock.
*
* Returns 1 when the new timer is the leftmost timer in the tree.
*/
static int enqueue_hrtimer(struct hrtimer *timer,
struct hrtimer_clock_base *base)
{
debug_activate(timer);
base->cpu_base->active_bases |= 1 << base->index;
/* Pairs with the lockless read in hrtimer_is_queued() */
WRITE_ONCE(timer->state, timer->state | HRTIMER_STATE_ENQUEUED);
return timerqueue_add(&base->active, &timer->node);
}
/*
* __remove_hrtimer - internal function to remove a timer
*
* Caller must hold the base lock.
*
* High resolution timer mode reprograms the clock event device when the
* timer is the one which expires next. The caller can disable this by setting
* reprogram to zero. This is useful when the context does a reprogramming
* anyway (e.g. timer interrupt)
*/
static void __remove_hrtimer(struct hrtimer *timer,
struct hrtimer_clock_base *base,
u8 newstate, int reprogram)
{
struct hrtimer_cpu_base *cpu_base = base->cpu_base;
u8 state = timer->state;
/*
* Pairs with the lockless read in hrtimer_is_queued()
*
* We need to preserve PINNED state here, otherwise we may end up
* migrating pinned hrtimers as well.
*/
WRITE_ONCE(timer->state, newstate | (state & HRTIMER_STATE_PINNED));
if (!(state & HRTIMER_STATE_ENQUEUED))
return;
if (!timerqueue_del(&base->active, &timer->node))
cpu_base->active_bases &= ~(1 << base->index);
#ifdef CONFIG_HIGH_RES_TIMERS
/*
* Note: If reprogram is false we do not update
* cpu_base->next_timer. This happens when we remove the first
* timer on a remote cpu. No harm as we never dereference
* cpu_base->next_timer. So the worst that can happen is
* a superfluous call to hrtimer_force_reprogram() on the
* remote cpu later on if the same timer gets enqueued again.
*/
if (reprogram && timer == cpu_base->next_timer)
hrtimer_force_reprogram(cpu_base, 1);
#endif
}
/*
* remove hrtimer, called with base lock held
*/
static inline int
remove_hrtimer(struct hrtimer *timer, struct hrtimer_clock_base *base, bool restart)
{
u8 state = timer->state;
if (state & HRTIMER_STATE_ENQUEUED) {
int reprogram;
/*
* Remove the timer and force reprogramming when high
* resolution mode is active and the timer is on the current
* CPU. If we remove a timer on another CPU, reprogramming is
* skipped. The interrupt event on this CPU is fired and
* reprogramming happens in the interrupt handler. This is a
* rare case and less expensive than a smp call.
*/
debug_deactivate(timer);
reprogram = base->cpu_base == this_cpu_ptr(&hrtimer_bases);
if (!restart)
state = HRTIMER_STATE_INACTIVE;
__remove_hrtimer(timer, base, state, reprogram);
timer->state &= ~HRTIMER_STATE_PINNED;
return 1;
}
return 0;
}
static inline ktime_t hrtimer_update_lowres(struct hrtimer *timer, ktime_t tim,
const enum hrtimer_mode mode)
{
#ifdef CONFIG_TIME_LOW_RES
/*
* CONFIG_TIME_LOW_RES indicates that the system has no way to return
* granular time values. For relative timers we add hrtimer_resolution
* (i.e. one jiffy) to prevent short timeouts.
*/
timer->is_rel = mode & HRTIMER_MODE_REL;
if (timer->is_rel)
tim = ktime_add_safe(tim, ktime_set(0, hrtimer_resolution));
#endif
return tim;
}
/**
* hrtimer_start_range_ns - (re)start an hrtimer on the current CPU
* @timer: the timer to be added
* @tim: expiry time
* @delta_ns: "slack" range for the timer
* @mode: expiry mode: absolute (HRTIMER_MODE_ABS) or
* relative (HRTIMER_MODE_REL)
*/
void hrtimer_start_range_ns(struct hrtimer *timer, ktime_t tim,
u64 delta_ns, const enum hrtimer_mode mode)
{
struct hrtimer_clock_base *base, *new_base;
unsigned long flags;
int leftmost;
base = lock_hrtimer_base(timer, &flags);
/* Remove an active timer from the queue: */
remove_hrtimer(timer, base, true);
if (mode & HRTIMER_MODE_REL)
tim = ktime_add_safe(tim, base->get_time());
tim = hrtimer_update_lowres(timer, tim, mode);
hrtimer_set_expires_range_ns(timer, tim, delta_ns);
/* Switch the timer base, if necessary: */
new_base = switch_hrtimer_base(timer, base, mode & HRTIMER_MODE_PINNED);
/* Update pinned state */
timer->state &= ~HRTIMER_STATE_PINNED;
timer->state |= (!!(mode & HRTIMER_MODE_PINNED)) << HRTIMER_PINNED_SHIFT;
leftmost = enqueue_hrtimer(timer, new_base);
if (!leftmost)
goto unlock;
if (!hrtimer_is_hres_active(timer)) {
/*
* Kick to reschedule the next tick to handle the new timer
* on dynticks target.
*/
if (new_base->cpu_base->nohz_active)
wake_up_nohz_cpu(new_base->cpu_base->cpu);
} else {
hrtimer_reprogram(timer, new_base);
}
unlock:
unlock_hrtimer_base(timer, &flags);
}
EXPORT_SYMBOL_GPL(hrtimer_start_range_ns);
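/*
 * Illustrative sketch (not part of this file): starting a timer with a
 * 1ms slack range. 'my_timer' is a hypothetical, already initialized
 * hrtimer.
 */
#if 0
static void my_start_with_slack(struct hrtimer *my_timer)
{
	/* Expire between 10ms and 11ms from now; the slack allows the
	 * expiry to be coalesced with other timers in that window. */
	hrtimer_start_range_ns(my_timer, ms_to_ktime(10),
			       NSEC_PER_MSEC, HRTIMER_MODE_REL);
}
#endif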
/**
* hrtimer_try_to_cancel - try to deactivate a timer
* @timer: hrtimer to stop
*
* Returns:
* 0 when the timer was not active
* 1 when the timer was active
* -1 when the timer is currently executing the callback function and
* cannot be stopped
*/
int hrtimer_try_to_cancel(struct hrtimer *timer)
{
struct hrtimer_clock_base *base;
unsigned long flags;
int ret = -1;
/*
* Check lockless first. If the timer is not active (neither
* enqueued nor running the callback), nothing to do here. The
* base lock does not serialize against a concurrent enqueue,
* so we can avoid taking it.
*/
if (!hrtimer_active(timer))
return 0;
base = lock_hrtimer_base(timer, &flags);
if (!hrtimer_callback_running(timer))
ret = remove_hrtimer(timer, base, false);
unlock_hrtimer_base(timer, &flags);
return ret;
}
EXPORT_SYMBOL_GPL(hrtimer_try_to_cancel);
/**
* hrtimer_cancel - cancel a timer and wait for the handler to finish.
* @timer: the timer to be cancelled
*
* Returns:
* 0 when the timer was not active
* 1 when the timer was active
*/
int hrtimer_cancel(struct hrtimer *timer)
{
for (;;) {
int ret = hrtimer_try_to_cancel(timer);
if (ret >= 0)
return ret;
udelay(1);
}
}
EXPORT_SYMBOL_GPL(hrtimer_cancel);
/**
* hrtimer_get_remaining - get remaining time for the timer
* @timer: the timer to read
* @adjust: adjust relative timers when CONFIG_TIME_LOW_RES=y
*/
ktime_t __hrtimer_get_remaining(const struct hrtimer *timer, bool adjust)
{
unsigned long flags;
ktime_t rem;
lock_hrtimer_base(timer, &flags);
if (IS_ENABLED(CONFIG_TIME_LOW_RES) && adjust)
rem = hrtimer_expires_remaining_adjusted(timer);
else
rem = hrtimer_expires_remaining(timer);
unlock_hrtimer_base(timer, &flags);
return rem;
}
EXPORT_SYMBOL_GPL(__hrtimer_get_remaining);
#ifdef CONFIG_NO_HZ_COMMON
/**
* hrtimer_get_next_event - get the time until next expiry event
*
* Returns the next expiry time or KTIME_MAX if no timer is pending.
*/
u64 hrtimer_get_next_event(void)
{
struct hrtimer_cpu_base *cpu_base = this_cpu_ptr(&hrtimer_bases);
u64 expires = KTIME_MAX;
unsigned long flags;
raw_spin_lock_irqsave(&cpu_base->lock, flags);
if (!__hrtimer_hres_active(cpu_base))
expires = __hrtimer_get_next_event(cpu_base).tv64;
raw_spin_unlock_irqrestore(&cpu_base->lock, flags);
return expires;
}
#endif
static inline int hrtimer_clockid_to_base(clockid_t clock_id)
{
if (likely(clock_id < MAX_CLOCKS)) {
int base = hrtimer_clock_to_base_table[clock_id];
if (likely(base != HRTIMER_MAX_CLOCK_BASES))
return base;
}
WARN(1, "Invalid clockid %d. Using MONOTONIC\n", clock_id);
return HRTIMER_BASE_MONOTONIC;
}
static void __hrtimer_init(struct hrtimer *timer, clockid_t clock_id,
enum hrtimer_mode mode)
{
struct hrtimer_cpu_base *cpu_base;
int base;
memset(timer, 0, sizeof(struct hrtimer));
cpu_base = raw_cpu_ptr(&hrtimer_bases);
/*
* POSIX magic: Relative CLOCK_REALTIME timers are not affected by
* clock modifications, so they need to become CLOCK_MONOTONIC to
* ensure POSIX compliance.
*/
if (clock_id == CLOCK_REALTIME && mode & HRTIMER_MODE_REL)
clock_id = CLOCK_MONOTONIC;
base = hrtimer_clockid_to_base(clock_id);
timer->base = &cpu_base->clock_base[base];
timerqueue_init(&timer->node);
}
/**
* hrtimer_init - initialize a timer to the given clock
* @timer: the timer to be initialized
* @clock_id: the clock to be used
* @mode: timer mode abs/rel
*/
void hrtimer_init(struct hrtimer *timer, clockid_t clock_id,
enum hrtimer_mode mode)
{
debug_init(timer, clock_id, mode);
__hrtimer_init(timer, clock_id, mode);
}
EXPORT_SYMBOL_GPL(hrtimer_init);
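/*
 * Illustrative sketch (not part of this file): the typical lifecycle
 * of a one-shot hrtimer in client code, using the hrtimer_start()
 * wrapper from <linux/hrtimer.h>. All 'my_*' names are hypothetical.
 */
#if 0
static struct hrtimer my_timer;

static enum hrtimer_restart my_fn(struct hrtimer *t)
{
	/* One-shot: do the work, then let the timer go inactive. */
	return HRTIMER_NORESTART;
}

static void my_setup(void)
{
	hrtimer_init(&my_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	my_timer.function = my_fn;
	/* Fire once, 500us from now. */
	hrtimer_start(&my_timer, ktime_set(0, 500 * NSEC_PER_USEC),
		      HRTIMER_MODE_REL);
}

static void my_teardown(void)
{
	/* Waits for a running callback to finish before returning. */
	hrtimer_cancel(&my_timer);
}
#endif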
/*
* A timer is active, when it is enqueued into the rbtree or the
* callback function is running or it's in the state of being migrated
* to another cpu.
*
* It is important for this function to not return a false negative.
*/
bool hrtimer_active(const struct hrtimer *timer)
{
struct hrtimer_cpu_base *cpu_base;
unsigned int seq;
do {
cpu_base = READ_ONCE(timer->base->cpu_base);
seq = raw_read_seqcount_begin(&cpu_base->seq);
if (((timer->state & ~HRTIMER_STATE_PINNED) !=
HRTIMER_STATE_INACTIVE) || cpu_base->running == timer)
return true;
} while (read_seqcount_retry(&cpu_base->seq, seq) ||
cpu_base != READ_ONCE(timer->base->cpu_base));
return false;
}
EXPORT_SYMBOL_GPL(hrtimer_active);
/*
* The write_seqcount_barrier()s in __run_hrtimer() split the thing into 3
* distinct sections:
*
* - queued: the timer is queued
* - callback: the timer is being run
* - post: the timer is inactive or (re)queued
*
* On the read side we ensure we observe timer->state and cpu_base->running
* from the same section, if anything changed while we looked at it, we retry.
* This includes timer->base changing because sequence numbers alone are
* insufficient for that.
*
* The sequence numbers are required because otherwise we could still observe
* a false negative if the read side got smeared over multiple consecutive
* __run_hrtimer() invocations.
*/
static void __run_hrtimer(struct hrtimer_cpu_base *cpu_base,
struct hrtimer_clock_base *base,
struct hrtimer *timer, ktime_t *now)
{
enum hrtimer_restart (*fn)(struct hrtimer *);
int restart;
lockdep_assert_held(&cpu_base->lock);
debug_deactivate(timer);
cpu_base->running = timer;
/*
* Separate the ->running assignment from the ->state assignment.
*
* As with a regular write barrier, this ensures the read side in
* hrtimer_active() cannot observe cpu_base->running == NULL &&
* timer->state == INACTIVE.
*/
raw_write_seqcount_barrier(&cpu_base->seq);
__remove_hrtimer(timer, base, HRTIMER_STATE_INACTIVE, 0);
fn = timer->function;
/*
* Clear the 'is relative' flag for the TIME_LOW_RES case. If the
* timer is restarted with a period then it becomes an absolute
* timer. If it's not restarted it does not matter.
*/
if (IS_ENABLED(CONFIG_TIME_LOW_RES))
timer->is_rel = false;
/*
* Because we run timers from hardirq context, there is no chance
* they get migrated to another cpu, therefore it's safe to unlock
* the timer base.
*/
raw_spin_unlock(&cpu_base->lock);
trace_hrtimer_expire_entry(timer, now);
restart = fn(timer);
trace_hrtimer_expire_exit(timer);
raw_spin_lock(&cpu_base->lock);
/*
* Note: We clear the running state after enqueue_hrtimer and
* we do not reprogram the event hardware. Happens either in
* hrtimer_start_range_ns() or in hrtimer_interrupt()
*
* Note: Because we dropped the cpu_base->lock above,
* hrtimer_start_range_ns() can have popped in and enqueued the timer
* for us already.
*/
if (restart != HRTIMER_NORESTART &&
!(timer->state & HRTIMER_STATE_ENQUEUED))
enqueue_hrtimer(timer, base);
/*
* Separate the ->running assignment from the ->state assignment.
*
* As with a regular write barrier, this ensures the read side in
* hrtimer_active() cannot observe cpu_base->running == NULL &&
* timer->state == INACTIVE.
*/
raw_write_seqcount_barrier(&cpu_base->seq);
WARN_ON_ONCE(cpu_base->running != timer);
cpu_base->running = NULL;
}
static void __hrtimer_run_queues(struct hrtimer_cpu_base *cpu_base, ktime_t now)
{
struct hrtimer_clock_base *base = cpu_base->clock_base;
unsigned int active = cpu_base->active_bases;
for (; active; base++, active >>= 1) {
struct timerqueue_node *node;
ktime_t basenow;
if (!(active & 0x01))
continue;
basenow = ktime_add(now, base->offset);
while ((node = timerqueue_getnext(&base->active))) {
struct hrtimer *timer;
timer = container_of(node, struct hrtimer, node);
/*
 * The immediate goal of using the softexpires is to
 * minimize wakeups, not to run timers at the earliest
 * interrupt after their soft expiration. This allows us
 * to avoid a Priority Search Tree, which can answer a
 * stabbing query for overlapping intervals, and instead
 * use the simple BST we already have.
 * We don't add extra wakeups by delaying timers that are
 * to the right of a not-yet-expired timer, because that
 * timer will have to trigger a wakeup anyway.
 */
if (basenow.tv64 < hrtimer_get_softexpires_tv64(timer))
break;
__run_hrtimer(cpu_base, base, timer, &basenow);
}
}
}
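/*
 * Worked example for the early break above (made-up values): timer A
 * with soft=100/hard=110 and timer B with soft=105/hard=106 sit in the
 * queue in hard-expiry order: B, A. At now=104 the loop breaks on B
 * (soft 105 > 104), so A is delayed even though its soft expiry has
 * passed. No wakeup is added: the event device is programmed for B's
 * hard expiry at the latest, and both timers then run in that one
 * interrupt.
 */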
#ifdef CONFIG_HIGH_RES_TIMERS
/*
* High resolution timer interrupt
* Called with interrupts disabled
*/
void hrtimer_interrupt(struct clock_event_device *dev)
{
struct hrtimer_cpu_base *cpu_base = this_cpu_ptr(&hrtimer_bases);
ktime_t expires_next, now, entry_time, delta;
int retries = 0;
BUG_ON(!cpu_base->hres_active);
cpu_base->nr_events++;
dev->next_event.tv64 = KTIME_MAX;
raw_spin_lock(&cpu_base->lock);
entry_time = now = hrtimer_update_base(cpu_base);
retry:
cpu_base->in_hrtirq = 1;
/*
 * We set expires_next to KTIME_MAX here with cpu_base->lock
 * held to prevent a timer from being enqueued in our queue via
 * the migration code. This does not affect enqueueing of
 * timers which run their callback and need to be requeued on
 * this CPU.
 */
cpu_base->expires_next.tv64 = KTIME_MAX;
__hrtimer_run_queues(cpu_base, now);
/* Reevaluate the clock bases for the next expiry */
expires_next = __hrtimer_get_next_event(cpu_base);
/*
* Store the new expiry value so the migration code can verify
* against it.
*/
cpu_base->expires_next = expires_next;
cpu_base->in_hrtirq = 0;
raw_spin_unlock(&cpu_base->lock);
/* Reprogramming necessary? */
if (!tick_program_event(expires_next, 0)) {
cpu_base->hang_detected = 0;
return;
}
/*
 * The next timer was already expired due to:
 * - tracing
 * - long lasting callbacks
 * - being scheduled away when running in a VM
 *
 * We need to prevent the hrtimer interrupt routine from looping
 * forever. We give it 3 attempts to avoid overreacting to some
 * spurious event.
 *
 * Acquire the base lock for updating the offsets and retrieving
 * the current time.
 */
raw_spin_lock(&cpu_base->lock);
now = hrtimer_update_base(cpu_base);
cpu_base->nr_retries++;
if (++retries < 3)
goto retry;
/*
 * Give the system a chance to do something other than looping
 * here. We stored the entry time, so we know exactly how long
 * we spent here. We schedule the next event this amount of
 * time away.
 */
cpu_base->nr_hangs++;
cpu_base->hang_detected = 1;
raw_spin_unlock(&cpu_base->lock);
delta = ktime_sub(now, entry_time);
if ((unsigned int)delta.tv64 > cpu_base->max_hang_time)
cpu_base->max_hang_time = (unsigned int) delta.tv64;
/*
 * Limit it to a sensible value: delay the next event by the
 * time we already spent in the handler, but by no more than
 * 100ms, so the CPU gets a chance to catch up.
 */
if (delta.tv64 > 100 * NSEC_PER_MSEC)
expires_next = ktime_add_ns(now, 100 * NSEC_PER_MSEC);
else
expires_next = ktime_add(now, delta);
tick_program_event(expires_next, 1);
printk_once(KERN_WARNING "hrtimer: interrupt took %llu ns\n",
ktime_to_ns(delta));
}
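/*
 * Worked example for the hang path above (made-up values): if the
 * retries took delta = 250ms, the clamp programs the next event at
 * now + 100ms; for delta = 20ms the next event lands at now + 20ms.
 * The forced delay is thus min(delta, 100ms).
 */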
/*
* local version of hrtimer_peek_ahead_timers() called with interrupts
* disabled.
*/
static inline void __hrtimer_peek_ahead_timers(void)
{
struct tick_device *td;
if (!hrtimer_hres_active())
return;
td = this_cpu_ptr(&tick_cpu_device);
if (td && td->evtdev)
hrtimer_interrupt(td->evtdev);
}
#else /* CONFIG_HIGH_RES_TIMERS */
static inline void __hrtimer_peek_ahead_timers(void) { }
#endif /* !CONFIG_HIGH_RES_TIMERS */
/*
* Called from run_local_timers in hardirq context every jiffy
*/
void hrtimer_run_queues(void)
{
struct hrtimer_cpu_base *cpu_base = this_cpu_ptr(&hrtimer_bases);
ktime_t now;
if (__hrtimer_hres_active(cpu_base))
return;
/*
 * This _is_ ugly: We have to check periodically whether we
 * can switch to highres and / or nohz mode. The clocksource
 * switch happens with xtime_lock held. Notification from
 * there only sets the check bit in the tick_oneshot code,
 * otherwise we might deadlock vs. xtime_lock.
 */
if (tick_check_oneshot_change(!hrtimer_is_hres_enabled())) {
hrtimer_switch_to_hres();
return;
}
raw_spin_lock(&cpu_base->lock);
now = hrtimer_update_base(cpu_base);
__hrtimer_run_queues(cpu_base, now);
raw_spin_unlock(&cpu_base->lock);
}
/*
* Sleep related functions:
*/
static enum hrtimer_restart hrtimer_wakeup(struct hrtimer *timer)
{
struct hrtimer_sleeper *t =
container_of(timer, struct hrtimer_sleeper, timer);
struct task_struct *task = t->task;
t->task = NULL;
if (task)
wake_up_process(task);
return HRTIMER_NORESTART;
}
void hrtimer_init_sleeper(struct hrtimer_sleeper *sl, struct task_struct *task)
{
sl->timer.function = hrtimer_wakeup;
sl->task = task;
}
EXPORT_SYMBOL_GPL(hrtimer_init_sleeper);
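/*
 * The sleeper pattern (see do_nanosleep() below for the canonical
 * user): initialize the sleeper with the task to wake, start the
 * timer, then schedule(). hrtimer_wakeup() clears ->task before
 * waking, so "t->task == NULL" afterwards means the timeout fired
 * rather than a signal or a spurious wakeup.
 */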
static int __sched do_nanosleep(struct hrtimer_sleeper *t, enum hrtimer_mode mode)
{
hrtimer_init_sleeper(t, current);
do {
set_current_state(TASK_INTERRUPTIBLE);
hrtimer_start_expires(&t->timer, mode);
if (likely(t->task))
freezable_schedule();
hrtimer_cancel(&t->timer);
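		/*
		 * Further iterations re-arm in absolute mode: the
		 * expiry stored in the timer is absolute by now, so a
		 * spurious wakeup resumes the same deadline instead of
		 * restarting a full relative timeout.
		 */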
mode = HRTIMER_MODE_ABS;
} while (t->task && !signal_pending(current));
__set_current_state(TASK_RUNNING);
return t->task == NULL;
}
static int update_rmtp(struct hrtimer *timer, struct timespec __user *rmtp)
{
struct timespec rmt;
ktime_t rem;
rem = hrtimer_expires_remaining(timer);
if (rem.tv64 <= 0)
return 0;
rmt = ktime_to_timespec(rem);
if (copy_to_user(rmtp, &rmt, sizeof(*rmtp)))
return -EFAULT;
return 1;
}
long __sched hrtimer_nanosleep_restart(struct restart_block *restart)
{
struct hrtimer_sleeper t;
struct timespec __user *rmtp;
int ret = 0;
hrtimer_init_on_stack(&t.timer, restart->nanosleep.clockid,
HRTIMER_MODE_ABS);
hrtimer_set_expires_tv64(&t.timer, restart->nanosleep.expires);
if (do_nanosleep(&t, HRTIMER_MODE_ABS))
goto out;
rmtp = restart->nanosleep.rmtp;
if (rmtp) {
ret = update_rmtp(&t.timer, rmtp);
if (ret <= 0)
goto out;
}
/* The other values in restart are already filled in */
ret = -ERESTART_RESTARTBLOCK;
out:
destroy_hrtimer_on_stack(&t.timer);
return ret;
}
long hrtimer_nanosleep(struct timespec *rqtp, struct timespec __user *rmtp,
const enum hrtimer_mode mode, const clockid_t clockid)
{
struct restart_block *restart;
struct hrtimer_sleeper t;
int ret = 0;
u64 slack;
slack = current->timer_slack_ns;
if (dl_task(current) || rt_task(current))
slack = 0;
hrtimer_init_on_stack(&t.timer, clockid, mode);
hrtimer_set_expires_range_ns(&t.timer, timespec_to_ktime(*rqtp), slack);
if (do_nanosleep(&t, mode))
goto out;
/* Absolute timers do not update the rmtp value and are not restarted: */
if (mode == HRTIMER_MODE_ABS) {
ret = -ERESTARTNOHAND;
goto out;
}
if (rmtp) {
ret = update_rmtp(&t.timer, rmtp);
if (ret <= 0)
goto out;
}
restart = &current->restart_block;
restart->nanosleep.clockid = t.timer.base->clockid;
restart->nanosleep.rmtp = rmtp;
restart->nanosleep.expires = hrtimer_get_expires_tv64(&t.timer);
set_restart_fn(restart, hrtimer_nanosleep_restart);
ret = -ERESTART_RESTARTBLOCK;
out:
destroy_hrtimer_on_stack(&t.timer);
return ret;
}
SYSCALL_DEFINE2(nanosleep, struct timespec __user *, rqtp,
struct timespec __user *, rmtp)
{
struct timespec tu;
if (copy_from_user(&tu, rqtp, sizeof(tu)))
return -EFAULT;
if (!timespec_valid(&tu))
return -EINVAL;
return hrtimer_nanosleep(&tu, rmtp, HRTIMER_MODE_REL, CLOCK_MONOTONIC);
}
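/*
 * Userspace view (illustrative example, not part of this file):
 *
 *	struct timespec req = { .tv_sec = 0, .tv_nsec = 5000000 };
 *	struct timespec rem;
 *
 *	if (nanosleep(&req, &rem) == -1 && errno == EINTR)
 *		;	/+ rem now holds the remaining time +/
 *
 * (comment delimiters in the snippet escaped to keep this block valid)
 *
 * -ERESTART_RESTARTBLOCK never reaches userspace: the signal code
 * either converts it to -EINTR or restarts the syscall through
 * hrtimer_nanosleep_restart() via the restart block.
 */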
/*
* Functions related to boot-time initialization:
*/
int hrtimers_prepare_cpu(unsigned int cpu)
{
struct hrtimer_cpu_base *cpu_base = &per_cpu(hrtimer_bases, cpu);
int i;
for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++) {
cpu_base->clock_base[i].cpu_base = cpu_base;
timerqueue_init_head(&cpu_base->clock_base[i].active);
}
cpu_base->active_bases = 0;
cpu_base->cpu = cpu;
hrtimer_init_hres(cpu_base);
return 0;
}
#ifdef CONFIG_HOTPLUG_CPU
static void migrate_hrtimer_list(struct hrtimer_clock_base *old_base,
struct hrtimer_clock_base *new_base,
bool remove_pinned)
{
struct hrtimer *timer;
struct timerqueue_node *node;
struct timerqueue_head pinned;
int is_pinned;
bool is_hotplug = !cpu_online(old_base->cpu_base->cpu);
timerqueue_init_head(&pinned);
while ((node = timerqueue_getnext(&old_base->active))) {
timer = container_of(node, struct hrtimer, node);
if (is_hotplug)
BUG_ON(hrtimer_callback_running(timer));
debug_deactivate(timer);
/*
 * Mark it as ENQUEUED, not INACTIVE, otherwise the
 * timer could be seen as !active and just vanish away
 * under us on another CPU.
 */
__remove_hrtimer(timer, old_base, HRTIMER_STATE_ENQUEUED, 0);
is_pinned = timer->state & HRTIMER_STATE_PINNED;
if (!remove_pinned && is_pinned) {
timerqueue_add(&pinned, &timer->node);
continue;
}
timer->base = new_base;
/*
 * Enqueue the timers on the new CPU. This does not
 * reprogram the event device in case the timer
 * expires before the earliest timer on this CPU; we run
 * hrtimer_interrupt() after everything has been migrated
 * to sort out already-expired timers and reprogram the
 * event device.
 */
enqueue_hrtimer(timer, new_base);
}
/* Re-queue pinned timers for the non-hotplug use case */
while ((node = timerqueue_getnext(&pinned))) {
timer = container_of(node, struct hrtimer, node);
timerqueue_del(&pinned, &timer->node);
enqueue_hrtimer(timer, old_base);
}
}
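/*
 * Common helper for the two migration paths below: hrtimers_dead_cpu()
 * (CPU hotplug; remove_pinned = true, every timer leaves the dead CPU)
 * and hrtimer_quiesce_cpu() (CPU isolation; remove_pinned = false,
 * PINNED timers are re-queued on the old base by
 * migrate_hrtimer_list()).
 */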
static void __migrate_hrtimers(unsigned int scpu, bool remove_pinned)
{
struct hrtimer_cpu_base *old_base, *new_base;
unsigned long flags;
int i;
local_irq_save(flags);
old_base = &per_cpu(hrtimer_bases, scpu);
new_base = this_cpu_ptr(&hrtimer_bases);
/*
 * The caller is globally serialized and nobody else
 * takes two locks at once, so deadlock is not possible.
 */
raw_spin_lock(&new_base->lock);
raw_spin_lock_nested(&old_base->lock, SINGLE_DEPTH_NESTING);
for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++) {
migrate_hrtimer_list(&old_base->clock_base[i],
&new_base->clock_base[i], remove_pinned);
}
raw_spin_unlock(&old_base->lock);
raw_spin_unlock(&new_base->lock);
/* Check if we have expired work to do */
__hrtimer_peek_ahead_timers();
local_irq_restore(flags);
}
int hrtimers_dead_cpu(unsigned int scpu)
{
BUG_ON(cpu_online(scpu));
tick_cancel_sched_timer(scpu);
__migrate_hrtimers(scpu, true);
return 0;
}
void hrtimer_quiesce_cpu(void *cpup)
{
__migrate_hrtimers(*(int *)cpup, false);
}
#endif /* CONFIG_HOTPLUG_CPU */
void __init hrtimers_init(void)
{
hrtimers_prepare_cpu(smp_processor_id());
}
/**
 * schedule_hrtimeout_range_clock - sleep until timeout
 * @expires: timeout value (ktime_t)
 * @delta: slack in expires timeout (ktime_t)
 * @mode: timer mode, HRTIMER_MODE_ABS or HRTIMER_MODE_REL
 * @clock: timer clock, CLOCK_MONOTONIC or CLOCK_REALTIME
 *
 * Returns 0 when the timer has expired, otherwise -EINTR.
 */
int __sched
schedule_hrtimeout_range_clock(ktime_t *expires, u64 delta,
const enum hrtimer_mode mode, int clock)
{
struct hrtimer_sleeper t;
/*
* Optimize when a zero timeout value is given. It does not
* matter whether this is an absolute or a relative time.
*/
if (expires && !expires->tv64) {
__set_current_state(TASK_RUNNING);
return 0;
}
/*
* A NULL parameter means "infinite"
*/
if (!expires) {
schedule();
return -EINTR;
}
hrtimer_init_on_stack(&t.timer, clock, mode);
hrtimer_set_expires_range_ns(&t.timer, *expires, delta);
hrtimer_init_sleeper(&t, current);
hrtimer_start_expires(&t.timer, mode);
if (likely(t.task))
schedule();
hrtimer_cancel(&t.timer);
destroy_hrtimer_on_stack(&t.timer);
__set_current_state(TASK_RUNNING);
return !t.task ? 0 : -EINTR;
}
/**
* schedule_hrtimeout_range - sleep until timeout
* @expires: timeout value (ktime_t)
* @delta: slack in expires timeout (ktime_t)
* @mode: timer mode, HRTIMER_MODE_ABS or HRTIMER_MODE_REL
*
* Make the current task sleep until the given expiry time has
* elapsed. The routine will return immediately unless
* the current task state has been set (see set_current_state()).
*
* The @delta argument gives the kernel the freedom to schedule the
* actual wakeup to a time that is both power and performance friendly.
 * The kernel gives the normal best-effort behavior for "@expires+@delta",
 * but may decide to fire the timer earlier, though never earlier than
 * @expires.
*
* You can set the task state as follows -
*
 * %TASK_UNINTERRUPTIBLE - at least @expires time is guaranteed to
 * pass before the routine returns.
*
* %TASK_INTERRUPTIBLE - the routine may return early if a signal is
* delivered to the current task.
*
* The current task state is guaranteed to be TASK_RUNNING when this
* routine returns.
*
 * Returns 0 when the timer has expired, otherwise -EINTR.
*/
int __sched schedule_hrtimeout_range(ktime_t *expires, u64 delta,
const enum hrtimer_mode mode)
{
return schedule_hrtimeout_range_clock(expires, delta, mode,
CLOCK_MONOTONIC);
}
EXPORT_SYMBOL_GPL(schedule_hrtimeout_range);
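/*
 * Illustrative usage (made-up values): sleep for roughly 10ms while
 * granting the kernel 1ms of slack to coalesce wakeups:
 *
 *	ktime_t to = ms_to_ktime(10);
 *
 *	set_current_state(TASK_UNINTERRUPTIBLE);
 *	schedule_hrtimeout_range(&to, NSEC_PER_MSEC, HRTIMER_MODE_REL);
 */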
/**
* schedule_hrtimeout - sleep until timeout
* @expires: timeout value (ktime_t)
* @mode: timer mode, HRTIMER_MODE_ABS or HRTIMER_MODE_REL
*
* Make the current task sleep until the given expiry time has
* elapsed. The routine will return immediately unless
* the current task state has been set (see set_current_state()).
*
* You can set the task state as follows -
*
 * %TASK_UNINTERRUPTIBLE - at least @expires time is guaranteed to
 * pass before the routine returns.
*
* %TASK_INTERRUPTIBLE - the routine may return early if a signal is
* delivered to the current task.
*
* The current task state is guaranteed to be TASK_RUNNING when this
* routine returns.
*
 * Returns 0 when the timer has expired, otherwise -EINTR.
*/
int __sched schedule_hrtimeout(ktime_t *expires,
const enum hrtimer_mode mode)
{
return schedule_hrtimeout_range(expires, 0, mode);
}
EXPORT_SYMBOL_GPL(schedule_hrtimeout);