lucaswei 2b5e67e8a2 Merge android-4.9-q (4.9.232) into android-msm-pixel-4.9-lts
Merge 4.9.232 into android-4.9-q
Linux 4.9.232
    perf: Make perf able to build with latest libbfd
    perf tools: Fix snprint warnings for gcc 8
    perf annotate: Use asprintf when formatting objdump command line
    perf probe: Fix to check blacklist address correctly
    xfs: set format back to extents if xfs_bmap_extents_to_btree
  * regmap: debugfs: check count when read regmap file
      drivers/base/regmap/regmap-debugfs.c
    drivers/net/wan/x25_asy: Fix to make it work
    ip6_gre: fix null-ptr-deref in ip6gre_init_net()
  * tcp: allow at most one TLP probe per flight
      include/linux/tcp.h
      net/ipv4/tcp_input.c
      net/ipv4/tcp_output.c
    AX.25: Prevent integer overflows in connect and sendmsg
    rxrpc: Fix sendmsg() returning EPIPE due to recvmsg() returning ENODATA
  * net: udp: Fix wrong clean up for IS_UDPLITE macro
      net/ipv4/udp.c
      net/ipv6/udp.c
  * net-sysfs: add a newline when printing 'tx_timeout' by sysfs
      net/core/net-sysfs.c
  * dev: Defer free of skbs in flush_backlog
      net/core/dev.c
    AX.25: Prevent out-of-bounds read in ax25_sendmsg()
    AX.25: Fix out-of-bounds read in ax25_connect()
    ath9k: Fix regression with Atheros 9271
    ath9k: Fix general protection fault in ath9k_hif_usb_rx_cb
    parisc: Add atomic64_set_release() define to avoid CPU soft lockups
    io-mapping: indicate mapping failure
  * mm/memcg: fix refcount error while moving and swapping
      mm/memcontrol.c
  * Makefile: Fix GCC_TOOLCHAIN_DIR prefix for Clang cross compilation
      Makefile
  * vt: Reject zero-sized screen buffer size.
      drivers/tty/vt/vt.c
    serial: 8250_mtk: Fix high-speed baud rates clamping
    serial: 8250: fix null-ptr-deref in serial8250_start_tx()
    staging: comedi: addi_apci_1564: check INSN_CONFIG_DIGITAL_TRIG shift
    staging: comedi: addi_apci_1500: check INSN_CONFIG_DIGITAL_TRIG shift
    staging: comedi: ni_6527: fix INSN_CONFIG_DIGITAL_TRIG support
    staging: comedi: addi_apci_1032: check INSN_CONFIG_DIGITAL_TRIG shift
    staging: wlan-ng: properly check endpoint types
    Revert "cifs: Fix the target file was deleted when rename failed."
  * usb: xhci: Fix ASM2142/ASM3142 DMA addressing
      drivers/usb/host/xhci-pci.c
    usb: xhci-mtk: fix the failure of bandwidth allocation
    x86: math-emu: Fix up 'cmp' insn for clang ias
  * arm64: Use test_tsk_thread_flag() for checking TIF_SINGLESTEP
      arch/arm64/kernel/debug-monitors.c
    usb: gadget: udc: gr_udc: fix memleak on error handling path in gr_ep_init()
    dmaengine: ioat setting ioat timeout as module parameter
  * regmap: dev_get_regmap_match(): fix string comparison
      drivers/base/regmap/regmap.c
    dmaengine: tegra210-adma: Fix runtime PM imbalance on error
  * HID: apple: Disable Fn-key key-re-mapping on clone keyboards
      drivers/hid/hid-apple.c
    HID: i2c-hid: add Mediacom FlexBook edge13 to descriptor override
    scripts/decode_stacktrace: strip basepath from all paths
    net: smc91x: Fix possible memory leak in smc_drv_probe()
    net: dp83640: fix SIOCSHWTSTAMP to update the struct with actual configuration
  * ax88172a: fix ax88172a_unbind() failures
      drivers/net/usb/ax88172a.c
    hippi: Fix a size used in a 'pci_free_consistent()' in an error handling path
    bnxt_en: Fix race when modifying pause settings.
    btrfs: fix mount failure caused by race with umount
    btrfs: fix double free on ulist after backref resolution failure
    ASoC: rt5670: Correct RT5670_LDO_SEL_MASK
  * ALSA: info: Drop WARN_ON() from buffer NULL sanity check
      sound/core/info.c
    uprobes: Change handle_swbp() to send SIGTRAP with si_code=SI_KERNEL, to fix GDB regression
    SUNRPC reverting d03727b248d0 ("NFSv4 fix CLOSE not waiting for direct IO compeletion")
    drm/nouveau/i2c/g94-: increase NV_PMGR_DP_AUXCTL_TRANSACTREQ timeout
    net: sky2: initialize return of gm_phy_read
    drivers/net/wan/lapbether: Fixed the value of hard_header_len
    xtensa: update *pos in cpuinfo_op.next
    xtensa: fix __sync_fetch_and_{and,or}_4 declarations
    scsi: scsi_transport_spi: Fix function pointer check
    mac80211: allow rx of mesh eapol frames with default rx key
    pinctrl: amd: fix npins for uart0 in kerncz_groups
  * UPSTREAM: xtables: extend matches and targets with .usersize
      net/netfilter/xt_CT.c
      net/netfilter/xt_TEE.c
      net/netfilter/xt_bpf.c
      net/netfilter/xt_connlimit.c
      net/netfilter/xt_hashlimit.c
      net/netfilter/xt_limit.c
      net/netfilter/xt_quota.c
      net/netfilter/xt_string.c
  * UPSTREAM: ip6tables: use match, target and data copy_to_user helpers
      net/ipv6/netfilter/ip6_tables.c
  * UPSTREAM: iptables: use match, target and data copy_to_user helpers
      net/ipv4/netfilter/ip_tables.c
  * UPSTREAM: xtables: add xt_match, xt_target and data copy_to_user functions
      include/linux/netfilter/x_tables.h
      net/netfilter/x_tables.c
    Merge 4.9.231 into android-4.9-q
Linux 4.9.231
    x86/cpu: Move x86_cache_bits settings
  * irqchip/gic: Atomically update affinity
      drivers/irqchip/irq-gic.c
  * sched/fair: handle case of task_h_load() returning 0
      kernel/sched/fair.c
  * arm64: ptrace: Override SPSR.SS when single-stepping is enabled
      arch/arm64/include/asm/debug-monitors.h
      arch/arm64/kernel/debug-monitors.c
      arch/arm64/kernel/ptrace.c
    misc: atmel-ssc: lock with mutex instead of spinlock
    dmaengine: fsl-edma: Fix NULL pointer exception in fsl_edma_tx_handler
    hwmon: (emc2103) fix unable to change fan pwm1_enable attribute
    MIPS: Fix build for LTS kernel caused by backporting lpj adjustment
  * timer: Fix wheel index calculation on last level
      kernel/time/timer.c
    uio_pdrv_genirq: fix use without device tree and no interrupt
    Input: i8042 - add Lenovo XiaoXin Air 12 to i8042 nomux list
    mei: bus: don't clean driver pointer
  * fuse: Fix parameter for FS_IOC_{GET,SET}FLAGS
      fs/fuse/file.c
    virtio: virtio_console: add missing MODULE_DEVICE_TABLE() for rproc serial
    USB: serial: option: add Quectel EG95 LTE modem
    USB: serial: option: add GosunCn GM500 series
    USB: serial: ch341: add new Product ID for CH340
    USB: serial: cypress_m8: enable Simply Automated UPB PIM
    USB: serial: iuu_phoenix: fix memory corruption
    usb: gadget: function: fix missing spinlock in f_uac1_legacy
    usb: chipidea: core: add wakeup support for extcon
    usb: dwc2: Fix shutdown callback in platform
    USB: c67x00: fix use after free in c67x00_giveback_urb
  * ALSA: usb-audio: Fix race against the error recovery URB submission
      sound/usb/midi.c
    ALSA: line6: Perform sanity check for each URB creation
  * usb: core: Add a helper function to check the validity of EP type in URB
      drivers/usb/core/urb.c
      include/linux/usb.h
  * HID: magicmouse: do not set up autorepeat
      drivers/hid/hid-magicmouse.c
    mtd: rawnand: brcmnand: fix CS0 layout
    perf stat: Zero all the 'ena' and 'run' array slot stats for interval mode
    ARM: dts: socfpga: Align L2 cache-controller nodename with dtschema
    Revert "thermal: mediatek: fix register index error"
    staging: comedi: verify array index is correct before using it
    usb: gadget: udc: atmel: fix uninitialized read in debug printk
    spi: spi-sun6i: sun6i_spi_transfer_one(): fix setting of clock rate
    iio:health:afe4404 Fix timestamp alignment and prevent data leak.
    Revert "usb/ohci-platform: Fix a warning when hibernating"
  * Revert "usb/xhci-plat: Set PM runtime as active on resume"
      drivers/usb/host/xhci-plat.c
    Revert "usb/ehci-platform: Set PM runtime as active on resume"
    net: dsa: bcm_sf2: Fix node reference count
    spi: fix initial SPI_SR value in spi-fsl-dspi
    iio:health:afe4403 Fix timestamp alignment and prevent data leak.
    iio:pressure:ms5611 Fix buffer element alignment
    iio: pressure: zpa2326: handle pm_runtime_get_sync failure
    iio: mma8452: Add missed iio_device_unregister() call in mma8452_probe()
    iio: magnetometer: ak8974: Fix runtime PM imbalance on error
    iio:magnetometer:ak8974: Fix alignment and data leak issues
    i2c: eg20t: Load module automatically if ID matches
  * cgroup: Fix sock_cgroup_data on big-endian.
      include/linux/cgroup-defs.h
  * cgroup: fix cgroup_sk_alloc() for sk_clone_lock()
      include/linux/cgroup-defs.h
      include/linux/cgroup.h
      kernel/cgroup.c
      net/core/sock.c
  * tcp: md5: allow changing MD5 keys in all socket states
      net/ipv4/tcp.c
  * tcp: md5: do not send silly options in SYNCOOKIES
      net/ipv4/tcp_output.c
  * tcp: make sure listeners don't initialize congestion-control state
      net/ipv4/tcp.c
      net/ipv4/tcp_cong.c
  * genetlink: remove genl_bind
      include/net/genetlink.h
      net/netlink/genetlink.c
  * tcp: md5: refine tcp_md5_do_add()/tcp_md5_hash_key() barriers
      net/ipv4/tcp.c
      net/ipv4/tcp_ipv4.c
  * tcp: md5: add missing memory barriers in tcp_md5_do_add()/tcp_md5_hash_key()
      net/ipv4/tcp.c
      net/ipv4/tcp_ipv4.c
    net: usb: qmi_wwan: add support for Quectel EG95 LTE modem
  * net: Added pointer check for dst->ops->neigh_lookup in dst_neigh_lookup_skb
      include/net/dst.h
    llc: make sure applications use ARPHRD_ETHER
  * l2tp: remove skb_dst_set() from l2tp_xmit_skb()
      net/l2tp/l2tp_core.c
  * ipv4: fill fl4_icmp_{type,code} in ping_v4_sendmsg
      net/ipv4/ping.c
    s390/mm: fix huge pte soft dirty copying
    ARC: elf: use right ELF_ARCH
    ARC: entry: fix potential EFA clobber when TIF_SYSCALL_TRACE
    drm/radeon: fix double free
    btrfs: fix fatal extent_buffer readahead vs releasepage race
    Revert "ath9k: Fix general protection fault in ath9k_hif_usb_rx_cb"
    KVM: x86: bit 8 of non-leaf PDPEs is not reserved
  * KVM: arm64: Fix definition of PAGE_HYP_DEVICE
      arch/arm64/include/asm/pgtable-prot.h
  * ALSA: usb-audio: add quirk for MacroSilicon MS2109
      sound/usb/quirks-table.h
    ALSA: hda - let hs_mic be picked ahead of hp_mic
    ALSA: opl3: fix infoleak in opl3
    net: macb: mark device wake capable when "magic-packet" property present
    bnxt_en: fix NULL dereference in case SR-IOV configuration fails
  * arm64: kgdb: Fix single-step exception handling oops
      arch/arm64/kernel/kgdb.c
  * ALSA: compress: fix partial_drain completion state
      include/sound/compress_driver.h
      sound/core/compress_offload.c
    smsc95xx: avoid memory leak in smsc95xx_bind
    smsc95xx: check return value of smsc95xx_reset
    net: cxgb4: fix return error value in t4_prep_fw
    scsi: mptscsih: Fix read sense data size
    ARM: imx6: add missing put_device() call in imx6q_suspend_init()
    cifs: update ctime and mtime during truncate
    s390/kasan: fix early pgm check handler execution
  * spi: spidev: fix a potential use-after-free in spidev_release()
      drivers/spi/spidev.c
  * spi: spidev: fix a race between spidev_release and spidev_remove
      drivers/spi/spidev.c
    gpu: host1x: Detach driver on unregister
    KVM: s390: reduce number of IO pins to 1
    ANDROID: cuttlefish_defconfig: Drop built-in cmdline (except nopti)
    Merge 4.9.230 into android-4.9-q
Linux 4.9.230
  * efi: Make it possible to disable efivar_ssdt entirely
      drivers/firmware/efi/Kconfig
  * netfilter: nf_conntrack_h323: lost .data_len definition for Q.931/ipv6
      net/netfilter/nf_conntrack_h323_main.c
    MIPS: Add missing EHB in mtc0 -> mfc0 sequence for DSPen
    cifs: Fix the target file was deleted when rename failed.
    SMB3: Honor persistent/resilient handle flags for multiuser mounts
    SMB3: Honor 'seal' flag for multiuser mounts
  * Revert "ALSA: usb-audio: Improve frames size computation"
      sound/usb/card.h
      sound/usb/endpoint.c
      sound/usb/endpoint.h
      sound/usb/pcm.c
    i2c: algo-pca: Add 0x78 as SCL stuck low status for PCA9665
    virtio-blk: free vblk-vqs in error path of virtblk_probe()
    hwmon: (acpi_power_meter) Fix potential memory leak in acpi_power_meter_add()
    hwmon: (max6697) Make sure the OVERT mask is set correctly
    cxgb4: parse TC-U32 key values and masks natively
  * sched/rt: Show the 'sched_rr_timeslice' SCHED_RR timeslice tuning knob in milliseconds
      include/linux/sched/sysctl.h
      kernel/sched/core.c
      kernel/sched/rt.c
      kernel/sysctl.c
    crypto: af_alg - fix use-after-free in af_alg_accept() due to bh_lock_sock()
  * kgdb: Avoid suspicious RCU usage warning
      kernel/debug/debug_core.c
    usb: usbtest: fix missing kfree(dev->buf) in usbtest_disconnect
  * mm/slub: fix stack overruns with SLUB_STATS
      mm/slub.c
  * mm/slub.c: fix corrupted freechain in deactivate_slab()
      mm/slub.c
    usbnet: smsc95xx: Fix use-after-free after removal
    EDAC/amd64: Read back the scrub rate PCI register on F15h
  * mm: fix swap cache node allocation mask
      mm/swap_state.c
    btrfs: fix data block group relocation failure due to concurrent scrub
    btrfs: cow_file_range() num_bytes and disk_num_bytes are same
    btrfs: fix a block group ref counter leak after failure to remove block group
    Merge 4.9.229 into android-4.9-q
Linux 4.9.229
    Revert "tty: hvc: Fix data abort due to race in hvc_open"
    xfs: add agf freeblocks verify in xfs_agf_verify
    NFSv4 fix CLOSE not waiting for direct IO compeletion
    pNFS/flexfiles: Fix list corruption if the mirror count changes
    SUNRPC: Properly set the @subbuf parameter of xdr_buf_subsegment()
    sunrpc: fixed rollback in rpc_gssd_dummy_populate()
    drm/radeon: fix fb_div check in ni_init_smc_spll_table()
  * tracing: Fix event trigger to accept redundant spaces
      kernel/trace/trace_events_trigger.c
  * arm64: perf: Report the PC value in REGS_ABI_32 mode
      arch/arm64/kernel/perf_regs.c
    ocfs2: fix panic on nfs server over ocfs2
    ocfs2: fix value of OCFS2_INVALID_SLOT
    ocfs2: load global_inode_alloc
  * mm/slab: use memzero_explicit() in kzfree()
      mm/slab_common.c
    KVM: X86: Fix MSR range of APIC registers in X2APIC mode
    ACPI: sysfs: Fix pm_profile_attr type
    ALSA: hda: Add NVIDIA codec IDs 9a & 9d through a0 to patch table
  * blktrace: break out of blktrace setup on concurrent calls
      kernel/trace/blktrace.c
  * kbuild: improve cc-option to clean up all temporary files
      scripts/Kbuild.include
    s390/ptrace: fix setting syscall number
    net: alx: fix race condition in alx_remove
    ata/libata: Fix usage of page address by page_address in ata_scsi_mode_select_xlat function
  * sched/core: Fix PI boosting between RT and DEADLINE tasks
      kernel/sched/core.c
    netfilter: ipset: fix unaligned atomic access
    usb: gadget: udc: Potential Oops in error handling code
    ARM: imx5: add missing put_device() call in imx_suspend_alloc_ocram()
    net: qed: fix excessive QM ILT lines consumption
    net: qed: fix NVMe login fails over VFs
    net: qed: fix left elements count calculation
    RDMA/mad: Fix possible memory leak in ib_mad_post_receive_mads()
    efi/esrt: Fix reference count leak in esre_create_sysfs_entry.
    cifs/smb3: Fix data inconsistent when zero file range
    cifs/smb3: Fix data inconsistent when punch hole
  * xhci: Poll for U0 after disabling USB2 LPM
      drivers/usb/host/xhci.c
  * ALSA: usb-audio: Fix OOB access of mixer element list
      sound/usb/mixer.c
      sound/usb/mixer.h
      sound/usb/mixer_quirks.c
  * ALSA: usb-audio: Clean up mixer element list traverse
      sound/usb/mixer.c
      sound/usb/mixer.h
      sound/usb/mixer_quirks.c
      sound/usb/mixer_scarlett.c
  * ALSA: usb-audio: uac1: Invalidate ctl on interrupt
      sound/usb/mixer.c
    cdc-acm: Add DISABLE_ECHO quirk for Microchip/SMSC chip
  * xhci: Fix enumeration issue when setting max packet size for FS devices.
      drivers/usb/host/xhci.c
  * xhci: Fix incorrect EP_STATE_MASK
      drivers/usb/host/xhci.h
  * ALSA: usb-audio: add quirk for Denon DCD-1500RE
      sound/usb/quirks.c
    usb: host: ehci-exynos: Fix error check in exynos_ehci_probe()
    USB: ehci: reopen solution for Synopsys HC bug
  * usb: add USB_QUIRK_DELAY_INIT for Logitech C922
      drivers/usb/core/quirks.c
    usb: dwc2: Postponed gadget registration to the udc class driver
    USB: ohci-sm501: Add missed iounmap() in remove
  * net: core: reduce recursion limit value
      include/linux/netdevice.h
  * net: Do not clear the sock TX queue in sk_set_socket()
      include/net/sock.h
      net/core/sock.c
  * net: Fix the arp error in some cases
      net/ipv4/fib_semantics.c
  * sctp: Don't advertise IPv4 addresses if ipv6only is set on the socket
      include/net/sctp/constants.h
  * tcp: grow window for OOO packets only for SACK flows
      net/ipv4/tcp_input.c
    ip6_gre: fix use-after-free in ip6gre_tunnel_lookup()
  * tcp_cubic: fix spurious HYSTART_DELAY exit upon drop in min RTT
      net/ipv4/tcp_cubic.c
  * ip_tunnel: fix use-after-free in ip_tunnel_lookup()
      net/ipv4/ip_tunnel.c
    tg3: driver sleeps indefinitely when EEH errors exceed eeh_max_freezes
    rxrpc: Fix notification call on completion of discarded calls
    rocker: fix incorrect error handling in dma_rings_init
  * net: usb: ax88179_178a: fix packet alignment padding
      drivers/net/usb/ax88179_178a.c
  * net: fix memleak in register_netdevice()
      net/core/dev.c
  * mld: fix memory leak in ipv6_mc_destroy_dev()
      net/ipv6/mcast.c
    fix a braino in "sparc32: fix register window handling in genregs32_[gs]et()"
  * net: sched: export __netdev_watchdog_up()
      net/sched/sch_generic.c
  * l2tp: Allow duplicate session creation with UDP
      net/l2tp/l2tp_core.c
  * scsi: scsi_devinfo: handle non-terminated strings
      drivers/scsi/scsi_devinfo.c
    mtd: rawnand: tmio: Fix the probe error path
    mtd: rawnand: mtk: Fix the probe error path
    mtd: rawnand: plat_nand: Fix the probe error path
    mtd: rawnand: socrates: Fix the probe error path
    mtd: rawnand: orion: Fix the probe error path
    mtd: rawnand: xway: Fix the probe error path
    mtd: rawnand: sharpsl: Fix the probe error path
    mtd: rawnand: diskonchip: Fix the probe error path
    mtd: rawnand: Pass a nand_chip object to nand_release()
  * media: dvb_frontend: fix return error code
      drivers/media/dvb-core/dvb_frontend.c
  * media: dvb_frontend: fix wrong cast in compat_ioctl
      drivers/media/dvb-core/dvb_frontend.c
  * media: dvb_frontend: Add commands implementation for compat ioct
      drivers/media/dvb-core/dvb_frontend.c
  * media: dvb_frontend: Add compat_ioctl callback
      drivers/media/dvb-core/dvb_frontend.c
      fs/compat_ioctl.c
  * media: dvb_frontend: Add unlocked_ioctl in dvb_frontend.c
      drivers/media/dvb-core/dvb_frontend.c
  * media: dvb_frontend: be sure to init dvb_frontend_handle_ioctl() return code
      drivers/media/dvb-core/dvb_frontend.c
  * media: dvb_frontend: dtv_property_process_set() cleanups
      drivers/media/dvb-core/dvb_frontend.c
  * media: dvb_frontend: fix return values for FE_SET_PROPERTY
      drivers/media/dvb-core/dvb_frontend.c
      include/uapi/linux/dvb/frontend.h
  * media: dvb_frontend: better document the -EPERM condition
      drivers/media/dvb-core/dvb_frontend.c
  * media: dvb_frontend: get rid of property cache's state
      drivers/media/dvb-core/dvb_frontend.c
      drivers/media/dvb-core/dvb_frontend.h
  * media: dvb_frontend: cleanup ioctl handling logic
      drivers/media/dvb-core/dvb_frontend.c
  * media: dvb_frontend: cleanup dvb_frontend_ioctl_properties()
      drivers/media/dvb-core/dvb_frontend.c
  * media: dvb_frontend: get rid of set_property() callback
      drivers/media/dvb-core/dvb_frontend.c
      drivers/media/dvb-core/dvb_frontend.h
    media: friio-fe: get rid of set_property()
    media: stv6110: get rid of a srate dead code
    media: stv0288: get rid of set_property boilerplate
  * media: dvb_frontend: get rid of get_property() callback
      drivers/media/dvb-core/dvb_frontend.c
      drivers/media/dvb-core/dvb_frontend.h
  * media: dvb/frontend.h: document the uAPI file
      include/uapi/linux/dvb/frontend.h
  * media: dvb/frontend.h: move out a private internal structure
      drivers/media/dvb-core/dvb_frontend.c
      include/uapi/linux/dvb/frontend.h
  * media: dvb_frontend: initialize variable s with FE_NONE instead of 0
      drivers/media/dvb-core/dvb_frontend.c
  * net: core: device_rename: Use rwsem instead of a seqcount
      net/core/dev.c
  * sched/rt, net: Use CONFIG_PREEMPTION.patch
      net/core/dev.c
    e1000e: Do not wake up the system via WOL if device wakeup is disabled
  * kretprobe: Prevent triggering kretprobe from within kprobe_flush_task
      include/linux/kprobes.h
    x86/kprobes: Avoid kretprobe recursion bug
    powerpc/kprobes: Fixes for kprobe_lookup_name() on BE
    kprobes: Fix to protect kick_kprobe_optimizer() by kprobe_mutex
  * crypto: algboss - don't wait during notifier callback
      crypto/algboss.c
    drm/i915: Whitelist context-local timestamp in the gen9 cmdparser
    s390: fix syscall_get_error for compat processes
  * block: nr_sects_write(): Disable preemption on seqcount write
      include/linux/genhd.h
    x86/boot/compressed: Relax sed symbol type regex for LLVM ld.lld
  * drm/dp_mst: Increase ACT retry timeout to 3s
      drivers/gpu/drm/drm_dp_mst_topology.c
  * ext4: fix partial cluster initialization when splitting extent
      fs/ext4/extents.c
  * selinux: fix double free
      security/selinux/ss/services.c
    drm/qxl: Use correct notify port address when creating cursor ring
  * drm/dp_mst: Reformat drm_dp_check_act_status() a bit
      drivers/gpu/drm/drm_dp_mst_topology.c
  * drm: encoder_slave: fix refcouting error for modules
      drivers/gpu/drm/drm_encoder_slave.c
    libata: Use per port sync for detach
  * block: Fix use-after-free in blkdev_get()
      fs/block_dev.c
    bcache: fix potential deadlock problem in btree_gc_coalesce
    perf report: Fix NULL pointer dereference in hists__fprintf_nr_sample_events()
    usb/ehci-platform: Set PM runtime as active on resume
  * usb/xhci-plat: Set PM runtime as active on resume
      drivers/usb/host/xhci-plat.c
    scsi: acornscsi: Fix an error handling path in acornscsi_probe()
    selftests/net: in timestamping, strncpy needs to preserve null byte
    selftests/vm/pkeys: fix alloc_random_pkey() to make it really random
  * elfnote: mark all .note sections SHF_ALLOC
      include/linux/elfnote.h
  * include/linux/bitops.h: avoid clang shift-count-overflow warnings
      include/linux/bitops.h
  * lib/zlib: remove outdated and incorrect pre-increment optimization
      lib/zlib_inflate/inffast.c
    crypto: omap-sham - add proper load balancing support for multicore
    pinctrl: imxl: Fix an error handling path in 'imx1_pinctrl_core_probe()'
    scsi: iscsi: Fix reference count leak in iscsi_boot_create_kobj
    gfs2: Allow lock_nolock mount to specify jid=X
    openrisc: Fix issue with argument clobbering for clone/fork
    ASoC: fsl_asrc_dma: Fix dma_chan leak when config DMA channel failed
    extcon: adc-jack: Fix an error handling path in 'adc_jack_probe()'
    NFSv4.1 fix rpc_call_done assignment for BIND_CONN_TO_SESSION
    net: sunrpc: Fix off-by-one issues in 'rpc_ntop6'
    clk: bcm2835: Fix return type of bcm2835_register_gate
  * usb: gadget: Fix issue with config_ep_by_speed function
      drivers/usb/gadget/composite.c
      include/linux/usb/composite.h
    usb: gadget: fix potential double-free in m66592_probe.
    usb: gadget: lpc32xx_udc: don't dereference ep pointer before null check
    USB: gadget: udc: s3c2410_udc: Remove pointless NULL check in s3c2410_udc_nuke
    usb: dwc2: gadget: move gadget resume after the core is in L0 state
    watchdog: da9062: No need to ping manually before setting timeout
    IB/cma: Fix ports memory leak in cma_configfs
    PCI/PTM: Inherit Switch Downstream Port PTM settings from Upstream Port
    powerpc/64s/pgtable: fix an undefined behaviour
    clk: samsung: exynos5433: Add IGNORE_UNUSED flag to sclk_i2s1
    tty: n_gsm: Fix bogus i++ in gsm_data_kick
    USB: host: ehci-mxc: Add error handling in ehci_mxc_drv_probe()
    drm/msm/mdp5: Fix mdp5_init error path for failed mdp5_kms allocation
    usb/ohci-platform: Fix a warning when hibernating
    vfio-pci: Mask cap zero
    powerpc/ps3: Fix kexec shutdown hang
    powerpc/pseries/ras: Fix FWNMI_VALID off by one
    tty: n_gsm: Fix waking up upper tty layer when room available
    tty: n_gsm: Fix SOF skipping
    clk: ti: composite: fix memory leak
    dlm: remove BUG() before panic()
    scsi: mpt3sas: Fix double free warnings
    power: supply: smb347-charger: IRQSTAT_D is volatile
    power: supply: lp8788: Fix an error handling path in 'lp8788_charger_probe()'
    PCI/ASPM: Allow ASPM on links to PCIe-to-PCI/PCI-X Bridges
    PCI: rcar: Fix incorrect programming of OB windows
  * drivers: base: Fix NULL pointer exception in __platform_driver_probe() if a driver developer is foolish
      drivers/base/platform.c
    serial: amba-pl011: Make sure we initialize the port.lock spinlock
    i2c: pxa: fix i2c_pxa_scream_blue_murder() debug output
    staging: sm750fb: add missing case while setting FB_VISUAL
    tty: hvc: Fix data abort due to race in hvc_open
    s390/qdio: put thinint indicator after early error
  * ALSA: usb-audio: Improve frames size computation
      sound/usb/card.h
      sound/usb/endpoint.c
      sound/usb/endpoint.h
      sound/usb/pcm.c
    scsi: ibmvscsi: Don't send host info in adapter info MAD after LPM
    scsi: sr: Fix sr_probe() missing deallocate of device minor
  * mksysmap: Fix the mismatch of '.L' symbols in System.map
      scripts/mksysmap
    yam: fix possible memory leak in yam_init_driver
    powerpc/crashkernel: Take "mem=" option into account
    nfsd: Fix svc_xprt refcnt leak when setup callback client failed
    powerpc/perf/hv-24x7: Fix inconsistent output values incase multiple hv-24x7 events run
    clk: clk-flexgen: fix clock-critical handling
    scsi: lpfc: Fix lpfc_nodelist leak when processing unsolicited event
    mfd: wm8994: Fix driver operation if loaded as modules
    vfio/pci: fix memory leaks in alloc_perm_bits()
    ps3disk: use the default segment boundary
    PCI: aardvark: Don't blindly enable ASPM L0s and don't write to read-only register
    usblp: poison URBs upon disconnect
    i2c: pxa: clear all master action bits in i2c_pxa_stop_message()
    iio: bmp280: fix compensation of humidity
    scsi: qla2xxx: Fix issue with adapter's stopping state
    ALSA: isa/wavefront: prevent out of bounds write in ioctl
    ARM: integrator: Add some Kconfig selections
    backlight: lp855x: Ensure regulators are disabled on probe failure
    clk: qcom: msm8916: Fix the address location of pll->config_reg
    iio: pressure: bmp280: Tolerate IRQ before registering
    i2c: piix4: Detect secondary SMBus controller on AMD AM4 chipsets
    clk: sunxi: Fix incorrect usage of round_down()
  * power: supply: bq24257_charger: Replace depends on REGMAP_I2C with select
      drivers/power/supply/Kconfig

Change-Id: I9fdac4691b013061a19d375293b7049b999830d0
Signed-off-by: lucaswei <lucaswei@google.com>
2020-08-11 20:25:36 +08:00

/*
* Real-Time Scheduling Class (mapped to the SCHED_FIFO and SCHED_RR
* policies)
*/
#include "sched.h"
#include "walt.h"
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/irq_work.h>
#include <trace/events/sched.h>
#include "walt.h"
int sched_rr_timeslice = RR_TIMESLICE;
int sysctl_sched_rr_timeslice = (MSEC_PER_SEC / HZ) * RR_TIMESLICE;
static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun);
struct rt_bandwidth def_rt_bandwidth;
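/*
* Replenishment timer for RT bandwidth: forward the timer by the elapsed
* periods, let do_sched_rt_period_timer() decay the accumulated rt_time and
* unthrottle runqueues, and keep the timer armed while work is still pending.
*/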
static enum hrtimer_restart sched_rt_period_timer(struct hrtimer *timer)
{
struct rt_bandwidth *rt_b =
container_of(timer, struct rt_bandwidth, rt_period_timer);
int idle = 0;
int overrun;
raw_spin_lock(&rt_b->rt_runtime_lock);
for (;;) {
overrun = hrtimer_forward_now(timer, rt_b->rt_period);
if (!overrun)
break;
raw_spin_unlock(&rt_b->rt_runtime_lock);
idle = do_sched_rt_period_timer(rt_b, overrun);
raw_spin_lock(&rt_b->rt_runtime_lock);
}
if (idle)
rt_b->rt_period_active = 0;
raw_spin_unlock(&rt_b->rt_runtime_lock);
return idle ? HRTIMER_NORESTART : HRTIMER_RESTART;
}
void init_rt_bandwidth(struct rt_bandwidth *rt_b, u64 period, u64 runtime)
{
rt_b->rt_period = ns_to_ktime(period);
rt_b->rt_runtime = runtime;
raw_spin_lock_init(&rt_b->rt_runtime_lock);
hrtimer_init(&rt_b->rt_period_timer,
CLOCK_MONOTONIC, HRTIMER_MODE_REL);
rt_b->rt_period_timer.function = sched_rt_period_timer;
}
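/* Arm the period timer (if not already active) when RT bandwidth enforcement applies. */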
static void start_rt_bandwidth(struct rt_bandwidth *rt_b)
{
if (!rt_bandwidth_enabled() || rt_b->rt_runtime == RUNTIME_INF)
return;
raw_spin_lock(&rt_b->rt_runtime_lock);
if (!rt_b->rt_period_active) {
rt_b->rt_period_active = 1;
/*
* SCHED_DEADLINE updates the bandwidth, as a runaway
* RT task with a DL task could hog a CPU. But DL does
* not reset the period. If a deadline task was running
* without an RT task running, it can cause RT tasks to
* throttle when they start up. Kick the timer right away
* to update the period.
*/
hrtimer_forward_now(&rt_b->rt_period_timer, ns_to_ktime(0));
hrtimer_start_expires(&rt_b->rt_period_timer, HRTIMER_MODE_ABS_PINNED);
}
raw_spin_unlock(&rt_b->rt_runtime_lock);
}
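/* Initialize an rt_rq: empty priority array, zeroed accounting, not throttled. */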
void init_rt_rq(struct rt_rq *rt_rq)
{
struct rt_prio_array *array;
int i;
array = &rt_rq->active;
for (i = 0; i < MAX_RT_PRIO; i++) {
INIT_LIST_HEAD(array->queue + i);
__clear_bit(i, array->bitmap);
}
/* delimiter for bitsearch: */
__set_bit(MAX_RT_PRIO, array->bitmap);
#if defined CONFIG_SMP
rt_rq->highest_prio.curr = MAX_RT_PRIO;
rt_rq->highest_prio.next = MAX_RT_PRIO;
rt_rq->rt_nr_migratory = 0;
rt_rq->overloaded = 0;
plist_head_init(&rt_rq->pushable_tasks);
#endif /* CONFIG_SMP */
/* We start in dequeued state, because no RT tasks are queued */
rt_rq->rt_queued = 0;
rt_rq->rt_time = 0;
rt_rq->rt_throttled = 0;
rt_rq->rt_runtime = 0;
raw_spin_lock_init(&rt_rq->rt_runtime_lock);
}
#ifdef CONFIG_RT_GROUP_SCHED
static void destroy_rt_bandwidth(struct rt_bandwidth *rt_b)
{
hrtimer_cancel(&rt_b->rt_period_timer);
}
#define rt_entity_is_task(rt_se) (!(rt_se)->my_q)
static inline struct task_struct *rt_task_of(struct sched_rt_entity *rt_se)
{
#ifdef CONFIG_SCHED_DEBUG
WARN_ON_ONCE(!rt_entity_is_task(rt_se));
#endif
return container_of(rt_se, struct task_struct, rt);
}
static inline struct rq *rq_of_rt_rq(struct rt_rq *rt_rq)
{
return rt_rq->rq;
}
static inline struct rt_rq *rt_rq_of_se(struct sched_rt_entity *rt_se)
{
return rt_se->rt_rq;
}
static inline struct rq *rq_of_rt_se(struct sched_rt_entity *rt_se)
{
struct rt_rq *rt_rq = rt_se->rt_rq;
return rt_rq->rq;
}
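/* Destroy the group's bandwidth timer and free its per-CPU rt_rq/rt_se arrays. */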
void free_rt_sched_group(struct task_group *tg)
{
int i;
if (tg->rt_se)
destroy_rt_bandwidth(&tg->rt_bandwidth);
for_each_possible_cpu(i) {
if (tg->rt_rq)
kfree(tg->rt_rq[i]);
if (tg->rt_se)
kfree(tg->rt_se[i]);
}
kfree(tg->rt_rq);
kfree(tg->rt_se);
}
void init_tg_rt_entry(struct task_group *tg, struct rt_rq *rt_rq,
struct sched_rt_entity *rt_se, int cpu,
struct sched_rt_entity *parent)
{
struct rq *rq = cpu_rq(cpu);
rt_rq->highest_prio.curr = MAX_RT_PRIO;
rt_rq->rt_nr_boosted = 0;
rt_rq->rq = rq;
rt_rq->tg = tg;
tg->rt_rq[cpu] = rt_rq;
tg->rt_se[cpu] = rt_se;
if (!rt_se)
return;
if (!parent)
rt_se->rt_rq = &rq->rt;
else
rt_se->rt_rq = parent->my_q;
rt_se->my_q = rt_rq;
rt_se->parent = parent;
INIT_LIST_HEAD(&rt_se->run_list);
}
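/*
* Allocate and initialize the per-CPU RT runqueues and scheduling entities
* for a task group. Returns 1 on success, 0 on allocation failure.
*/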
int alloc_rt_sched_group(struct task_group *tg, struct task_group *parent)
{
struct rt_rq *rt_rq;
struct sched_rt_entity *rt_se;
int i;
tg->rt_rq = kzalloc(sizeof(rt_rq) * nr_cpu_ids, GFP_KERNEL);
if (!tg->rt_rq)
goto err;
tg->rt_se = kzalloc(sizeof(rt_se) * nr_cpu_ids, GFP_KERNEL);
if (!tg->rt_se)
goto err;
init_rt_bandwidth(&tg->rt_bandwidth,
ktime_to_ns(def_rt_bandwidth.rt_period), 0);
for_each_possible_cpu(i) {
rt_rq = kzalloc_node(sizeof(struct rt_rq),
GFP_KERNEL, cpu_to_node(i));
if (!rt_rq)
goto err;
rt_se = kzalloc_node(sizeof(struct sched_rt_entity),
GFP_KERNEL, cpu_to_node(i));
if (!rt_se)
goto err_free_rq;
init_rt_rq(rt_rq);
rt_rq->rt_runtime = tg->rt_bandwidth.rt_runtime;
init_tg_rt_entry(tg, rt_rq, rt_se, i, parent->rt_se[i]);
}
return 1;
err_free_rq:
kfree(rt_rq);
err:
return 0;
}
#else /* CONFIG_RT_GROUP_SCHED */
#define rt_entity_is_task(rt_se) (1)
static inline struct task_struct *rt_task_of(struct sched_rt_entity *rt_se)
{
return container_of(rt_se, struct task_struct, rt);
}
static inline struct rq *rq_of_rt_rq(struct rt_rq *rt_rq)
{
return container_of(rt_rq, struct rq, rt);
}
static inline struct rq *rq_of_rt_se(struct sched_rt_entity *rt_se)
{
struct task_struct *p = rt_task_of(rt_se);
return task_rq(p);
}
static inline struct rt_rq *rt_rq_of_se(struct sched_rt_entity *rt_se)
{
struct rq *rq = rq_of_rt_se(rt_se);
return &rq->rt;
}
void free_rt_sched_group(struct task_group *tg) { }
int alloc_rt_sched_group(struct task_group *tg, struct task_group *parent)
{
return 1;
}
#endif /* CONFIG_RT_GROUP_SCHED */
#ifdef CONFIG_SMP
static void pull_rt_task(struct rq *this_rq);
static inline bool need_pull_rt_task(struct rq *rq, struct task_struct *prev)
{
/*
* Try to pull RT tasks here if we lower this rq's prio and cpu is not
* isolated
*/
return rq->rt.highest_prio.curr > prev->prio &&
!cpu_isolated(cpu_of(rq));
}
static inline int rt_overloaded(struct rq *rq)
{
return atomic_read(&rq->rd->rto_count);
}
static inline void rt_set_overload(struct rq *rq)
{
if (!rq->online)
return;
cpumask_set_cpu(rq->cpu, rq->rd->rto_mask);
/*
* Make sure the mask is visible before we set
* the overload count. That is checked to determine
* if we should look at the mask. It would be a shame
* if we looked at the mask, but the mask was not
* updated yet.
*
* Matched by the barrier in pull_rt_task().
*/
smp_wmb();
atomic_inc(&rq->rd->rto_count);
}
static inline void rt_clear_overload(struct rq *rq)
{
if (!rq->online)
return;
/* the order here really doesn't matter */
atomic_dec(&rq->rd->rto_count);
cpumask_clear_cpu(rq->cpu, rq->rd->rto_mask);
}
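/*
* An rq is "RT overloaded" when it has more than one runnable RT task and at
* least one of them may migrate; propagate that state to the root domain.
*/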
static void update_rt_migration(struct rt_rq *rt_rq)
{
if (rt_rq->rt_nr_migratory && rt_rq->rt_nr_total > 1) {
if (!rt_rq->overloaded) {
rt_set_overload(rq_of_rt_rq(rt_rq));
rt_rq->overloaded = 1;
}
} else if (rt_rq->overloaded) {
rt_clear_overload(rq_of_rt_rq(rt_rq));
rt_rq->overloaded = 0;
}
}
static void inc_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
{
struct task_struct *p;
if (!rt_entity_is_task(rt_se))
return;
p = rt_task_of(rt_se);
rt_rq = &rq_of_rt_rq(rt_rq)->rt;
rt_rq->rt_nr_total++;
if (tsk_nr_cpus_allowed(p) > 1)
rt_rq->rt_nr_migratory++;
update_rt_migration(rt_rq);
}
static void dec_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
{
struct task_struct *p;
if (!rt_entity_is_task(rt_se))
return;
p = rt_task_of(rt_se);
rt_rq = &rq_of_rt_rq(rt_rq)->rt;
rt_rq->rt_nr_total--;
if (tsk_nr_cpus_allowed(p) > 1)
rt_rq->rt_nr_migratory--;
update_rt_migration(rt_rq);
}
static inline int has_pushable_tasks(struct rq *rq)
{
return !plist_head_empty(&rq->rt.pushable_tasks);
}
static DEFINE_PER_CPU(struct callback_head, rt_push_head);
static DEFINE_PER_CPU(struct callback_head, rt_pull_head);
static void push_rt_tasks(struct rq *);
static void pull_rt_task(struct rq *);
static inline void queue_push_tasks(struct rq *rq)
{
if (!has_pushable_tasks(rq))
return;
queue_balance_callback(rq, &per_cpu(rt_push_head, rq->cpu), push_rt_tasks);
}
static inline void queue_pull_task(struct rq *rq)
{
queue_balance_callback(rq, &per_cpu(rt_pull_head, rq->cpu), pull_rt_task);
}
static void enqueue_pushable_task(struct rq *rq, struct task_struct *p)
{
plist_del(&p->pushable_tasks, &rq->rt.pushable_tasks);
plist_node_init(&p->pushable_tasks, p->prio);
plist_add(&p->pushable_tasks, &rq->rt.pushable_tasks);
/* Update the highest prio pushable task */
if (p->prio < rq->rt.highest_prio.next)
rq->rt.highest_prio.next = p->prio;
}
static void dequeue_pushable_task(struct rq *rq, struct task_struct *p)
{
plist_del(&p->pushable_tasks, &rq->rt.pushable_tasks);
/* Update the new highest prio pushable task */
if (has_pushable_tasks(rq)) {
p = plist_first_entry(&rq->rt.pushable_tasks,
struct task_struct, pushable_tasks);
rq->rt.highest_prio.next = p->prio;
} else
rq->rt.highest_prio.next = MAX_RT_PRIO;
}
#else
static inline void enqueue_pushable_task(struct rq *rq, struct task_struct *p)
{
}
static inline void dequeue_pushable_task(struct rq *rq, struct task_struct *p)
{
}
static inline
void inc_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
{
}
static inline
void dec_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
{
}
static inline bool need_pull_rt_task(struct rq *rq, struct task_struct *prev)
{
return false;
}
static inline void pull_rt_task(struct rq *this_rq)
{
}
static inline void queue_push_tasks(struct rq *rq)
{
}
#endif /* CONFIG_SMP */
static void enqueue_top_rt_rq(struct rt_rq *rt_rq);
static void dequeue_top_rt_rq(struct rt_rq *rt_rq);
static inline int on_rt_rq(struct sched_rt_entity *rt_se)
{
return rt_se->on_rq;
}
#ifdef CONFIG_RT_GROUP_SCHED
static inline u64 sched_rt_runtime(struct rt_rq *rt_rq)
{
if (!rt_rq->tg)
return RUNTIME_INF;
return rt_rq->rt_runtime;
}
static inline u64 sched_rt_period(struct rt_rq *rt_rq)
{
return ktime_to_ns(rt_rq->tg->rt_bandwidth.rt_period);
}
typedef struct task_group *rt_rq_iter_t;
static inline struct task_group *next_task_group(struct task_group *tg)
{
do {
tg = list_entry_rcu(tg->list.next,
typeof(struct task_group), list);
} while (&tg->list != &task_groups && task_group_is_autogroup(tg));
if (&tg->list == &task_groups)
tg = NULL;
return tg;
}
#define for_each_rt_rq(rt_rq, iter, rq) \
for (iter = container_of(&task_groups, typeof(*iter), list); \
(iter = next_task_group(iter)) && \
(rt_rq = iter->rt_rq[cpu_of(rq)]);)
#define for_each_sched_rt_entity(rt_se) \
for (; rt_se; rt_se = rt_se->parent)
static inline struct rt_rq *group_rt_rq(struct sched_rt_entity *rt_se)
{
return rt_se->my_q;
}
static void enqueue_rt_entity(struct sched_rt_entity *rt_se, unsigned int flags);
static void dequeue_rt_entity(struct sched_rt_entity *rt_se, unsigned int flags);
static void sched_rt_rq_enqueue(struct rt_rq *rt_rq)
{
struct task_struct *curr = rq_of_rt_rq(rt_rq)->curr;
struct rq *rq = rq_of_rt_rq(rt_rq);
struct sched_rt_entity *rt_se;
int cpu = cpu_of(rq);
rt_se = rt_rq->tg->rt_se[cpu];
if (rt_rq->rt_nr_running) {
if (!rt_se)
enqueue_top_rt_rq(rt_rq);
else if (!on_rt_rq(rt_se))
enqueue_rt_entity(rt_se, 0);
if (rt_rq->highest_prio.curr < curr->prio)
resched_curr(rq);
}
}
static void sched_rt_rq_dequeue(struct rt_rq *rt_rq)
{
struct sched_rt_entity *rt_se;
int cpu = cpu_of(rq_of_rt_rq(rt_rq));
rt_se = rt_rq->tg->rt_se[cpu];
if (!rt_se)
dequeue_top_rt_rq(rt_rq);
else if (on_rt_rq(rt_se))
dequeue_rt_entity(rt_se, 0);
}
static inline int rt_rq_throttled(struct rt_rq *rt_rq)
{
return rt_rq->rt_throttled && !rt_rq->rt_nr_boosted;
}
static int rt_se_boosted(struct sched_rt_entity *rt_se)
{
struct rt_rq *rt_rq = group_rt_rq(rt_se);
struct task_struct *p;
if (rt_rq)
return !!rt_rq->rt_nr_boosted;
p = rt_task_of(rt_se);
return p->prio != p->normal_prio;
}
#ifdef CONFIG_SMP
static inline const struct cpumask *sched_rt_period_mask(void)
{
return this_rq()->rd->span;
}
#else
static inline const struct cpumask *sched_rt_period_mask(void)
{
return cpu_online_mask;
}
#endif
static inline
struct rt_rq *sched_rt_period_rt_rq(struct rt_bandwidth *rt_b, int cpu)
{
return container_of(rt_b, struct task_group, rt_bandwidth)->rt_rq[cpu];
}
static inline struct rt_bandwidth *sched_rt_bandwidth(struct rt_rq *rt_rq)
{
return &rt_rq->tg->rt_bandwidth;
}
#else /* !CONFIG_RT_GROUP_SCHED */
static inline u64 sched_rt_runtime(struct rt_rq *rt_rq)
{
return rt_rq->rt_runtime;
}
static inline u64 sched_rt_period(struct rt_rq *rt_rq)
{
return ktime_to_ns(def_rt_bandwidth.rt_period);
}
typedef struct rt_rq *rt_rq_iter_t;
#define for_each_rt_rq(rt_rq, iter, rq) \
for ((void) iter, rt_rq = &rq->rt; rt_rq; rt_rq = NULL)
#define for_each_sched_rt_entity(rt_se) \
for (; rt_se; rt_se = NULL)
static inline struct rt_rq *group_rt_rq(struct sched_rt_entity *rt_se)
{
return NULL;
}
static inline void sched_rt_rq_enqueue(struct rt_rq *rt_rq)
{
struct rq *rq = rq_of_rt_rq(rt_rq);
if (!rt_rq->rt_nr_running)
return;
enqueue_top_rt_rq(rt_rq);
resched_curr(rq);
}
static inline void sched_rt_rq_dequeue(struct rt_rq *rt_rq)
{
dequeue_top_rt_rq(rt_rq);
}
static inline int rt_rq_throttled(struct rt_rq *rt_rq)
{
return rt_rq->rt_throttled;
}
static inline const struct cpumask *sched_rt_period_mask(void)
{
return cpu_online_mask;
}
static inline
struct rt_rq *sched_rt_period_rt_rq(struct rt_bandwidth *rt_b, int cpu)
{
return &cpu_rq(cpu)->rt;
}
static inline struct rt_bandwidth *sched_rt_bandwidth(struct rt_rq *rt_rq)
{
return &def_rt_bandwidth;
}
#endif /* CONFIG_RT_GROUP_SCHED */
bool sched_rt_bandwidth_account(struct rt_rq *rt_rq)
{
struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
return (hrtimer_active(&rt_b->rt_period_timer) ||
rt_rq->rt_time < rt_b->rt_runtime);
}
#ifdef CONFIG_SMP
/*
* We ran out of runtime, see if we can borrow some from our neighbours.
*/
static void do_balance_runtime(struct rt_rq *rt_rq)
{
struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
struct root_domain *rd = rq_of_rt_rq(rt_rq)->rd;
int i, weight;
u64 rt_period;
weight = cpumask_weight(rd->span);
raw_spin_lock(&rt_b->rt_runtime_lock);
rt_period = ktime_to_ns(rt_b->rt_period);
for_each_cpu(i, rd->span) {
struct rt_rq *iter = sched_rt_period_rt_rq(rt_b, i);
s64 diff;
if (iter == rt_rq)
continue;
raw_spin_lock(&iter->rt_runtime_lock);
/*
* Either all rqs have inf runtime and there's nothing to steal
* or __disable_runtime() below sets a specific rq to inf to
* indicate it's been disabled and disallow stealing.
*/
if (iter->rt_runtime == RUNTIME_INF)
goto next;
/*
* From runqueues with spare time, take 1/n part of their
* spare time, but no more than our period.
*/
diff = iter->rt_runtime - iter->rt_time;
if (diff > 0) {
diff = div_u64((u64)diff, weight);
if (rt_rq->rt_runtime + diff > rt_period)
diff = rt_period - rt_rq->rt_runtime;
iter->rt_runtime -= diff;
rt_rq->rt_runtime += diff;
if (rt_rq->rt_runtime == rt_period) {
raw_spin_unlock(&iter->rt_runtime_lock);
break;
}
}
next:
raw_spin_unlock(&iter->rt_runtime_lock);
}
raw_spin_unlock(&rt_b->rt_runtime_lock);
}
/*
* Ensure this RQ takes back all the runtime it lent to its neighbours.
*/
static void __disable_runtime(struct rq *rq)
{
struct root_domain *rd = rq->rd;
rt_rq_iter_t iter;
struct rt_rq *rt_rq;
if (unlikely(!scheduler_running))
return;
for_each_rt_rq(rt_rq, iter, rq) {
struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
s64 want;
int i;
raw_spin_lock(&rt_b->rt_runtime_lock);
raw_spin_lock(&rt_rq->rt_runtime_lock);
/*
* Either we're all inf and nobody needs to borrow, or we're
* already disabled and thus have nothing to do, or we have
* exactly the right amount of runtime to take out.
*/
if (rt_rq->rt_runtime == RUNTIME_INF ||
rt_rq->rt_runtime == rt_b->rt_runtime)
goto balanced;
raw_spin_unlock(&rt_rq->rt_runtime_lock);
/*
* Calculate the difference between what we started out with
* and what we currently have; that's the amount of runtime
* we lent and now have to reclaim.
*/
want = rt_b->rt_runtime - rt_rq->rt_runtime;
/*
* Greedy reclaim, take back as much as we can.
*/
for_each_cpu(i, rd->span) {
struct rt_rq *iter = sched_rt_period_rt_rq(rt_b, i);
s64 diff;
/*
* Can't reclaim from ourselves or disabled runqueues.
*/
if (iter == rt_rq || iter->rt_runtime == RUNTIME_INF)
continue;
raw_spin_lock(&iter->rt_runtime_lock);
if (want > 0) {
diff = min_t(s64, iter->rt_runtime, want);
iter->rt_runtime -= diff;
want -= diff;
} else {
iter->rt_runtime -= want;
want -= want;
}
raw_spin_unlock(&iter->rt_runtime_lock);
if (!want)
break;
}
raw_spin_lock(&rt_rq->rt_runtime_lock);
/*
* We cannot be left wanting - that would mean some runtime
* leaked out of the system.
*/
BUG_ON(want);
balanced:
/*
* Disable all the borrow logic by pretending we have inf
* runtime - in which case borrowing doesn't make sense.
*/
rt_rq->rt_runtime = RUNTIME_INF;
rt_rq->rt_throttled = 0;
raw_spin_unlock(&rt_rq->rt_runtime_lock);
raw_spin_unlock(&rt_b->rt_runtime_lock);
/* Make rt_rq available for pick_next_task() */
sched_rt_rq_enqueue(rt_rq);
}
}
static void __enable_runtime(struct rq *rq)
{
rt_rq_iter_t iter;
struct rt_rq *rt_rq;
if (unlikely(!scheduler_running))
return;
/*
* Reset each runqueue's bandwidth settings
*/
for_each_rt_rq(rt_rq, iter, rq) {
struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
raw_spin_lock(&rt_b->rt_runtime_lock);
raw_spin_lock(&rt_rq->rt_runtime_lock);
rt_rq->rt_runtime = rt_b->rt_runtime;
rt_rq->rt_time = 0;
rt_rq->rt_throttled = 0;
raw_spin_unlock(&rt_rq->rt_runtime_lock);
raw_spin_unlock(&rt_b->rt_runtime_lock);
}
}
static void balance_runtime(struct rt_rq *rt_rq)
{
if (!sched_feat(RT_RUNTIME_SHARE))
return;
if (rt_rq->rt_time > rt_rq->rt_runtime) {
raw_spin_unlock(&rt_rq->rt_runtime_lock);
do_balance_runtime(rt_rq);
raw_spin_lock(&rt_rq->rt_runtime_lock);
}
}
#else /* !CONFIG_SMP */
static inline void balance_runtime(struct rt_rq *rt_rq) {}
#endif /* CONFIG_SMP */
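/*
* Timer-driven replenishment: for each CPU in the period mask, decay the
* accumulated rt_time by the elapsed periods' runtime and re-enqueue
* runqueues that are no longer throttled. Returns 1 when everything is idle
* so the caller can stop the timer.
*/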
static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun)
{
int i, idle = 1, throttled = 0;
const struct cpumask *span;
span = sched_rt_period_mask();
#ifdef CONFIG_RT_GROUP_SCHED
/*
* FIXME: isolated CPUs should really leave the root task group,
* whether they are isolcpus or were isolated via cpusets, lest
* the timer run on a CPU which does not service all runqueues,
* potentially leaving other CPUs indefinitely throttled. If
* isolation is really required, the user will turn the throttle
* off to kill the perturbations it causes anyway. Meanwhile,
* this maintains functionality for boot and/or troubleshooting.
*/
if (rt_b == &root_task_group.rt_bandwidth)
span = cpu_online_mask;
#endif
for_each_cpu(i, span) {
int enqueue = 0;
struct rt_rq *rt_rq = sched_rt_period_rt_rq(rt_b, i);
struct rq *rq = rq_of_rt_rq(rt_rq);
raw_spin_lock(&rq->lock);
update_rq_clock(rq);
if (rt_rq->rt_time) {
u64 runtime;
raw_spin_lock(&rt_rq->rt_runtime_lock);
if (rt_rq->rt_throttled)
balance_runtime(rt_rq);
runtime = rt_rq->rt_runtime;
rt_rq->rt_time -= min(rt_rq->rt_time, overrun*runtime);
if (rt_rq->rt_throttled && rt_rq->rt_time < runtime) {
rt_rq->rt_throttled = 0;
enqueue = 1;
/*
* When we're idle and a woken (rt) task is
* throttled check_preempt_curr() will set
* skip_update and the time between the wakeup
* and this unthrottle will get accounted as
* 'runtime'.
*/
if (rt_rq->rt_nr_running && rq->curr == rq->idle)
rq_clock_skip_update(rq, false);
}
if (rt_rq->rt_time || rt_rq->rt_nr_running)
idle = 0;
raw_spin_unlock(&rt_rq->rt_runtime_lock);
} else if (rt_rq->rt_nr_running) {
idle = 0;
if (!rt_rq_throttled(rt_rq))
enqueue = 1;
}
if (rt_rq->rt_throttled)
throttled = 1;
if (enqueue)
sched_rt_rq_enqueue(rt_rq);
raw_spin_unlock(&rq->lock);
}
if (!throttled && (!rt_bandwidth_enabled() || rt_b->rt_runtime == RUNTIME_INF))
return 1;
return idle;
}
static inline int rt_se_prio(struct sched_rt_entity *rt_se)
{
#ifdef CONFIG_RT_GROUP_SCHED
struct rt_rq *rt_rq = group_rt_rq(rt_se);
if (rt_rq)
return rt_rq->highest_prio.curr;
#endif
return rt_task_of(rt_se)->prio;
}
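/*
* Print the throttled rt_rq's state and its queued tasks (the potential CPU
* hogs) when RT throttling kicks in, to ease debugging.
*/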
static void dump_throttled_rt_tasks(struct rt_rq *rt_rq)
{
struct rt_prio_array *array = &rt_rq->active;
struct sched_rt_entity *rt_se;
char buf[500];
char *pos = buf;
char *end = buf + sizeof(buf);
int idx;
struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
pos += snprintf(pos, sizeof(buf),
"sched: RT throttling activated for rt_rq %pK (cpu %d)\n",
rt_rq, cpu_of(rq_of_rt_rq(rt_rq)));
pos += snprintf(pos, end - pos,
"rt_period_timer: expires=%lld now=%llu period=%llu\n",
hrtimer_get_expires_ns(&rt_b->rt_period_timer),
ktime_get_ns(), sched_rt_period(rt_rq));
if (bitmap_empty(array->bitmap, MAX_RT_PRIO))
goto out;
pos += snprintf(pos, end - pos, "potential CPU hogs:\n");
#ifdef CONFIG_SCHED_INFO
if (sched_info_on())
pos += snprintf(pos, end - pos,
"current %s (%d) is running for %llu nsec\n",
current->comm, current->pid,
rq_clock(rq_of_rt_rq(rt_rq)) -
current->sched_info.last_arrival);
#endif
idx = sched_find_first_bit(array->bitmap);
while (idx < MAX_RT_PRIO) {
list_for_each_entry(rt_se, array->queue + idx, run_list) {
struct task_struct *p;
if (!rt_entity_is_task(rt_se))
continue;
p = rt_task_of(rt_se);
if (pos < end)
pos += snprintf(pos, end - pos, "\t%s (%d)\n",
p->comm, p->pid);
}
idx = find_next_bit(array->bitmap, MAX_RT_PRIO, idx + 1);
}
out:
#ifdef CONFIG_PANIC_ON_RT_THROTTLING
/*
* Use pr_err() in the BUG() case since printk_sched() will
* not get flushed and deadlock is not a concern.
*/
pr_err("%s", buf);
BUG();
#else
printk_deferred("%s", buf);
#endif
}
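/*
* Check whether this rt_rq has exhausted its runtime for the current period
* (after trying to borrow from other CPUs); if so, throttle it, dequeue it
* and return 1.
*/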
static int sched_rt_runtime_exceeded(struct rt_rq *rt_rq)
{
u64 runtime = sched_rt_runtime(rt_rq);
if (rt_rq->rt_throttled)
return rt_rq_throttled(rt_rq);
if (runtime >= sched_rt_period(rt_rq))
return 0;
balance_runtime(rt_rq);
runtime = sched_rt_runtime(rt_rq);
if (runtime == RUNTIME_INF)
return 0;
if (rt_rq->rt_time > runtime) {
struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
/*
* Don't actually throttle groups that have no runtime assigned
* but accrue some time due to boosting.
*/
if (likely(rt_b->rt_runtime)) {
static bool once = false;
rt_rq->rt_throttled = 1;
if (!once) {
once = true;
dump_throttled_rt_tasks(rt_rq);
}
} else {
/*
* In case we did anyway, make it go away,
* replenishment is a joke, since it will replenish us
* with exactly 0 ns.
*/
rt_rq->rt_time = 0;
}
if (rt_rq_throttled(rt_rq)) {
sched_rt_rq_dequeue(rt_rq);
return 1;
}
}
return 0;
}
/*
* Update the current task's runtime statistics. Skip current tasks that
* are not in our scheduling class.
*/
static void update_curr_rt(struct rq *rq)
{
struct task_struct *curr = rq->curr;
struct sched_rt_entity *rt_se = &curr->rt;
u64 delta_exec;
if (curr->sched_class != &rt_sched_class)
return;
delta_exec = rq_clock_task(rq) - curr->se.exec_start;
if (unlikely((s64)delta_exec <= 0))
return;
/* Kick cpufreq (see the comment in kernel/sched/sched.h). */
cpufreq_update_this_cpu(rq, SCHED_CPUFREQ_RT);
schedstat_set(curr->se.statistics.exec_max,
max(curr->se.statistics.exec_max, delta_exec));
curr->se.sum_exec_runtime += delta_exec;
account_group_exec_runtime(curr, delta_exec);
curr->se.exec_start = rq_clock_task(rq);
cpuacct_charge(curr, delta_exec);
sched_rt_avg_update(rq, delta_exec);
if (!rt_bandwidth_enabled())
return;
for_each_sched_rt_entity(rt_se) {
struct rt_rq *rt_rq = rt_rq_of_se(rt_se);
if (sched_rt_runtime(rt_rq) != RUNTIME_INF) {
raw_spin_lock(&rt_rq->rt_runtime_lock);
rt_rq->rt_time += delta_exec;
if (sched_rt_runtime_exceeded(rt_rq))
resched_curr(rq);
raw_spin_unlock(&rt_rq->rt_runtime_lock);
}
}
}
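/*
* The top-level rt_rq contributes its rt_nr_running to the rq's nr_running
* only while it is queued; these helpers add and remove that contribution.
*/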
static void
dequeue_top_rt_rq(struct rt_rq *rt_rq)
{
struct rq *rq = rq_of_rt_rq(rt_rq);
BUG_ON(&rq->rt != rt_rq);
if (!rt_rq->rt_queued)
return;
BUG_ON(!rq->nr_running);
sub_nr_running(rq, rt_rq->rt_nr_running);
rt_rq->rt_queued = 0;
}
static void
enqueue_top_rt_rq(struct rt_rq *rt_rq)
{
struct rq *rq = rq_of_rt_rq(rt_rq);
BUG_ON(&rq->rt != rt_rq);
if (rt_rq->rt_queued)
return;
if (rt_rq_throttled(rt_rq) || !rt_rq->rt_nr_running)
return;
add_nr_running(rq, rt_rq->rt_nr_running);
rt_rq->rt_queued = 1;
}
#if defined CONFIG_SMP
static void
inc_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio)
{
struct rq *rq = rq_of_rt_rq(rt_rq);
#ifdef CONFIG_RT_GROUP_SCHED
/*
* Change rq's cpupri only if rt_rq is the top queue.
*/
if (&rq->rt != rt_rq)
return;
#endif
if (rq->online && prio < prev_prio)
cpupri_set(&rq->rd->cpupri, rq->cpu, prio);
}
static void
dec_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio)
{
struct rq *rq = rq_of_rt_rq(rt_rq);
#ifdef CONFIG_RT_GROUP_SCHED
/*
* Change rq's cpupri only if rt_rq is the top queue.
*/
if (&rq->rt != rt_rq)
return;
#endif
if (rq->online && rt_rq->highest_prio.curr != prev_prio)
cpupri_set(&rq->rd->cpupri, rq->cpu, rt_rq->highest_prio.curr);
}
#else /* CONFIG_SMP */
static inline
void inc_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio) {}
static inline
void dec_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio) {}
#endif /* CONFIG_SMP */
#if defined CONFIG_SMP || defined CONFIG_RT_GROUP_SCHED
static void
inc_rt_prio(struct rt_rq *rt_rq, int prio)
{
int prev_prio = rt_rq->highest_prio.curr;
if (prio < prev_prio)
rt_rq->highest_prio.curr = prio;
inc_rt_prio_smp(rt_rq, prio, prev_prio);
}
static void
dec_rt_prio(struct rt_rq *rt_rq, int prio)
{
int prev_prio = rt_rq->highest_prio.curr;
if (rt_rq->rt_nr_running) {
WARN_ON(prio < prev_prio);
/*
* This may have been our highest task, and therefore
* we may have some recomputation to do
*/
if (prio == prev_prio) {
struct rt_prio_array *array = &rt_rq->active;
rt_rq->highest_prio.curr =
sched_find_first_bit(array->bitmap);
}
} else
rt_rq->highest_prio.curr = MAX_RT_PRIO;
dec_rt_prio_smp(rt_rq, prio, prev_prio);
}
#else
static inline void inc_rt_prio(struct rt_rq *rt_rq, int prio) {}
static inline void dec_rt_prio(struct rt_rq *rt_rq, int prio) {}
#endif /* CONFIG_SMP || CONFIG_RT_GROUP_SCHED */
#ifdef CONFIG_RT_GROUP_SCHED
static void
inc_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
{
if (rt_se_boosted(rt_se))
rt_rq->rt_nr_boosted++;
if (rt_rq->tg)
start_rt_bandwidth(&rt_rq->tg->rt_bandwidth);
}
static void
dec_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
{
if (rt_se_boosted(rt_se))
rt_rq->rt_nr_boosted--;
WARN_ON(!rt_rq->rt_nr_running && rt_rq->rt_nr_boosted);
}
#else /* CONFIG_RT_GROUP_SCHED */
static void
inc_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
{
start_rt_bandwidth(&def_rt_bandwidth);
}
static inline
void dec_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq) {}
#endif /* CONFIG_RT_GROUP_SCHED */
static inline
unsigned int rt_se_nr_running(struct sched_rt_entity *rt_se)
{
struct rt_rq *group_rq = group_rt_rq(rt_se);
if (group_rq)
return group_rq->rt_nr_running;
else
return 1;
}
static inline
unsigned int rt_se_rr_nr_running(struct sched_rt_entity *rt_se)
{
struct rt_rq *group_rq = group_rt_rq(rt_se);
struct task_struct *tsk;
if (group_rq)
return group_rq->rr_nr_running;
tsk = rt_task_of(rt_se);
return (tsk->policy == SCHED_RR) ? 1 : 0;
}
static inline
void inc_rt_tasks(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
{
int prio = rt_se_prio(rt_se);
WARN_ON(!rt_prio(prio));
rt_rq->rt_nr_running += rt_se_nr_running(rt_se);
rt_rq->rr_nr_running += rt_se_rr_nr_running(rt_se);
inc_rt_prio(rt_rq, prio);
inc_rt_migration(rt_se, rt_rq);
inc_rt_group(rt_se, rt_rq);
}
static inline
void dec_rt_tasks(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
{
WARN_ON(!rt_prio(rt_se_prio(rt_se)));
WARN_ON(!rt_rq->rt_nr_running);
rt_rq->rt_nr_running -= rt_se_nr_running(rt_se);
rt_rq->rr_nr_running -= rt_se_rr_nr_running(rt_se);
dec_rt_prio(rt_rq, rt_se_prio(rt_se));
dec_rt_migration(rt_se, rt_rq);
dec_rt_group(rt_se, rt_rq);
}
/*
* Change rt_se->run_list location unless SAVE && !MOVE
*
* assumes ENQUEUE/DEQUEUE flags match
*/
static inline bool move_entity(unsigned int flags)
{
if ((flags & (DEQUEUE_SAVE | DEQUEUE_MOVE)) == DEQUEUE_SAVE)
return false;
return true;
}
static void __delist_rt_entity(struct sched_rt_entity *rt_se, struct rt_prio_array *array)
{
list_del_init(&rt_se->run_list);
if (list_empty(array->queue + rt_se_prio(rt_se)))
__clear_bit(rt_se_prio(rt_se), array->bitmap);
rt_se->on_list = 0;
}
static void __enqueue_rt_entity(struct sched_rt_entity *rt_se, unsigned int flags)
{
struct rt_rq *rt_rq = rt_rq_of_se(rt_se);
struct rt_prio_array *array = &rt_rq->active;
struct rt_rq *group_rq = group_rt_rq(rt_se);
struct list_head *queue = array->queue + rt_se_prio(rt_se);
/*
* Don't enqueue the group if it's throttled, or when empty.
* The latter is a consequence of the former when a child group
* gets throttled and the current group doesn't have any other
* active members.
*/
if (group_rq && (rt_rq_throttled(group_rq) || !group_rq->rt_nr_running)) {
if (rt_se->on_list)
__delist_rt_entity(rt_se, array);
return;
}
if (move_entity(flags)) {
WARN_ON_ONCE(rt_se->on_list);
if (flags & ENQUEUE_HEAD)
list_add(&rt_se->run_list, queue);
else
list_add_tail(&rt_se->run_list, queue);
__set_bit(rt_se_prio(rt_se), array->bitmap);
rt_se->on_list = 1;
}
rt_se->on_rq = 1;
inc_rt_tasks(rt_se, rt_rq);
}
static void __dequeue_rt_entity(struct sched_rt_entity *rt_se, unsigned int flags)
{
struct rt_rq *rt_rq = rt_rq_of_se(rt_se);
struct rt_prio_array *array = &rt_rq->active;
if (move_entity(flags)) {
WARN_ON_ONCE(!rt_se->on_list);
__delist_rt_entity(rt_se, array);
}
rt_se->on_rq = 0;
dec_rt_tasks(rt_se, rt_rq);
}
/*
* Because the prio of an upper entry depends on the lower
* entries, we must remove entries top - down.
*/
static void dequeue_rt_stack(struct sched_rt_entity *rt_se, unsigned int flags)
{
struct sched_rt_entity *back = NULL;
for_each_sched_rt_entity(rt_se) {
rt_se->back = back;
back = rt_se;
}
dequeue_top_rt_rq(rt_rq_of_se(back));
for (rt_se = back; rt_se; rt_se = rt_se->back) {
if (on_rt_rq(rt_se))
__dequeue_rt_entity(rt_se, flags);
}
}
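/*
* (Re)queue rt_se and all of its parent entities: the whole stack is first
* dequeued top-down by dequeue_rt_stack() and then re-enqueued bottom-up so
* the per-rt_rq counters stay consistent.
*/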
static void enqueue_rt_entity(struct sched_rt_entity *rt_se, unsigned int flags)
{
struct rq *rq = rq_of_rt_se(rt_se);
dequeue_rt_stack(rt_se, flags);
for_each_sched_rt_entity(rt_se)
__enqueue_rt_entity(rt_se, flags);
enqueue_top_rt_rq(&rq->rt);
}
static void dequeue_rt_entity(struct sched_rt_entity *rt_se, unsigned int flags)
{
struct rq *rq = rq_of_rt_se(rt_se);
dequeue_rt_stack(rt_se, flags);
for_each_sched_rt_entity(rt_se) {
struct rt_rq *rt_rq = group_rt_rq(rt_se);
if (rt_rq && rt_rq->rt_nr_running)
__enqueue_rt_entity(rt_se, flags);
}
enqueue_top_rt_rq(&rq->rt);
}
/*
* Adding/removing a task to/from a priority array:
*/
static void
enqueue_task_rt(struct rq *rq, struct task_struct *p, int flags)
{
struct sched_rt_entity *rt_se = &p->rt;
if (flags & ENQUEUE_WAKEUP)
rt_se->timeout = 0;
enqueue_rt_entity(rt_se, flags);
walt_inc_cumulative_runnable_avg(rq, p);
if (!task_current(rq, p) && tsk_nr_cpus_allowed(p) > 1)
enqueue_pushable_task(rq, p);
}
static void dequeue_task_rt(struct rq *rq, struct task_struct *p, int flags)
{
struct sched_rt_entity *rt_se = &p->rt;
update_curr_rt(rq);
dequeue_rt_entity(rt_se, flags);
walt_dec_cumulative_runnable_avg(rq, p);
dequeue_pushable_task(rq, p);
}
/*
* Put task to the head or the end of the run list without the overhead of
* dequeue followed by enqueue.
*/
static void
requeue_rt_entity(struct rt_rq *rt_rq, struct sched_rt_entity *rt_se, int head)
{
if (on_rt_rq(rt_se)) {
struct rt_prio_array *array = &rt_rq->active;
struct list_head *queue = array->queue + rt_se_prio(rt_se);
if (head)
list_move(&rt_se->run_list, queue);
else
list_move_tail(&rt_se->run_list, queue);
}
}
static void requeue_task_rt(struct rq *rq, struct task_struct *p, int head)
{
struct sched_rt_entity *rt_se = &p->rt;
struct rt_rq *rt_rq;
for_each_sched_rt_entity(rt_se) {
rt_rq = rt_rq_of_se(rt_se);
requeue_rt_entity(rt_rq, rt_se, head);
}
}
static void yield_task_rt(struct rq *rq)
{
requeue_task_rt(rq, rq->curr, 0);
}
#ifdef CONFIG_SMP
static int find_lowest_rq(struct task_struct *task);
/*
* Return whether the task on the given cpu is currently non-preemptible
* while handling a potentially long softint, or if the task is likely
* to block preemptions soon because (a) it is a ksoftirq thread that is
* handling slow softints, (b) it is idle and therefore likely to start
* processing the irqs immediately, (c) the cpu is currently handling hard irqs
* and will soon move on to the softirq handler.
*/
bool
task_may_not_preempt(struct task_struct *task, int cpu)
{
__u32 softirqs = per_cpu(active_softirqs, cpu) |
__IRQ_STAT(cpu, __softirq_pending);
struct task_struct *cpu_ksoftirqd = per_cpu(ksoftirqd, cpu);
return ((softirqs & LONG_SOFTIRQ_MASK) &&
(task == cpu_ksoftirqd || is_idle_task(task) ||
(task_thread_info(task)->preempt_count
& (HARDIRQ_MASK | SOFTIRQ_MASK))));
}
static int
select_task_rq_rt(struct task_struct *p, int cpu, int sd_flag, int flags)
{
struct task_struct *curr, *tgt_task;
struct rq *rq;
bool may_not_preempt;
/* For anything but wake ups, just return the task_cpu */
if (sd_flag != SD_BALANCE_WAKE && sd_flag != SD_BALANCE_FORK)
goto out;
rq = cpu_rq(cpu);
rcu_read_lock();
curr = READ_ONCE(rq->curr); /* unlocked access */
/*
* If the current task on @p's runqueue is a softirq task,
* it may run without preemption for a time that is
* ill-suited for a waiting RT task. Therefore, try to
* wake this RT task on another runqueue.
*
* Also, if the current task on @p's runqueue is an RT task, then
* try to see if we can wake this RT task up on another
* runqueue. Otherwise simply start this RT task
* on its current runqueue.
*
* We want to avoid overloading runqueues. If the woken
* task is a higher priority, then it will stay on this CPU
* and the lower prio task should be moved to another CPU.
* Even though this will probably make the lower prio task
* lose its cache, we do not want to bounce a higher task
* around just because it gave up its CPU, perhaps for a
* lock?
*
* For equal prio tasks, we just let the scheduler sort it out.
*
* Otherwise, just let it ride on the affined RQ and the
* post-schedule router will push the preempted task away
*
* This test is optimistic, if we get it wrong the load-balancer
* will have to sort it out.
*/
may_not_preempt = task_may_not_preempt(curr, cpu);
if (energy_aware() || may_not_preempt ||
(unlikely(rt_task(curr)) &&
(tsk_nr_cpus_allowed(curr) < 2 ||
curr->prio <= p->prio))) {
int target = find_lowest_rq(p);
/*
* Check once for losing a race with the other core's irq
* handler. This does not happen frequently, but it can avoid
* delaying the execution of the RT task in those cases.
*/
if (target != -1) {
tgt_task = READ_ONCE(cpu_rq(target)->curr);
if (task_may_not_preempt(tgt_task, target))
target = find_lowest_rq(p);
}
/*
* If cpu is non-preemptible, prefer remote cpu
* even if it's running a higher-prio task.
* Otherwise: Don't bother moving it if the
* destination CPU is not running a lower priority task.
*/
if (target != -1 &&
(may_not_preempt ||
p->prio < cpu_rq(target)->rt.highest_prio.curr))
cpu = target;
}
rcu_read_unlock();
out:
return cpu;
}
static void check_preempt_equal_prio(struct rq *rq, struct task_struct *p)
{
/*
* Current can't be migrated, useless to reschedule,
* let's hope p can move out.
*/
if (tsk_nr_cpus_allowed(rq->curr) == 1 ||
!cpupri_find(&rq->rd->cpupri, rq->curr, NULL))
return;
/*
* p is migratable, so let's not schedule it and
* see if it is pushed or pulled somewhere else.
*/
if (tsk_nr_cpus_allowed(p) != 1
&& cpupri_find(&rq->rd->cpupri, p, NULL))
return;
/*
* There appears to be other cpus that can accept
* current and none to run 'p', so lets reschedule
* to try and push current away:
*/
requeue_task_rt(rq, p, 1);
resched_curr(rq);
}
#endif /* CONFIG_SMP */
/*
* Preempt the current task with a newly woken task if needed:
*/
static void check_preempt_curr_rt(struct rq *rq, struct task_struct *p, int flags)
{
if (p->prio < rq->curr->prio) {
resched_curr(rq);
return;
}
#ifdef CONFIG_SMP
/*
* If:
*
* - the newly woken task is of equal priority to the current task
* - the newly woken task is non-migratable while current is migratable
* - current will be preempted on the next reschedule
*
* we should check to see if current can readily move to a different
* cpu. If so, we will reschedule to allow the push logic to try
* to move current somewhere else, making room for our non-migratable
* task.
*/
if (p->prio == rq->curr->prio && !test_tsk_need_resched(rq->curr))
check_preempt_equal_prio(rq, p);
#endif
}
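/*
* Pick the first entity on the highest-priority non-empty queue, as found
* via the priority bitmap.
*/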
static struct sched_rt_entity *pick_next_rt_entity(struct rq *rq,
struct rt_rq *rt_rq)
{
struct rt_prio_array *array = &rt_rq->active;
struct sched_rt_entity *next = NULL;
struct list_head *queue;
int idx;
idx = sched_find_first_bit(array->bitmap);
BUG_ON(idx >= MAX_RT_PRIO);
queue = array->queue + idx;
next = list_entry(queue->next, struct sched_rt_entity, run_list);
return next;
}
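/*
* Walk the group hierarchy downwards, picking the next entity at each level,
* until a task entity is reached.
*/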
static struct task_struct *_pick_next_task_rt(struct rq *rq)
{
struct sched_rt_entity *rt_se;
struct task_struct *p;
struct rt_rq *rt_rq = &rq->rt;
do {
rt_se = pick_next_rt_entity(rq, rt_rq);
BUG_ON(!rt_se);
rt_rq = group_rt_rq(rt_se);
} while (rt_rq);
p = rt_task_of(rt_se);
p->se.exec_start = rq_clock_task(rq);
return p;
}
static struct task_struct *
pick_next_task_rt(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
{
struct task_struct *p;
struct rt_rq *rt_rq = &rq->rt;
if (need_pull_rt_task(rq, prev)) {
/*
* This is OK, because current is on_cpu, which avoids it being
* picked for load-balance and preemption/IRQs are still
* disabled avoiding further scheduler activity on it and we're
* being very careful to re-start the picking loop.
*/
rq_unpin_lock(rq, rf);
pull_rt_task(rq);
rq_repin_lock(rq, rf);
/*
* pull_rt_task() can drop (and re-acquire) rq->lock; this
* means a dl or stop task can slip in, in which case we need
* to re-start task selection.
*/
if (unlikely((rq->stop && task_on_rq_queued(rq->stop)) ||
rq->dl.dl_nr_running))
return RETRY_TASK;
}
/*
* We may dequeue prev's rt_rq in put_prev_task().
* So, we update the time before the rt_nr_running check.
*/
if (prev->sched_class == &rt_sched_class)
update_curr_rt(rq);
if (!rt_rq->rt_queued)
return NULL;
put_prev_task(rq, prev);
p = _pick_next_task_rt(rq);
/* The running task is never eligible for pushing */
dequeue_pushable_task(rq, p);
queue_push_tasks(rq);
return p;
}
static void put_prev_task_rt(struct rq *rq, struct task_struct *p)
{
update_curr_rt(rq);
/*
* The previous task needs to be made eligible for pushing
* if it is still active
*/
if (on_rt_rq(&p->rt) && tsk_nr_cpus_allowed(p) > 1)
enqueue_pushable_task(rq, p);
}
#ifdef CONFIG_SMP
/* Only try algorithms three times */
#define RT_MAX_TRIES 3
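/*
* A task is a candidate for pushing/pulling to @cpu if it is not currently
* running and @cpu is in its allowed mask.
*/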
static int pick_rt_task(struct rq *rq, struct task_struct *p, int cpu)
{
if (!task_running(rq, p) &&
cpumask_test_cpu(cpu, tsk_cpus_allowed(p)))
return 1;
return 0;
}
/*
* Return the highest pushable rq's task, which is suitable to be executed
* on the cpu, NULL otherwise
*/
static struct task_struct *pick_highest_pushable_task(struct rq *rq, int cpu)
{
struct plist_head *head = &rq->rt.pushable_tasks;
struct task_struct *p;
if (!has_pushable_tasks(rq))
return NULL;
plist_for_each_entry(p, head, pushable_tasks) {
if (pick_rt_task(rq, p, cpu))
return p;
}
return NULL;
}
#ifdef CONFIG_SCHED_CORE_ROTATE
static int rotate_cpu_start;
static DEFINE_SPINLOCK(rotate_lock);
static unsigned long avoid_prev_cpu_last;
static struct find_first_cpu_bit_env first_cpu_bit_env = {
.avoid_prev_cpu_last = &avoid_prev_cpu_last,
.rotate_cpu_start = &rotate_cpu_start,
.interval = HZ,
.rotate_lock = &rotate_lock,
};
#endif
static DEFINE_PER_CPU(cpumask_var_t, local_cpu_mask);
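/*
* Find a suitable CPU for @task among the lowest-priority CPUs reported by
* cpupri. With energy-aware scheduling, candidates are further filtered by
* utilization, idle-state depth and cumulative window demand; otherwise the
* topology-based selection below prefers cache-hot and nearby CPUs.
*/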
static int find_lowest_rq(struct task_struct *task)
{
struct sched_domain *sd;
struct sched_group *sg, *sg_target;
struct cpumask *lowest_mask = this_cpu_cpumask_var_ptr(local_cpu_mask);
int this_cpu = smp_processor_id();
int cpu = -1, best_cpu;
struct cpumask search_cpu, backup_search_cpu;
unsigned long cpu_capacity;
unsigned long best_capacity;
unsigned long util, best_cpu_util = ULONG_MAX;
unsigned long best_cpu_util_cum = ULONG_MAX;
unsigned long util_cum;
unsigned long tutil = task_util(task);
int best_cpu_idle_idx = INT_MAX;
int cpu_idle_idx = -1;
enum sched_boost_policy placement_boost;
int prev_cpu = task_cpu(task);
int start_cpu = walt_start_cpu(prev_cpu);
bool do_rotate = false;
bool avoid_prev_cpu = false;
/* Make sure the mask is initialized first */
if (unlikely(!lowest_mask))
return -1;
if (tsk_nr_cpus_allowed(task) == 1)
return -1; /* No other targets possible */
if (!cpupri_find(&task_rq(task)->rd->cpupri, task, lowest_mask))
return -1; /* No targets found */
if (energy_aware()) {
sg_target = NULL;
best_cpu = -1;
placement_boost = sched_boost() == FULL_THROTTLE_BOOST ?
sched_boost_policy() : SCHED_BOOST_NONE;
best_capacity = placement_boost ? 0 : ULONG_MAX;
sd = rcu_dereference(per_cpu(sd_ea, start_cpu));
if (!sd) {
goto noea;
}
sg = sd->groups;
do {
if (!cpumask_intersects(lowest_mask,
sched_group_cpus(sg)))
continue;
if (!sysctl_sched_is_big_little) {
sg_target = sg;
break;
}
cpu = group_first_cpu(sg);
cpu_capacity = capacity_orig_of(cpu);
if (unlikely(placement_boost)) {
if (cpu_capacity > best_capacity) {
best_capacity = cpu_capacity;
sg_target = sg;
}
} else {
if (cpu_capacity < best_capacity) {
best_capacity = cpu_capacity;
sg_target = sg;
}
}
} while (sg = sg->next, sg != sd->groups);
if (sg_target) {
cpumask_and(&search_cpu, lowest_mask,
sched_group_cpus(sg_target));
cpumask_copy(&backup_search_cpu, lowest_mask);
cpumask_andnot(&backup_search_cpu, &backup_search_cpu,
&search_cpu);
cpu = find_first_cpu_bit(task, &search_cpu, sg_target,
&avoid_prev_cpu, &do_rotate,
&first_cpu_bit_env);
} else {
cpumask_copy(&search_cpu, lowest_mask);
cpumask_clear(&backup_search_cpu);
cpu = -1;
}
retry:
while ((cpu = cpumask_next(cpu, &search_cpu)) < nr_cpu_ids) {
cpumask_clear_cpu(cpu, &search_cpu);
/*
* Don't use capacity_curr_of() since it will
* double count rt task load.
*/
util = cpu_util(cpu);
if (avoid_prev_cpu && cpu == prev_cpu)
continue;
if (__cpu_overutilized(cpu, tutil))
continue;
if (cpu_isolated(cpu))
continue;
if (sched_cpu_high_irqload(cpu))
continue;
/* Find the least loaded CPU */
if (util > best_cpu_util)
continue;
/*
* If the previous CPU has the same load, keep it as
* best_cpu.
*/
if (best_cpu_util == util && best_cpu == task_cpu(task))
continue;
/*
* If the candidate CPU is the previous CPU, select it.
* Otherwise, if its load matches best_cpu's and it is in
* a shallower C-state, select it. If all of the above
* conditions are equal, select the CPU with the least
* cumulative window demand.
*/
if (sysctl_sched_cstate_aware)
cpu_idle_idx = idle_get_state_idx(cpu_rq(cpu));
util_cum = cpu_util_cum(cpu, 0);
if (cpu != task_cpu(task) && best_cpu_util == util) {
if (best_cpu_idle_idx < cpu_idle_idx)
continue;
if (best_cpu_idle_idx == cpu_idle_idx &&
best_cpu_util_cum < util_cum)
continue;
}
best_cpu_idle_idx = cpu_idle_idx;
best_cpu_util_cum = util_cum;
best_cpu_util = util;
best_cpu = cpu;
}
if (do_rotate) {
/*
* We started iteration somewhere in the middle of
* cpumask. Iterate once again from bit 0 to the
* previous starting point bit.
*/
do_rotate = false;
cpu = -1;
goto retry;
}
if (best_cpu != -1 && placement_boost != SCHED_BOOST_ON_ALL) {
return best_cpu;
} else if (!cpumask_empty(&backup_search_cpu)) {
cpumask_copy(&search_cpu, &backup_search_cpu);
cpumask_clear(&backup_search_cpu);
cpu = -1;
placement_boost = SCHED_BOOST_NONE;
goto retry;
}
}
noea:
cpu = task_cpu(task);
/*
* At this point we have built a mask of cpus representing the
* lowest priority tasks in the system. Now we want to elect
* the best one based on our affinity and topology.
*
* We prioritize the last cpu that the task executed on since
* it is most likely cache-hot in that location.
*/
if (cpumask_test_cpu(cpu, lowest_mask))
return cpu;
/*
* Otherwise, we consult the sched_domains span maps to figure
* out which cpu is logically closest to our hot cache data.
*/
if (!cpumask_test_cpu(this_cpu, lowest_mask))
this_cpu = -1; /* Skip this_cpu opt if not among lowest */
for_each_domain(cpu, sd) {
if (sd->flags & SD_WAKE_AFFINE) {
int best_cpu;
/*
* "this_cpu" is cheaper to preempt than a
* remote processor.
*/
if (this_cpu != -1 &&
cpumask_test_cpu(this_cpu, sched_domain_span(sd))) {
return this_cpu;
}
best_cpu = cpumask_first_and(lowest_mask,
sched_domain_span(sd));
if (best_cpu < nr_cpu_ids) {
return best_cpu;
}
}
}
/*
* And finally, if there were no matches within the domains
* just give the caller *something* to work with from the compatible
* locations.
*/
if (this_cpu != -1)
return this_cpu;
cpu = cpumask_any(lowest_mask);
if (cpu < nr_cpu_ids)
return cpu;
return -1;
}
/* Will lock the rq it finds */
static struct rq *find_lock_lowest_rq(struct task_struct *task, struct rq *rq)
{
struct rq *lowest_rq = NULL;
int tries;
int cpu;
for (tries = 0; tries < RT_MAX_TRIES; tries++) {
rcu_read_lock();
cpu = find_lowest_rq(task);
rcu_read_unlock();
if ((cpu == -1) || (cpu == rq->cpu))
break;
lowest_rq = cpu_rq(cpu);
if (lowest_rq->rt.highest_prio.curr <= task->prio) {
/*
* Target rq has tasks of equal or higher priority,
* retrying does not release any lock and is unlikely
* to yield a different result.
*/
lowest_rq = NULL;
break;
}
/* if the prio of this runqueue changed, try again */
if (double_lock_balance(rq, lowest_rq)) {
/*
* We had to unlock the run queue. In
* the meantime, the task could have
* migrated already or had its affinity changed.
* Also make sure that it wasn't scheduled on its rq.
*/
if (unlikely(task_rq(task) != rq ||
!cpumask_test_cpu(lowest_rq->cpu,
tsk_cpus_allowed(task)) ||
task_running(rq, task) ||
!rt_task(task) ||
!task_on_rq_queued(task))) {
double_unlock_balance(rq, lowest_rq);
lowest_rq = NULL;
break;
}
}
/* If this rq is still suitable use it. */
if (lowest_rq->rt.highest_prio.curr > task->prio)
break;
/* try again */
double_unlock_balance(rq, lowest_rq);
lowest_rq = NULL;
}
return lowest_rq;
}
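/*
* Return the highest-priority task on this rq's pushable list, or NULL if
* there is nothing to push.
*/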
static struct task_struct *pick_next_pushable_task(struct rq *rq)
{
struct task_struct *p;
if (!has_pushable_tasks(rq))
return NULL;
p = plist_first_entry(&rq->rt.pushable_tasks,
struct task_struct, pushable_tasks);
BUG_ON(rq->cpu != task_cpu(p));
BUG_ON(task_current(rq, p));
BUG_ON(tsk_nr_cpus_allowed(p) <= 1);
BUG_ON(!task_on_rq_queued(p));
BUG_ON(!rt_task(p));
return p;
}
/*
* If the current CPU has more than one RT task, see if the non
* running task can migrate over to a CPU that is running a task
* of lesser priority.
*/
static int push_rt_task(struct rq *rq)
{
struct task_struct *next_task;
struct rq *lowest_rq;
int ret = 0;
if (!rq->rt.overloaded)
return 0;
next_task = pick_next_pushable_task(rq);
if (!next_task)
return 0;
retry:
if (unlikely(next_task == rq->curr)) {
WARN_ON(1);
return 0;
}
/*
* It's possible that next_task slipped in with a
* higher priority than current. If that's the case
* just reschedule current.
*/
if (unlikely(next_task->prio < rq->curr->prio)) {
resched_curr(rq);
return 0;
}
/* We might release rq lock */
get_task_struct(next_task);
/* find_lock_lowest_rq locks the rq if found */
lowest_rq = find_lock_lowest_rq(next_task, rq);
if (!lowest_rq) {
struct task_struct *task;
/*
* find_lock_lowest_rq releases rq->lock
* so it is possible that next_task has migrated.
*
* We need to make sure that the task is still on the same
* run-queue and is also still the next task eligible for
* pushing.
*/
task = pick_next_pushable_task(rq);
if (task_cpu(next_task) == rq->cpu && task == next_task) {
/*
* The task hasn't migrated, and is still the next
* eligible task, but we failed to find a run-queue
* to push it to. Do not retry in this case, since
* other cpus will pull from us when ready.
*/
goto out;
}
if (!task)
/* No more tasks, just exit */
goto out;
/*
* Something has shifted, try again.
*/
put_task_struct(next_task);
next_task = task;
goto retry;
}
next_task->on_rq = TASK_ON_RQ_MIGRATING;
deactivate_task(rq, next_task, 0);
set_task_cpu(next_task, lowest_rq->cpu);
activate_task(lowest_rq, next_task, 0);
next_task->on_rq = TASK_ON_RQ_QUEUED;
ret = 1;
resched_curr(lowest_rq);
double_unlock_balance(rq, lowest_rq);
out:
put_task_struct(next_task);
return ret;
}
static void push_rt_tasks(struct rq *rq)
{
/* push_rt_task() will return true if it moved an RT task */
while (push_rt_task(rq))
;
}
#ifdef HAVE_RT_PUSH_IPI
/*
* When a high priority task schedules out from a CPU and a lower priority
* task is scheduled in, a check is made to see if there are any RT tasks
* on other CPUs that are waiting to run because a higher priority RT task
* is currently running on its CPU. In this case, the CPU with multiple RT
* tasks queued on it (overloaded) needs to be notified that a CPU has opened
* up that may be able to run one of its non-running queued RT tasks.
*
* All CPUs with overloaded RT tasks need to be notified as there is currently
* no way to know which of these CPUs have the highest priority task waiting
* to run. Instead of trying to take a spinlock on each of these CPUs,
* which has shown to cause large latency when done on machines with many
* CPUs, an IPI is sent to the CPUs to have them push off the overloaded
* RT tasks waiting to run.
*
* Just sending an IPI to each of the CPUs is also an issue, as on large
* count CPU machines, this can cause an IPI storm on a CPU, especially
* if it's the only CPU with multiple RT tasks queued, and a large number
* of CPUs scheduling a lower priority task at the same time.
*
* Each root domain has its own irq work function that can iterate over
* all CPUs with RT overloaded tasks. Since all CPUs with overloaded RT
* tasks must be checked if there's one or many CPUs that are lowering
* their priority, there's a single irq work iterator that will try to
* push off RT tasks that are waiting to run.
*
* When a CPU schedules a lower priority task, it will kick off the
* irq work iterator that will jump to each CPU with overloaded RT tasks.
* As it only takes the first CPU that schedules a lower priority task
* to start the process, the rto_start variable is incremented and if
* the atomic result is one, then that CPU will try to take the rto_lock.
* This prevents high contention on the lock as the process handles all
* CPUs scheduling lower priority tasks.
*
* All CPUs that are scheduling a lower priority task will increment the
* rto_loop_next variable. This will make sure that the irq work iterator
* checks all RT overloaded CPUs whenever a CPU schedules a new lower
* priority task, even if the iterator is in the middle of a scan. Incrementing
* rto_loop_next will cause the iterator to perform another scan.
*
*/
static int rto_next_cpu(struct root_domain *rd)
{
int next;
int cpu;
/*
* When starting the IPI RT pushing, the rto_cpu is set to -1,
* rto_next_cpu() will simply return the first CPU found in
* the rto_mask.
*
* If rto_next_cpu() is called while rto_cpu is a valid cpu, it
* will return the next CPU found in the rto_mask.
*
* If there are no more CPUs left in the rto_mask, then a check is made
* against rto_loop and rto_loop_next. rto_loop is only updated with
* the rto_lock held, but any CPU may increment the rto_loop_next
* without any locking.
*/
for (;;) {
/* When rto_cpu is -1 this acts like cpumask_first() */
cpu = cpumask_next(rd->rto_cpu, rd->rto_mask);
rd->rto_cpu = cpu;
if (cpu < nr_cpu_ids)
return cpu;
rd->rto_cpu = -1;
/*
* ACQUIRE ensures we see the @rto_mask changes
* made prior to the @next value observed.
*
* Matches WMB in rt_set_overload().
*/
next = atomic_read_acquire(&rd->rto_loop_next);
if (rd->rto_loop == next)
break;
rd->rto_loop = next;
}
return -1;
}
static inline bool rto_start_trylock(atomic_t *v)
{
return !atomic_cmpxchg_acquire(v, 0, 1);
}
static inline void rto_start_unlock(atomic_t *v)
{
atomic_set_release(v, 0);
}
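/*
* Start (or keep alive) the IPI-driven push iteration: bump rto_loop_next so
* an already-running iterator rescans, and if no iterator is active queue
* rto_push_work on the first RT-overloaded CPU.
*/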
static void tell_cpu_to_push(struct rq *rq)
{
int cpu = -1;
/* Keep the loop going if the IPI is currently active */
atomic_inc(&rq->rd->rto_loop_next);
/* Only one CPU can initiate a loop at a time */
if (!rto_start_trylock(&rq->rd->rto_loop_start))
return;
raw_spin_lock(&rq->rd->rto_lock);
/*
* The rto_cpu is updated under the lock; if it has a valid cpu
* then the IPI is still running and will continue due to the
* update to rto_loop_next, and nothing needs to be done here.
* Otherwise it is finishing up and an IPI needs to be sent.
*/
if (rq->rd->rto_cpu < 0)
cpu = rto_next_cpu(rq->rd);
raw_spin_unlock(&rq->rd->rto_lock);
rto_start_unlock(&rq->rd->rto_loop_start);
if (cpu >= 0) {
/* Make sure the rd does not get freed while pushing */
sched_get_rd(rq->rd);
irq_work_queue_on(&rq->rd->rto_push_work, cpu);
}
}
/* Called from hardirq context */
void rto_push_irq_work_func(struct irq_work *work)
{
struct root_domain *rd =
container_of(work, struct root_domain, rto_push_work);
struct rq *rq;
int cpu;
rq = this_rq();
/*
* We do not need to grab the lock to check for has_pushable_tasks.
* When it gets updated, a check is made if a push is possible.
*/
if (has_pushable_tasks(rq)) {
raw_spin_lock(&rq->lock);
push_rt_tasks(rq);
raw_spin_unlock(&rq->lock);
}
raw_spin_lock(&rd->rto_lock);
/* Pass the IPI to the next rt overloaded queue */
cpu = rto_next_cpu(rd);
raw_spin_unlock(&rd->rto_lock);
if (cpu < 0) {
sched_put_rd(rd);
return;
}
/* Try the next RT overloaded CPU */
irq_work_queue_on(&rd->rto_push_work, cpu);
}
#endif /* HAVE_RT_PUSH_IPI */
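/*
* Pull a higher-priority pushable task from an RT-overloaded CPU onto this
* runqueue, or, with RT_PUSH_IPI enabled, ask the overloaded CPUs to push
* their tasks instead.
*/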
static void pull_rt_task(struct rq *this_rq)
{
int this_cpu = this_rq->cpu, cpu;
bool resched = false;
struct task_struct *p;
struct rq *src_rq;
int rt_overload_count = rt_overloaded(this_rq);
if (likely(!rt_overload_count))
return;
/*
* Match the barrier from rt_set_overload(); this guarantees that if we
* see overloaded we must also see the rto_mask bit.
*/
smp_rmb();
/* If we are the only overloaded CPU do nothing */
if (rt_overload_count == 1 &&
cpumask_test_cpu(this_rq->cpu, this_rq->rd->rto_mask))
return;
#ifdef HAVE_RT_PUSH_IPI
if (sched_feat(RT_PUSH_IPI)) {
tell_cpu_to_push(this_rq);
return;
}
#endif
for_each_cpu(cpu, this_rq->rd->rto_mask) {
if (this_cpu == cpu)
continue;
src_rq = cpu_rq(cpu);
/*
* Don't bother taking the src_rq->lock if the next highest
* task is known to be lower-priority than our current task.
* This may look racy, but if this value is about to go
* logically higher, the src_rq will push this task away.
* And if it's going logically lower, we do not care.
*/
if (src_rq->rt.highest_prio.next >=
this_rq->rt.highest_prio.curr)
continue;
/*
* We can potentially drop this_rq's lock in
* double_lock_balance, and another CPU could
* alter this_rq
*/
double_lock_balance(this_rq, src_rq);
/*
* We can only pull a task that is pushable
* on its rq, and no others.
*/
p = pick_highest_pushable_task(src_rq, this_cpu);
/*
* Do we have an RT task that preempts
* the to-be-scheduled task?
*/
if (p && (p->prio < this_rq->rt.highest_prio.curr)) {
WARN_ON(p == src_rq->curr);
WARN_ON(!task_on_rq_queued(p));
/*
* There's a chance that p is higher in priority
* than what's currently running on its cpu.
* This is just that p is waking up and hasn't
* had a chance to schedule. We only pull
* p if it is lower in priority than the
* current task on the run queue
*/
if (p->prio < src_rq->curr->prio)
goto skip;
resched = true;
p->on_rq = TASK_ON_RQ_MIGRATING;
deactivate_task(src_rq, p, 0);
set_task_cpu(p, this_cpu);
activate_task(this_rq, p, 0);
p->on_rq = TASK_ON_RQ_QUEUED;
/*
* We continue with the search, just in
* case there's an even higher prio task
* in another runqueue. (low likelihood
* but possible)
*/
}
skip:
double_unlock_balance(this_rq, src_rq);
}
if (resched)
resched_curr(this_rq);
}
/*
* If we are not running and we are not going to reschedule soon, we should
* try to push tasks away now
*/
static void task_woken_rt(struct rq *rq, struct task_struct *p)
{
if (!task_running(rq, p) &&
!test_tsk_need_resched(rq->curr) &&
tsk_nr_cpus_allowed(p) > 1 &&
(dl_task(rq->curr) || rt_task(rq->curr)) &&
(tsk_nr_cpus_allowed(rq->curr) < 2 ||
rq->curr->prio <= p->prio))
push_rt_tasks(rq);
}
/* Assumes rq->lock is held */
static void rq_online_rt(struct rq *rq)
{
if (rq->rt.overloaded)
rt_set_overload(rq);
__enable_runtime(rq);
cpupri_set(&rq->rd->cpupri, rq->cpu, rq->rt.highest_prio.curr);
}
/* Assumes rq->lock is held */
static void rq_offline_rt(struct rq *rq)
{
if (rq->rt.overloaded)
rt_clear_overload(rq);
__disable_runtime(rq);
cpupri_set(&rq->rd->cpupri, rq->cpu, CPUPRI_INVALID);
}
/*
* When switching from the rt queue, we bring ourselves to a position
* where we might want to pull RT tasks from other runqueues.
*/
static void switched_from_rt(struct rq *rq, struct task_struct *p)
{
/*
* If there are other RT tasks then we will reschedule
* and the scheduling of the other RT tasks will handle
* the balancing. But if we are the last RT task
* we may need to handle the pulling of RT tasks
* now.
*/
if (!task_on_rq_queued(p) || rq->rt.rt_nr_running ||
cpu_isolated(cpu_of(rq)))
return;
queue_pull_task(rq);
}
void __init init_sched_rt_class(void)
{
unsigned int i;
for_each_possible_cpu(i) {
zalloc_cpumask_var_node(&per_cpu(local_cpu_mask, i),
GFP_KERNEL, cpu_to_node(i));
}
}
#endif /* CONFIG_SMP */
/*
* When switching a task to RT, we may overload the runqueue
* with RT tasks. In this case we try to push them off to
* other runqueues.
*/
static void switched_to_rt(struct rq *rq, struct task_struct *p)
{
/*
* If we are already running, then there's nothing
* that needs to be done. But if we are not running
* we may need to preempt the current running task.
* If that current running task is also an RT task
* then see if we can move to another run queue.
*/
if (task_on_rq_queued(p) && rq->curr != p) {
#ifdef CONFIG_SMP
if (tsk_nr_cpus_allowed(p) > 1 && rq->rt.overloaded)
queue_push_tasks(rq);
#endif /* CONFIG_SMP */
if (p->prio < rq->curr->prio && cpu_online(cpu_of(rq)))
resched_curr(rq);
}
}
/*
* Priority of the task has changed. This may cause
* us to initiate a push or pull.
*/
static void
prio_changed_rt(struct rq *rq, struct task_struct *p, int oldprio)
{
if (!task_on_rq_queued(p))
return;
if (rq->curr == p) {
#ifdef CONFIG_SMP
/*
* If our priority decreases while running, we
* may need to pull tasks to this runqueue.
*/
if (oldprio < p->prio)
queue_pull_task(rq);
/*
* If there's a higher priority task waiting to run
* then reschedule.
*/
if (p->prio > rq->rt.highest_prio.curr)
resched_curr(rq);
#else
/* For UP simply resched on drop of prio */
if (oldprio < p->prio)
resched_curr(rq);
#endif /* CONFIG_SMP */
} else {
/*
* This task is not running, but if it is
* greater than the current running task
* then reschedule.
*/
if (p->prio < rq->curr->prio)
resched_curr(rq);
}
}
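/*
* RLIMIT_RTTIME watchdog: p->rt.timeout counts the ticks the task has run
* since it last woke up; once it crosses the soft limit, sched_exp is set so
* the posix-cpu-timer code can act on the overrun.
*/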
static void watchdog(struct rq *rq, struct task_struct *p)
{
unsigned long soft, hard;
/* max may change after cur was read, this will be fixed next tick */
soft = task_rlimit(p, RLIMIT_RTTIME);
hard = task_rlimit_max(p, RLIMIT_RTTIME);
if (soft != RLIM_INFINITY) {
unsigned long next;
if (p->rt.watchdog_stamp != jiffies) {
p->rt.timeout++;
p->rt.watchdog_stamp = jiffies;
}
next = DIV_ROUND_UP(min(soft, hard), USEC_PER_SEC/HZ);
if (p->rt.timeout > next)
p->cputime_expires.sched_exp = p->se.sum_exec_runtime;
}
}
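/*
* Scheduler tick for an RT task: update runtime accounting, run the
* RLIMIT_RTTIME watchdog and handle SCHED_RR timeslice expiry by requeueing
* the task behind its peers.
*/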
static void task_tick_rt(struct rq *rq, struct task_struct *p, int queued)
{
struct sched_rt_entity *rt_se = &p->rt;
update_curr_rt(rq);
watchdog(rq, p);
/*
* RR tasks need a special form of timeslice management.
* FIFO tasks have no timeslices.
*/
if (p->policy != SCHED_RR)
return;
if (--p->rt.time_slice)
return;
p->rt.time_slice = sched_rr_timeslice;
/*
* Requeue to the end of the queue if we (and all of our ancestors) are not
* the only element on the queue
*/
for_each_sched_rt_entity(rt_se) {
if (rt_se->run_list.prev != rt_se->run_list.next) {
requeue_task_rt(rq, p, 0);
resched_curr(rq);
return;
}
}
}
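/*
* Called when the current task enters the RT class (e.g. via
* sched_setscheduler() or PI boosting): stamp exec_start and take it off the
* pushable list, since the running task is never pushed.
*/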
static void set_curr_task_rt(struct rq *rq)
{
struct task_struct *p = rq->curr;
p->se.exec_start = rq_clock_task(rq);
/* The running task is never eligible for pushing */
dequeue_pushable_task(rq, p);
}
static unsigned int get_rr_interval_rt(struct rq *rq, struct task_struct *task)
{
/*
* Time slice is 0 for SCHED_FIFO tasks
*/
if (task->policy == SCHED_RR)
return sched_rr_timeslice;
else
return 0;
}
const struct sched_class rt_sched_class = {
.next = &fair_sched_class,
.enqueue_task = enqueue_task_rt,
.dequeue_task = dequeue_task_rt,
.yield_task = yield_task_rt,
.check_preempt_curr = check_preempt_curr_rt,
.pick_next_task = pick_next_task_rt,
.put_prev_task = put_prev_task_rt,
#ifdef CONFIG_SMP
.select_task_rq = select_task_rq_rt,
.set_cpus_allowed = set_cpus_allowed_common,
.rq_online = rq_online_rt,
.rq_offline = rq_offline_rt,
.task_woken = task_woken_rt,
.switched_from = switched_from_rt,
#endif
.set_curr_task = set_curr_task_rt,
.task_tick = task_tick_rt,
.get_rr_interval = get_rr_interval_rt,
.prio_changed = prio_changed_rt,
.switched_to = switched_to_rt,
.update_curr = update_curr_rt,
#ifdef CONFIG_SCHED_WALT
.fixup_walt_sched_stats = fixup_walt_sched_stats_common,
#endif
};
#ifdef CONFIG_SCHED_DEBUG
extern void print_rt_rq(struct seq_file *m, int cpu, struct rt_rq *rt_rq);
void print_rt_stats(struct seq_file *m, int cpu)
{
rt_rq_iter_t iter;
struct rt_rq *rt_rq;
rcu_read_lock();
for_each_rt_rq(rt_rq, iter, cpu_rq(cpu))
print_rt_rq(m, cpu, rt_rq);
rcu_read_unlock();
}
#endif /* CONFIG_SCHED_DEBUG */