# By Daniel Rosenberg (98) and others
# Via Greg Kroah-Hartman (219) and others
* google/common/android-4.4-p:
ANDROID: xt_qtaguid: fix UAF race
ANDROID: Make vsock virtio packet buff size configurable
ANDROID: cuttlefish_defconfig: add missing CONFIG_BLK_CGROUP
ANDROID: xt_qtaguid: Remove tag_entry from process list on untag
ANDROID: usb: f_accessory: Don't drop NULL reference in acc_disconnect()
ANDROID: usb: f_accessory: Avoid bitfields for shared variables
ANDROID: usb: f_accessory: Cancel any pending work before teardown
ANDROID: usb: f_accessory: Don't corrupt global state on double registration
ANDROID: usb: f_accessory: Fix teardown ordering in acc_release()
ANDROID: usb: f_accessory: Add refcounting to global 'acc_dev'
UPSTREAM: locking/atomic, kref: Add KREF_INIT()
ANDROID: usb: f_accessory: Wrap '_acc_dev' in get()/put() accessors
ANDROID: usb: f_accessory: Remove useless assignment
ANDROID: usb: f_accessory: Remove useless non-debug prints
ANDROID: usb: f_accessory: Remove stale comments
ANDROID: USB: f_accessory: Check dev pointer before decoding ctrl request
ANDROID: usb: gadget: f_accessory: fix CTS test stuck
ANDROID: cuttlefish_defconfig: Disable CONFIG_KSM
UPSTREAM: arm64: SW PAN: Point saved ttbr0 at the zero page when switching to init_mm
UPSTREAM: arm64: kaslr: Fix up the kernel image alignment
UPSTREAM: sched/fair: Fix FTQ noise bench regression
UPSTREAM: dm verity fec: fix bufio leaks
UPSTREAM: arm64: kernel: restrict /dev/mem read() calls to linear region
UPSTREAM: binder: fix incorrect cmd to binder_stat_br
UPSTREAM: arm64: SW PAN: Update saved ttbr0 value on enter_lazy_tlb
UPSTREAM: KVM: arm/arm64: Check pagesize when allocating a hugepage at Stage 2
UPSTREAM: fs/proc/kcore.c: use probe_kernel_read() instead of memcpy()
UPSTREAM: arm64: fix unwind_frame() for filtered out fn for function graph tracing
UPSTREAM: arm64: kpti: Use early_param for kpti= command-line option
UPSTREAM: arm64: kaslr: ensure randomized quantities are clean to the PoC
UPSTREAM: arm64: kaslr: ensure randomized quantities are clean also when kaslr is off
UPSTREAM: staging: android: vsoc: fix copy_from_user overrun
UPSTREAM: arm64/mm: Inhibit huge-vmap with ptdump
UPSTREAM: drivers/perf: arm_pmu: Fix failure path in PM notifier
UPSTREAM: fs/posix_acl.c: fix kernel-doc warnings
UPSTREAM: ext2: fix debug reference to ext2_xattr_cache
UPSTREAM: arm64: alternative: fix build with clang integrated assembler
UPSTREAM: dm verity fec: fix hash block number in verity_fec_decode
ANDROID: Temporarily disable XFRM_USER_COMPAT filtering
BACKPORT: xfrm/compat: Translate 32-bit user_policy from sockptr
BACKPORT: xfrm/compat: Add 32=>64-bit messages translator
UPSTREAM: xfrm/compat: Attach xfrm dumps to 64=>32 bit translator
BACKPORT: xfrm/compat: Add 64=>32-bit messages translator
BACKPORT: xfrm: Provide API to register translator module
UPSTREAM: mm/sl[uo]b: export __kmalloc_track(_node)_caller
ANDROID: Publish uncompressed Image on aarch64
ANDROID: Makefile: append BUILD_NUMBER to version string when defined
UPSTREAM: binder: fix UAF when releasing todo list
ANDROID: fix a bug in quota2
UPSTREAM: binder: Prevent context manager from incrementing ref 0
BACKPORT: xtables: extend matches and targets with .usersize
UPSTREAM: ip6tables: use match, target and data copy_to_user helpers
UPSTREAM: iptables: use match, target and data copy_to_user helpers
UPSTREAM: xtables: add xt_match, xt_target and data copy_to_user functions
ANDROID: cuttlefish_defconfig: Drop built-in cmdline (except nopti)
ANDROID: cuttlefish defconfig - enable mount/net/uts namespaces.
ANDROID: hid: steam: remove BT controller matching
UPSTREAM: HID: steam: Fix input device disappearing
Revert "ext2: fix empty body warnings when -Wextra is used"
Revert "net: ipv6: Fix processing of RAs in presence of VRF"
UPSTREAM: net: socket: set sock->sk to NULL after calling proto_ops::release()
BACKPORT: xfrm: Allow Output Mark to be Updated Using UPDSA
UPSTREAM: socket: close race condition between sock_close() and sockfs_setattr()
UPSTREAM: net: ipv6: Use passed in table for nexthop lookups
ANDROID: cuttlefish_defconfig: Fix dm-verity related options
Revert "ANDROID: dm verity: add minimum prefetch size"
ANDROID: mnt: Propagate remount correctly
BACKPORT: loop: Add LOOP_SET_BLOCK_SIZE in compat ioctl
UPSTREAM: loop: drop caches if offset or block_size are changed
UPSTREAM: loop: add ioctl for changing logical block size
BACKPORT: block/loop: set hw_sectors
ANDROID: cuttlefish_defconfig: Minimally enable EFI
UPSTREAM: bpf: Explicitly memset the bpf_attr structure
FROMLIST: HID: nintendo: add nintendo switch controller driver
UPSTREAM: staging: most: net: fix buffer overflow
UPSTREAM: ALSA: pcm: Add missing copy ops check before clearing buffer
ANDROID: selinux: modify RTM_GETLINK permission
UPSTREAM: HID: input: map digitizer battery usage
UPSTREAM: HID: input: ignore the battery in OKLICK Laser BTmouse
ANDROID: cuttlefish_defconfig: Disable TRANSPARENT_HUGEPAGE
commit e82b9b0727ff ("vhost: introduce vhost_exceeds_weight()")
UPSTREAM: HID: steam: fix deadlock with input devices.
UPSTREAM: HID: steam: fix boot loop with bluetooth firmware
UPSTREAM: HID: steam: remove input device when a hid client is running.
UPSTREAM: HID: steam: use hid_device.driver_data instead of hid_set_drvdata()
UPSTREAM: HID: steam: add missing fields in client initialization
UPSTREAM: HID: steam: add battery device.
UPSTREAM: HID: add driver for Valve Steam Controller
UPSTREAM: HID: sony: Fix memory corruption issue on cleanup.
UPSTREAM: HID: sony: Fix race condition between rumble and device remove.
UPSTREAM: HID: sony: remove redundant check for -ve err
UPSTREAM: HID: sony: Make sure to unregister sensors on failure
UPSTREAM: HID: sony: Make DS4 bt poll interval adjustable
UPSTREAM: HID: sony: Set proper bit flags on DS4 output report
UPSTREAM: HID: sony: DS4 use brighter LED colors
UPSTREAM: HID: sony: Improve navigation controller axis/button mapping
UPSTREAM: HID: sony: Use DS3 MAC address as unique identifier on USB
UPSTREAM: HID: sony: Perform duplicate device check earlier on
UPSTREAM: HID: sony: Expose DS3 motion sensors through separate device
UPSTREAM: HID: sony: Print error on failure to activate DS3 / Navigation controllers
UPSTREAM: HID: sony: DS3 comply to Linux gamepad spec
UPSTREAM: HID: sony: Mark DS4 touchpad device as a pointer
UPSTREAM: HID: sony: Support motion sensor calibration on dongle
UPSTREAM: HID: sony: Make work handling more generic
UPSTREAM: HID: sony: Treat the ds4 dongle as a separate device
UPSTREAM: HID: sony: Remove report descriptor fixup for DS4
UPSTREAM: HID: sony: Report hardware timestamp for DS4 sensor values
UPSTREAM: HID: sony: Calibrate DS4 motion sensors
UPSTREAM: HID: sony: Report DS4 motion sensors through a separate device
UPSTREAM: HID: sony: Fix input device leak when connecting a DS4 twice using USB/BT
UPSTREAM: HID: sony: Use LED_CORE_SUSPENDRESUME
UPSTREAM: HID: sony: Ignore DS4 dongle reports when no device is connected
UPSTREAM: HID: sony: Use DS4 MAC address as unique identifier on USB
UPSTREAM: HID: sony: Fix error handling bug when touchpad registration fails
UPSTREAM: HID: sony: Comply to Linux gamepad spec for DS4
UPSTREAM: HID: sony: Make the DS4 touchpad a separate device
UPSTREAM: HID: sony: Fix memory issue when connecting device using both Bluetooth and USB
UPSTREAM: HID: sony: Adjust value range for motion sensors
UPSTREAM: HID: sony: Handle multiple touch events input record
UPSTREAM: HID: sony: Send ds4 output reports on output end-point
UPSTREAM: HID: sony: Perform CRC check on bluetooth input packets
UPSTREAM: HID: sony: Adjust HID report size name definitions
UPSTREAM: HID: sony: Fix race condition in sony_probe
UPSTREAM: HID: sony: Update copyright and add Dualshock 4 rate control note
UPSTREAM: HID: sony: Defer the initial USB Sixaxis output report
UPSTREAM: HID: sony: Relax duplicate checking for USB-only devices
UPSTREAM: HID: sony: underscores are unnecessary for u8, u16, s32
UPSTREAM: HID: sony: fix some warnings from scripts/checkpatch.pl
UPSTREAM: HID: sony: fix errors from scripts/checkpatch.pl
UPSTREAM: HID: sony: fix a typo in descriptors comments s/Joystik/Joystick/
UPSTREAM: HID: sony: Fixup output reports for the nyko core controller
UPSTREAM: HID: sony: Remove the size check for the Dualshock 4 HID Descriptor
UPSTREAM: HID: sony: Save and restore the controller state on suspend and resume
UPSTREAM: HID: sony: Refactor the output report sending functions
ANDROID: cpufreq: times: add /proc/uid_concurrent_{active,policy}_time
rtlwifi: Fix potential overflow on P2P code
ANDROID: clang: update to 9.0.8 based on r365631c
ANDROID: move up spin_unlock_bh() ahead of remove_proc_entry()
ANDROID: refactor build.config files to remove duplication
ANDROID: usb: gadget: Fix dependency for f_accessory
Remove taskname from lowmemorykiller kill reports
ANDROID: Fixes to locking around handle_lmk_event
Revert "ANDROID: regression introduced override_creds=off"
ANDROID: regression introduced override_creds=off
Fix fallout from changes to bootparam_utils.h
ANDROID: sched: Disallow WALT with CFS bandwidth control
ANDROID: fiq_debugger: remove
ANDROID: arm64: fix leftover RWX when using CONFIG_UNMAP_KERNEL_AT_EL0
ANDROID: fix kernelci build-break in lowmemorykiller
ANDROID: Avoid taking multiple locks in handle_lmk_event
UPSTREAM: net-ipv6-ndisc: add support for RFC7710 RA Captive Portal Identifier
ANDROID: fix binder change in merge of 4.4.183
Fix overlayfs build break
binder: fix possible UAF when freeing buffer
ANDROID: Revert "f2fs: avoid out-of-range memory access"
ANDROID: overlayfs: Fix a regression in commit b24be4acd
ANDROID: enable CONFIG_RTC_DRV_TEST on cuttlefish
ANDROID: xfrm: remove in_compat_syscall() checks
BACKPORT: binder: Set end of SG buffer area properly.
UPSTREAM: binder: check for overflow when alloc for security context
BACKPORT: binder: fix race between munmap() and direct reclaim
ANDROID: cuttlefish 4.4: enable CONFIG_CRYPTO_AES_NI_INTEL=y
ANDROID: cuttlefish_defconfig: Disable DEVTMPFS
ANDROID: cuttlefish_defconfig: Enable CONFIG_CPUSETS and CONFIG_CGROUP_SCHEDTUNE
ANDROID: cuttlefish_defconfig: Drop dead CRYPTO options
UPSTREAM: virtio: new feature to detect IOMMU device quirk
UPSTREAM: vring: Use the DMA API on Xen
UPSTREAM: virtio_ring: Support DMA APIs
UPSTREAM: vring: Introduce vring_use_dma_api()
ANDROID: cuttlefish_defconfig: L2TP/PPTP to OLAC/OPNS
ANDROID: cuttlefish_defconfig: Enable DEBUG_SET_MODULE_RONX
ANDROID: Fix cuttlefish redundant vsock connection.
ANDROID: cuttlefish_defconfig: Enable CONFIG_RTC_HCTOSYS
ANDROID: Move from clang r349610 to r353983c.
Make arm64 serial port config compatible with crosvm
UPSTREAM: virt_wifi: Remove REGULATORY_WIPHY_SELF_MANAGED
ANDROID: cuttlefish_defconfig: Add support for AC97 audio
ANDROID: Move from clang r346389b to r349610.
ANDROID: cuttlefish_defconfig: Enable vsock options
UPSTREAM: vhost/vsock: fix reset orphans race with close timeout
UPSTREAM: vhost/vsock: fix use-after-free in network stack callers
UPSTREAM: vhost: correctly check the iova range when waking virtqueue
UPSTREAM: vhost: synchronize IOTLB message with dev cleanup
UPSTREAM: vhost: fix info leak due to uninitialized memory
UPSTREAM: vhost: fix vhost_vq_access_ok() log check
UPSTREAM: vhost: validate log when IOTLB is enabled
UPSTREAM: vhost_net: add missing lock nesting notation
UPSTREAM: vhost: use mutex_lock_nested() in vhost_dev_lock_vqs()
UPSTREAM: vhost/vsock: fix uninitialized vhost_vsock->guest_cid
UPSTREAM: vhost_net: correctly check tx avail during rx busy polling
UPSTREAM: vsock: use new wait API for vsock_stream_sendmsg()
UPSTREAM: vsock: cancel packets when failing to connect
UPSTREAM: vhost-vsock: add pkt cancel capability
UPSTREAM: vsock: track pkt owner vsock
UPSTREAM: vhost: fix initialization for vq->is_le
UPSTREAM: vhost/vsock: handle vhost_vq_init_access() error
UPSTREAM: vsock: lookup and setup guest_cid inside vhost_vsock_lock
UPSTREAM: vhost-vsock: fix orphan connection reset
UPSTREAM: vsock/virtio: fix src/dst cid format
UPSTREAM: VSOCK: Don't dec ack backlog twice for rejected connections
UPSTREAM: vhost/vsock: drop space available check for TX vq
UPSTREAM: virtio-vsock: fix include guard typo
UPSTREAM: vhost/vsock: fix vhost virtio_vsock_pkt use-after-free
UPSTREAM: VSOCK: Use kvfree()
BACKPORT: vhost: split out vringh Kconfig
UPSTREAM: vhost: drop vringh dependency
UPSTREAM: vhost: drop vringh dependency
UPSTREAM: vhost: detect 32 bit integer wrap around
UPSTREAM: VSOCK: Add Makefile and Kconfig
UPSTREAM: VSOCK: Introduce vhost_vsock.ko
UPSTREAM: VSOCK: Introduce virtio_transport.ko
BACKPORT: VSOCK: Introduce virtio_vsock_common.ko
UPSTREAM: VSOCK: defer sock removal to transports
UPSTREAM: VSOCK: transport-specific vsock_transport functions
UPSTREAM: vsock: make listener child lock ordering explicit
UPSTREAM: vhost: new device IOTLB API
BACKPORT: vhost: convert pre sorted vhost memory array to interval tree
UPSTREAM: vhost: introduce vhost memory accessors
UPSTREAM: vhost_net: stop polling socket during rx processing
UPSTREAM: VSOCK: constify vsock_transport structure
UPSTREAM: vhost: lockless enqueuing
UPSTREAM: vhost: simplify work flushing
UPSTREAM: VSOCK: Only check error on skb_recv_datagram when skb is NULL
BACKPORT: AF_VSOCK: Shrink the area influenced by prepare_to_wait
UPSTREAM: vhost_net: basic polling support
UPSTREAM: vhost: introduce vhost_vq_avail_empty()
UPSTREAM: vhost: introduce vhost_has_work()
UPSTREAM: vhost: rename vhost_init_used()
UPSTREAM: vhost: rename cross-endian helpers
UPSTREAM: vhost: fix error path in vhost_init_used()
UPSTREAM: virtio: make find_vqs() checkpatch.pl-friendly
UPSTREAM: net: move napi_hash[] into read mostly section
ANDROID: cuttlefish_defconfig: Enable VIRTIO_INPUT
ANDROID: cuttlefish_defconfig: Enable VIRT_WIFI
FROMGIT, BACKPORT: mac80211-next: rtnetlink wifi simulation device
ANDROID: Move from clang r328903 to r346389b.
ANDROID: arm64 defconfig / build config for cuttlefish
ANDROID: Communicate LMK events to userland where they can be logged
Fix merge issue with 4.4.178
Fix merge issue with 4.4.177
FROMGIT: binder: create node flag to request sender's security context
ion: Disable ION_HEAP_TYPE_SYSTEM_CONTIG
ANDROID: uid_sys_stats: Copy task_struct comm field to bigger buffer
UPSTREAM: binder: fix race that allows malicious free of live buffer
Makefile: Tidy up 4.4.165 merge
ANDROID: sdcardfs: Change current->fs under lock
ANDROID: sdcardfs: Don't use OVERRIDE_CRED macro
arm64/vdso: Fix nsec handling for CLOCK_MONOTONIC_RAW
ANDROID: arm64: mm: fix 4.4.154 merge
Fix backport of "tcp: detect malicious patterns in tcp_collapse_ofo_queue()"
tcp: detect malicious patterns in tcp_collapse_ofo_queue()
tcp: avoid collapses in tcp_prune_queue() if possible
x86_64_cuttlefish_defconfig: Enable android-verity
x86_64_cuttlefish_defconfig: enable verity cert
ANDROID: Fix massive cpufreq_times memory leaks
ANDROID: Reduce use of #ifdef CONFIG_CPU_FREQ_TIMES
UPSTREAM: binder: replace "%p" with "%pK"
UPSTREAM: binder: free memory on error
UPSTREAM: binder: fix proc->files use-after-free
UPSTREAM: Revert "FROMLIST: binder: fix proc->files use-after-free"
UPSTREAM: ANDROID: binder: change down_write to down_read
UPSTREAM: ANDROID: binder: correct the cmd print for BINDER_WORK_RETURN_ERROR
UPSTREAM: ANDROID: binder: remove 32-bit binder interface.
UPSTREAM: ANDROID: binder: re-order some conditions
UPSTREAM: android: binder: use VM_ALLOC to get vm area
UPSTREAM: android: binder: Use true and false for boolean values
UPSTREAM: android: binder: Use octal permissions
UPSTREAM: android: binder: Prefer __func__ to using hardcoded function name
UPSTREAM: ANDROID: binder: make binder_alloc_new_buf_locked static and indent its arguments
UPSTREAM: android: binder: Check for errors in binder_alloc_shrinker_init().
cpufreq: Kconfig: Remove CPU_FREQ_DEFAULT_GOV_SCHED
ANDROID: Add kconfig to make dm-verity check_at_most_once default enabled
ANDROID: sdcardfs: fix potential crash when reserved_mb is not zero
ANDROID: Update arm64 ranchu64_defconfig
FROMLIST: f2fs: run fstrim asynchronously if runtime discard is on
goldfish: pipe: ANDROID: address must be written as __pa(x), not x
goldfish: pipe: ANDROID: add missing check for memory allocated
goldfish: pipe: ANDROID: remove redundant blank lines
Update arch/x86/configs/x86_64_ranchu_defconfig
ANDROID: x86_64_cuttlefish_defconfig: Enable F2FS
ANDROID: Update x86_64_cuttlefish_defconfig
FROMLIST: f2fs: early updates queued for v4.18-rc1
Revert "vti4: Don't override MTU passed on link creation via IFLA_MTU"
UPSTREAM: sched/fair: Consider RT/IRQ pressure in capacity_spare_wake
BACKPORT, FROMLIST: fscrypt: add Speck128/256 support
cgroup: Disable IRQs while holding css_set_lock
Revert "cgroup: Disable IRQs while holding css_set_lock"
cgroup: Disable IRQs while holding css_set_lock
ANDROID: proc: fix undefined behavior in proc_uid_base_readdir
x86: vdso: Fix leaky vdso linker with CC=clang.
ANDROID: build: cuttlefish: Upgrade clang to newer version.
ANDROID: build: cuttlefish: Upgrade clang to newer version.
ANDROID: build: cuttlefish: Fix path to clang.
UPSTREAM: dm bufio: avoid sleeping while holding the dm_bufio lock
ANDROID: sdcardfs: Don't d_drop in d_revalidate
goldfish: pipe: ANDROID: mark local functions static
Revert "goldfish: pipe: ANDROID: Allocate memory with GFP_KERNEL."
UPSTREAM: ANDROID: binder: prevent transactions into own process.
goldfish: pipe: ANDROID: Add DMA support
UPSTREAM: f2fs: clear PageError on writepage - part 2
UPSTREAM: f2fs: avoid fsync() failure caused by EAGAIN in writepage()
ANDROID: build.config: enforce trace_printk check
ANDROID: x86_64_cuttlefish_defconfig: Disable KPTI
UPSTREAM: mac80211: ibss: Fix channel type enum in ieee80211_sta_join_ibss()
UPSTREAM: mac80211: Fix clang warning about constant operand in logical operation
UPSTREAM: nl80211: Fix enum type of variable in nl80211_put_sta_rate()
UPSTREAM: sysfs: remove signedness from sysfs_get_dirent
UPSTREAM: tracing: Use cpumask_available() to check if cpumask variable may be used
BACKPORT: clocksource: Use GENMASK_ULL in definition of CLOCKSOURCE_MASK
UPSTREAM: netpoll: Fix device name check in netpoll_setup()
FROMLIST: staging: Fix sparse warnings in vsoc driver.
FROMLIST: staging: vsoc: Fix an i386-randconfig warning.
FROMLIST: staging: vsoc: Create wc kernel mapping for region shm.
Revert "goldfish: pipe: ANDROID: remove a redundant target"
goldfish: pipe: ANDROID: Replace writel with gf_write_ptr
goldfish: pipe: ANDROID: Use dev_ logging instead of pr_
goldfish: pipe: ANDROID: fix checkpatch warnings
goldfish: pipe: ANDROID: Update module license
goldfish: pipe: ANDROID: Allocate memory with GFP_KERNEL.
goldfish: pipe: ANDROID: Do not crash
goldfish: pipe: ANDROID: remove redundant casting
goldfish: pipe: ANDROID: Add 'pipe' to pipe functions
goldfish: pipe: ANDROID: fix whitespace
goldfish: pipe: ANDROID: rename global variables
goldfish: pipe: ANDROID: remove a redundant target
goldfish: pipe: ANDROID: add blank lines
goldfish: pipe: ANDROID: replace 'BUG_ON' with 'BUILD_BUG_ON'
goldfish: pipe: ANDROID: use the 'BIT' macro for wakeup flags
goldfish: pipe: ANDROID: fix logging format strings
UPSTREAM: tracing: always define trace_{irq,preempt}_{enable_disable}
ANDROID: staging: ion: Obey kptr_restrict
ANDROID: sdcardfs: Set s_root to NULL after putting
ANDROID: sdcardfs: d_make_root calls iput
ANDROID: sdcardfs: Check for private data earlier
f2fs: check cap_resource only for data blocks
Revert "f2fs: introduce f2fs_set_page_dirty_nobuffer"
f2fs: clear PageError on writepage
UPSTREAM: timer: Export destroy_hrtimer_on_stack()
BACKPORT: dm verity: add 'check_at_most_once' option to only validate hashes once
f2fs: call unlock_new_inode() before d_instantiate()
f2fs: refactor read path to allow multiple postprocessing steps
fscrypt: allow synchronous bio decryption
ANDROID: Add build server config for cuttlefish.
ANDROID: Add defconfig for cuttlefish.
FROMLIST: staging: Android: Add 'vsoc' driver for cuttlefish.
Revert "proc: make oom adjustment files user read-only"
Revert "fixup! proc: make oom adjustment files user read-only"
ANDROID: proc: add null check in proc_uid_init
f2fs/fscrypt: updates to v4.17-rc1
Reduce amount of casting in drivers/tty/goldfish.c.
Replace #define with enum for better compilation errors.
Add missing include to drivers/tty/goldfish.c
Fix whitespace in drivers/tty/goldfish.c
ANDROID: fuse: Add null terminator to path in canonical path to avoid issue
ANDROID: sdcardfs: Fix sdcardfs to stop creating case-sensitive duplicate entries.
ANDROID: add missing include to pdev_bus
ANDROID: pdev_bus: replace writel with gf_write_ptr
ANDROID: Cleanup type casting in goldfish.h
ANDROID: Include missing headers in goldfish.h
ANDROID: cpufreq: times: skip printing invalid frequencies
ANDROID: xt_qtaguid: Remove unnecessary null checks to device's name
ANDROID: xt_qtaguid: Remove unnecessary null checks to ifa_label
ANDROID: cpufreq: times: allocate enough space for a uid_entry
Revert "genirq: Use irqd_get_trigger_type to compare the trigger type for shared IRQs"
UPSTREAM: drm: virtio-gpu: set atomic flag
UPSTREAM: drm: virtio-gpu: transfer dumb buffers to host on plane update
UPSTREAM: drm: virtio-gpu: ensure plane is flushed to host on atomic update
UPSTREAM: drm: virtio-gpu: get the fb from the plane state for atomic updates
ANDROID: cpufreq: times: fix proc_time_in_state_show
dtc: turn off dtc unit address warnings by default
BACKPORT, FROMLIST: crypto: arm64/speck - add NEON-accelerated implementation of Speck-XTS
ANDROID: cpufreq: times: avoid prematurely freeing uid_entry
ANDROID: Use standard logging functions in goldfish_pipe
ANDROID: Fix whitespace in goldfish
staging: android: ashmem: Fix possible deadlock in ashmem_ioctl
llist: clang: introduce member_address_is_nonnull()
ANDROID: ranchu: 32 bit framebuffer support
ANDROID: Address checkpatch warnings in goldfishfb
ANDROID: Address checkpatch.pl warnings in goldfish_pipe
ANDROID: sdcardfs: fix lock issue on 32 bit/SMP architectures
ANDROID: goldfish: Fix typo in goldfish_cmd_locked() call
ANDROID: Address checkpatch.pl warnings in goldfish_pipe_v2
FROMLIST: f2fs: don't put dentry page in pagecache into highmem
ANDROID: Delete the goldfish_nand driver.
ANDROID: Add input support for Android Wear.
ANDROID: proc: fix config & includes for /proc/uid
FROMLIST: ARM: amba: Don't read past the end of sysfs "driver_override" buffer
UPSTREAM: ANDROID: binder: remove WARN() for redundant txn error
ANDROID: cpufreq: times: Add missing includes
ANDROID: cpufreq: Add time_in_state to /proc/uid directories
ANDROID: proc: Add /proc/uid directory
ANDROID: cpufreq: times: track per-uid time in state
ANDROID: cpufreq: track per-task time in state
ANDROID: keychord: Check for write data size
Revert "binder: add missing binder_unlock()"
ANDROID: sdcardfs: Set num in extension_details during make_item
ANDROID: sdcardfs: Hold i_mutex for i_size_write
BACKPORT, FROMGIT: crypto: speck - add test vectors for Speck64-XTS
BACKPORT, FROMGIT: crypto: speck - add test vectors for Speck128-XTS
BACKPORT, FROMGIT: crypto: arm/speck - add NEON-accelerated implementation of Speck-XTS
FROMGIT: crypto: speck - export common helpers
BACKPORT, FROMGIT: crypto: speck - add support for the Speck block cipher
UPSTREAM: ANDROID: binder: synchronize_rcu() when using POLLFREE.
f2fs: updates on v4.16-rc1
BACKPORT: tee: shm: Potential NULL dereference calling tee_shm_register()
BACKPORT: tee: shm: don't put_page on null shm->pages
BACKPORT: tee: shm: make function __tee_shm_alloc static
BACKPORT: tee: optee: check type of registered shared memory
BACKPORT: tee: add start argument to shm_register callback
BACKPORT: tee: optee: fix header dependencies
BACKPORT: tee: shm: inline tee_shm_get_id()
BACKPORT: tee: use reference counting for tee_context
BACKPORT: tee: optee: enable dynamic SHM support
BACKPORT: tee: optee: add optee-specific shared pool implementation
BACKPORT: tee: optee: store OP-TEE capabilities in private data
BACKPORT: tee: optee: add registered buffers handling into RPC calls
BACKPORT: tee: optee: add registered shared parameters handling
BACKPORT: tee: optee: add shared buffer registration functions
BACKPORT: tee: optee: add page list manipulation functions
BACKPORT: tee: optee: Update protocol definitions
BACKPORT: tee: shm: add page accessor functions
BACKPORT: tee: shm: add accessors for buffer size and page offset
BACKPORT: tee: add register user memory
BACKPORT: tee: flexible shared memory pool creation
BACKPORT: optee: support asynchronous supplicant requests
BACKPORT: tee: add TEE_IOCTL_PARAM_ATTR_META
BACKPORT: tee: add tee_param_is_memref() for driver use
f2fs: fix build error with multiply defined inode_nohighmem()
BACKPORT: xfrm: Fix return value check of copy_sec_ctx.
time: Fix ktime_get_raw() incorrect base accumulation
sched/fair: prevent possible infinite loop in sched_group_energy
UPSTREAM: MIPS: Fix build of compressed image
ANDROID: qtaguid: Fix the UAF problem with tag_ref_tree
UPSTREAM: ANDROID: binder: remove waitqueue when thread exits.
UPSTREAM: arm64/efi: Make strnlen() available to the EFI namespace
UPSTREAM: ARM: boot: Add an implementation of strnlen for libfdt
ANDROID: MIPS: Add ranchu[32r5|32r6|64]_defconfig
FROMLIST: tty: goldfish: Enable 'earlycon' only if built-in
FROMLIST: MIPS: ranchu: Add Ranchu as a new generic-based board
FROMLIST: MIPS: Add noexec=on|off kernel parameter
FROMLIST: MIPS: CPC: Map registers using DT in mips_cpc_default_phys_base()
FROMLIST: dt-bindings: Document mti,mips-cpc binding
FROMLIST: MIPS: math-emu: Mark fall throughs in switch statements with a comment
FROMLIST: MIPS: math-emu: Avoid multiple assignment
FROMLIST: MIPS: math-emu: Avoid an assignment within if statement condition
FROMLIST: MIPS: math-emu: Declare function srl128() as static
FROMLIST: MIPS: math-emu: Avoid definition duplication for macro DPXMULT()
FROMLIST: MIPS: math-emu: Remove an unnecessary header inclusion
UPSTREAM: scripts/dtc: Update to upstream version 0931cea3ba20
UPSTREAM: scripts/dtc: dt_to_config - kernel config options for a devicetree
UPSTREAM: scripts/dtc: Update to upstream version 53bf130b1cdd
UPSTREAM: scripts/dtc: Update to upstream commit b06e55c88b9b
UPSTREAM: scripts/dtc: dtx_diff - add info to error message
UPSTREAM: dtc: create tool to diff device trees
UPSTREAM: config: android-base: disable CONFIG_NFSD and CONFIG_NFS_FS
UPSTREAM: config: android-base: add CGROUP_BPF
UPSTREAM: config: android-base: add CONFIG_MODULES option
UPSTREAM: config: android-base: add CONFIG_IKCONFIG option
UPSTREAM: config: android-base: disable CONFIG_USELIB and CONFIG_FHANDLE
UPSTREAM: config: android-base: enable hardened usercopy and kernel ASLR
UPSTREAM: config: android: enable CONFIG_SECCOMP
UPSTREAM: config: android: set SELinux as default security mode
UPSTREAM: config: android: move device mapper options to recommended
UPSTREAM: config/android: Remove CONFIG_IPV6_PRIVACY
UPSTREAM: config: add android config fragments
BACKPORT: MIPS: generic: Add a MAINTAINERS entry
BACKPORT: irqchip/irq-goldfish-pic: Add Goldfish PIC driver
UPSTREAM: dt-bindings/goldfish-pic: Add device tree binding for Goldfish PIC driver
UPSTREAM: MIPS: Allow storing pgd in C0_CONTEXT for MIPSr6
UPSTREAM: MIPS: CPS: Handle spurious VP starts more gracefully
UPSTREAM: MIPS: CPS: Handle cores not powering down more gracefully
UPSTREAM: MIPS: CPS: Prevent multi-core with dcache aliasing
UPSTREAM: MIPS: CPS: Select CONFIG_SYS_SUPPORTS_SCHED_SMT for MIPSr6
UPSTREAM: MIPS: CM: WARN on attempt to lock invalid VP, not BUG
UPSTREAM: MIPS: CM: Avoid per-core locking with CM3 & higher
UPSTREAM: MIPS: smp-cps: Avoid BUG() when offlining pre-r6 CPUs
UPSTREAM: MIPS: smp-cps: Add support for CPU hotplug of MIPSr6 processors
UPSTREAM: MIPS: generic: Bump default NR_CPUS to 16
UPSTREAM: MIPS: pm-cps: Change FSB workaround to CPU blacklist
UPSTREAM: MIPS: Fix early CM probing
UPSTREAM: MIPS: smp-cps: Stop printing EJTAG exceptions to UART
UPSTREAM: MIPS: smp-cps: Add nothreads kernel parameter
UPSTREAM: MIPS: smp-cps: Support MIPSr6 Virtual Processors
UPSTREAM: MIPS: smp-cps: Skip core setup if coherent
UPSTREAM: MIPS: smp-cps: Pull boot config retrieval out of mips_cps_boot_vpes
UPSTREAM: MIPS: smp-cps: Pull cache init into a function
UPSTREAM: MIPS: smp-cps: Ensure our VP ident calculation is correct
UPSTREAM: irqchip: mips-gic: Provide VP ID accessor
UPSTREAM: irqchip: mips-gic: Use HW IDs for VPE_OTHER_ADDR
UPSTREAM: MIPS: CM: Fix mips_cm_max_vp_width for UP kernels
UPSTREAM: MIPS: CM: Add CM GCR_BEV_BASE accessors
UPSTREAM: MIPS: CPC: Add start, stop and running CM3 CPC registers
UPSTREAM: MIPS: pm-cps: Avoid offset overflow on MIPSr6
UPSTREAM: MIPS: traps: Make sure secondary cores have a sane ebase register
UPSTREAM: MIPS: Detect MIPSr6 Virtual Processor support
UPSTREAM: Documentation: Add device tree binding for Goldfish FB driver
UPSTREAM: MIPS: math-emu: Use preferred flavor of unsigned integer declarations
UPSTREAM: MIPS: math-emu: <MADDF|MSUBF>.D: Fix accuracy (64-bit case)
UPSTREAM: MIPS: math-emu: <MADDF|MSUBF>.S: Fix accuracy (32-bit case)
UPSTREAM: MIPS: Update Goldfish RTC driver maintainer email address
UPSTREAM: MIPS: Update RINT emulation maintainer email address
UPSTREAM: MIPS: math-emu: do not use bools for arithmetic
UPSTREAM: rtc: goldfish: Add RTC driver for Android emulator
BACKPORT: dt-bindings: Add device tree binding for Goldfish RTC driver
UPSTREAM: tty: goldfish: Implement support for kernel 'earlycon' parameter
UPSTREAM: tty: goldfish: Use streaming DMA for r/w operations on Ranchu platforms
UPSTREAM: tty: goldfish: Refactor constants to better reflect their nature
UPSTREAM: MIPS: math-emu: Add FP emu debugfs stats for individual instructions
UPSTREAM: MIPS: math-emu: Add FP emu debugfs clear functionality
UPSTREAM: MIPS: math-emu: Add FP emu debugfs statistics for branches
BACKPORT: MIPS: math-emu: CLASS.D: Zero bits 32-63 of the result
BACKPORT: MIPS: math-emu: RINT.<D|S>: Fix several problems by reimplementation
UPSTREAM: MIPS: math-emu: CMP.Sxxx.<D|S>: Prevent occurrences of SIGILL crashes
UPSTREAM: MIPS: math-emu: <MADDF|MSUBF>.<D|S>: Clean up "maddf_flags" enumeration
UPSTREAM: MIPS: math-emu: <MADDF|MSUBF>.<D|S>: Fix some cases of zero inputs
UPSTREAM: MIPS: math-emu: <MADDF|MSUBF>.<D|S>: Fix some cases of infinite inputs
UPSTREAM: MIPS: math-emu: <MADDF|MSUBF>.<D|S>: Fix NaN propagation
UPSTREAM: tty: goldfish: Fix a parameter of a call to free_irq
UPSTREAM: MIPS: VDSO: Fix clobber lists in fallback code paths
UPSTREAM: MIPS: VDSO: Fix a mismatch between comment and preprocessor constant
UPSTREAM: MIPS: VDSO: Add implementation of gettimeofday() fallback
UPSTREAM: MIPS: VDSO: Add implementation of clock_gettime() fallback
UPSTREAM: MIPS: VDSO: Fix conversions in do_monotonic()/do_monotonic_coarse()
UPSTREAM: MIPS: unaligned: Add DSP lwx & lhx misaligned access support
UPSTREAM: MIPS: build: Fix "-modd-spreg" switch usage when compiling for mips32r6
UPSTREAM: MIPS: cmdline: Add support for 'memmap' parameter
UPSTREAM: MIPS: math-emu: Handle zero accumulator case in MADDF and MSUBF separately
UPSTREAM: MIPS: Support per-device DMA coherence
UPSTREAM: MIPS: dma-default: Don't check hw_coherentio if device is non-coherent
UPSTREAM: MIPS: Sanitise coherentio semantics
UPSTREAM: MIPS: CPC: Provide default mips_cpc_default_phys_base to ignore CPC
UPSTREAM: MIPS: generic: Introduce generic DT-based board support
UPSTREAM: MIPS: Support generating Flattened Image Trees (.itb)
UPSTREAM: MIPS: Allow emulation for unaligned [LS]DXC1 instructions
UPSTREAM: MIPS: math-emu: Fix BC1EQZ and BC1NEZ condition handling
UPSTREAM: MIPS: r2-on-r6-emu: Clear BLTZALL and BGEZALL debugfs counters
UPSTREAM: MIPS: r2-on-r6-emu: Fix BLEZL and BGTZL identification
UPSTREAM: MIPS: remove aliasing alignment if HW has antialiasing support
BACKPORT: MIPS: store the appended dtb address in a variable
UPSTREAM: MIPS: Fix FCSR Cause bit handling for correct SIGFPE issue
UPSTREAM: MIPS: kernel: Audit and remove any unnecessary uses of module.h
UPSTREAM: MIPS: c-r4k: Fix sigtramp SMP call to use kmap
UPSTREAM: MIPS: c-r4k: Fix protected_writeback_scache_line for EVA
UPSTREAM: MIPS: Spelling fix lets -> let's
UPSTREAM: MIPS: R6: Fix typo
UPSTREAM: MIPS: traps: Correct the SIGTRAP debug ABI in `do_watch' and `do_trap_or_bp'
UPSTREAM: MIPS: inst.h: Rename cbcond{0,1}_op to pop{1,3}0_op
UPSTREAM: MIPS: inst.h: Rename b{eq,ne}zcji[al]c_op to pop{6,7}6_op
UPSTREAM: MIPS: math-emu: Fix m{add,sub}.s shifts
UPSTREAM: MIPS: inst: Declare fsel_op for sel.fmt instruction
UPSTREAM: MIPS: math-emu: Fix code indentation
UPSTREAM: MIPS: math-emu: Fix bit-width in ieee754dp_{mul, maddf, msubf} comments
UPSTREAM: MIPS: math-emu: Add z argument macros
UPSTREAM: MIPS: math-emu: Unify ieee754dp_m{add,sub}f
UPSTREAM: MIPS: math-emu: Unify ieee754sp_m{add,sub}f
UPSTREAM: MIPS: math-emu: Emulate MIPSr6 sel.fmt instruction
UPSTREAM: MIPS: math-emu: Fix BC1{EQ,NE}Z emulation
UPSTREAM: MIPS: math-emu: Always propagate sNaN payload in quieting
UPSTREAM: MIPS: Fix misspellings in comments.
UPSTREAM: MIPS: math-emu: Add IEEE Std 754-2008 NaN encoding emulation
UPSTREAM: MIPS: math-emu: Add IEEE Std 754-2008 ABS.fmt and NEG.fmt emulation
UPSTREAM: MIPS: non-exec stack & heap when non-exec PT_GNU_STACK is present
UPSTREAM: MIPS: Add IEEE Std 754 conformance mode selection
UPSTREAM: MIPS: Determine the presence of IEEE Std 754-2008 features
UPSTREAM: MIPS: Define the legacy-NaN and 2008-NaN features
UPSTREAM: MIPS: ELF: Interpret the NAN2008 file header flag
UPSTREAM: ELF: Also pass any interpreter's file header to `arch_check_elf'
UPSTREAM: MIPS: Use a union to access the ELF file header
UPSTREAM: MIPS: Fix delay slot emulation count in debugfs
BACKPORT: exit_thread: accept a task parameter to be exited
UPSTREAM: mn10300: let exit_fpu accept a task
UPSTREAM: MIPS: Use per-mm page to execute branch delay slot instructions
BACKPORT: s390: get rid of exit_thread()
BACKPORT: exit_thread: remove empty bodies
UPSTREAM: MIPS: Make flush_thread
UPSTREAM: MIPS: Properly disable FPU in start_thread()
UPSTREAM: MIPS: Select CONFIG_HANDLE_DOMAIN_IRQ and make it work.
UPSTREAM: MIPS: math-emu: Fix typo
UPSTREAM: MIPS: math-emu: dsemul: Remove an unused bit in ADDIUPC emulation
UPSTREAM: MIPS: math-emu: dsemul: Reduce `get_isa16_mode' clutter
UPSTREAM: MIPS: math-emu: dsemul: Correct description of the emulation frame
UPSTREAM: MIPS: math-emu: Correct the emulation of microMIPS ADDIUPC instruction
UPSTREAM: MIPS: math-emu: Make microMIPS branch delay slot emulation work
UPSTREAM: MIPS: math-emu: dsemul: Fix ill formatting of microMIPS part
UPSTREAM: MIPS: math-emu: Correctly handle NOP emulation
ANDROID: sdcardfs: Protect set_top
ANDROID: fsnotify: Notify lower fs of open
Revert "ANDROID: sdcardfs: notify lower file of opens"
ANDROID: sdcardfs: Use lower getattr times/size
ANDROID: sched/rt: schedtune: Add boost retention to RT
ANDROID: sched: EAS: check energy_aware() before calling select_energy_cpu_brute() in up-migrate path
UPSTREAM: eventpoll.h: add missing epoll event masks
ANDROID: xattr: Pass EOPNOTSUPP to permission2
ANDROID: sdcardfs: Move default_normal to superblock
blkdev: Refactoring block io latency histogram codes
FROMLIST: arm64: kpti: Fix the interaction between ASID switching and software PAN
FROMLIST: arm64: Move post_ttbr_update_workaround to C code
FROMLIST: arm64: mm: Rename post_ttbr0_update_workaround
sched: EAS: Initialize push_task as NULL to avoid direct reference on out_unlock path
fscrypt: updates on 4.15-rc4
ANDROID: uid_sys_stats: fix the comment
BACKPORT: optee: fix invalid of_node_put() in optee_driver_init()
BACKPORT: tee: optee: sync with new naming of interrupts
BACKPORT: tee: indicate privileged dev in gen_caps
BACKPORT: tee: optee: interruptible RPC sleep
BACKPORT: tee: optee: add const to tee_driver_ops and tee_desc structures
BACKPORT: tee: tee_shm: Constify dma_buf_ops structures.
BACKPORT: tee: add forward declaration for struct device
BACKPORT: tee: optee: fix uninitialized symbol 'parg'
BACKPORT: tee.txt: standardize document format
BACKPORT: tee: add ARM_SMCCC dependency
BACKPORT: selinux: nlmsgtab: add SOCK_DESTROY to the netlink mapping tables
clocksource: arch_timer: make virtual counter access configurable
arm64: issue isb when trapping CNTVCT_EL0 access
BACKPORT: arm64: Add CNTFRQ_EL0 trap handler
BACKPORT: arm64: Add CNTVCT_EL0 trap handler
ANDROID: sdcardfs: Fix missing break on default_normal
ANDROID: usb: f_fs: Prevent gadget unbind if it is already unbound
arm64: Kconfig: Reword UNMAP_KERNEL_AT_EL0 kconfig entry
arm64: use RET instruction for exiting the trampoline
FROMLIST: arm64: kaslr: Put kernel vectors address in separate data page
FROMLIST: arm64: mm: Introduce TTBR_ASID_MASK for getting at the ASID in the TTBR
FROMLIST: arm64: Kconfig: Add CONFIG_UNMAP_KERNEL_AT_EL0
FROMLIST: arm64: entry: Add fake CPU feature for unmapping the kernel at EL0
FROMLIST: arm64: tls: Avoid unconditional zeroing of tpidrro_el0 for native tasks
FROMLIST: arm64: erratum: Work around Falkor erratum #E1003 in trampoline code
FROMLIST: arm64: entry: Hook up entry trampoline to exception vectors
FROMLIST: arm64: entry: Explicitly pass exception level to kernel_ventry macro
FROMLIST: arm64: mm: Map entry trampoline into trampoline and kernel page tables
FROMLIST: arm64: entry: Add exception trampoline page for exceptions from EL0
FROMLIST: arm64: mm: Invalidate both kernel and user ASIDs when performing TLBI
FROMLIST: arm64: mm: Add arm64_kernel_unmapped_at_el0 helper
FROMLIST: arm64: mm: Allocate ASIDs in pairs
FROMLIST: arm64: mm: Fix and re-enable ARM64_SW_TTBR0_PAN
FROMLIST: arm64: mm: Move ASID from TTBR0 to TTBR1
FROMLIST: arm64: mm: Temporarily disable ARM64_SW_TTBR0_PAN
FROMLIST: arm64: mm: Use non-global mappings for kernel space
UPSTREAM: arm64: factor out entry stack manipulation
UPSTREAM: arm64: tlbflush.h: add __tlbi() macro
ANDROID: sdcardfs: Add default_normal option
ANDROID: sdcardfs: notify lower file of opens
ANDROID: binder: Remove obsolete proc waitqueue.
UPSTREAM: arm64: setup: introduce kaslr_offset()
UPSTREAM: kcov: fix comparison callback signature
UPSTREAM: kcov: support comparison operands collection
UPSTREAM: kcov: remove pointless current != NULL check
UPSTREAM: kcov: support compat processes
UPSTREAM: kcov: simplify interrupt check
UPSTREAM: kcov: make kcov work properly with KASLR enabled
UPSTREAM: kcov: add more missing includes
UPSTREAM: kcov: add missing #include <linux/sched.h>
UPSTREAM: kcov: properly check if we are in an interrupt
UPSTREAM: kcov: don't profile branches in kcov
UPSTREAM: kcov: don't trace the code coverage code
BACKPORT: kernel: add kcov code coverage
BACKPORT: irq: Make the irqentry text section unconditional
UPSTREAM: arch, ftrace: for KASAN put hard/soft IRQ entries into separate sections
UPSTREAM: x86, kasan, ftrace: Put APIC interrupt handlers into .irqentry.text
UPSTREAM: kasan: make get_wild_bug_type() static
UPSTREAM: kasan: separate report parts by empty lines
UPSTREAM: kasan: improve double-free report format
UPSTREAM: kasan: print page description after stacks
UPSTREAM: kasan: improve slab object description
UPSTREAM: kasan: change report header
UPSTREAM: kasan: simplify address description logic
UPSTREAM: kasan: change allocation and freeing stack traces headers
UPSTREAM: kasan: unify report headers
UPSTREAM: kasan: introduce helper functions for determining bug type
BACKPORT: kasan: report only the first error by default
UPSTREAM: kasan: fix races in quarantine_remove_cache()
UPSTREAM: kasan: resched in quarantine_remove_cache()
BACKPORT: kasan, sched/headers: Uninline kasan_enable/disable_current()
BACKPORT: kasan: drain quarantine of memcg slab objects
UPSTREAM: kasan: eliminate long stalls during quarantine reduction
UPSTREAM: kasan: support panic_on_warn
UPSTREAM: x86/suspend: fix false positive KASAN warning on suspend/resume
UPSTREAM: kasan: support use-after-scope detection
UPSTREAM: kasan/tests: add tests for user memory access functions
UPSTREAM: mm, kasan: add a ksize() test
UPSTREAM: kasan: test fix: warn if the UAF could not be detected in kmalloc_uaf2
UPSTREAM: kasan: modify kmalloc_large_oob_right(), add kmalloc_pagealloc_oob_right()
UPSTREAM: lib/stackdepot: export save/fetch stack for drivers
UPSTREAM: lib/stackdepot.c: bump stackdepot capacity from 16MB to 128MB
BACKPORT: kprobes: Unpoison stack in jprobe_return() for KASAN
UPSTREAM: kasan: remove the unnecessary WARN_ONCE from quarantine.c
UPSTREAM: kasan: avoid overflowing quarantine size on low memory systems
UPSTREAM: kasan: improve double-free reports
BACKPORT: mm: coalesce split strings
BACKPORT: mm/kasan: get rid of ->state in struct kasan_alloc_meta
UPSTREAM: mm/kasan: get rid of ->alloc_size in struct kasan_alloc_meta
UPSTREAM: mm: kasan: remove unused 'reserved' field from struct kasan_alloc_meta
UPSTREAM: mm/kasan, slub: don't disable interrupts when object leaves quarantine
UPSTREAM: mm/kasan: don't reduce quarantine in atomic contexts
UPSTREAM: mm/kasan: fix corruptions and false positive reports
UPSTREAM: lib/stackdepot.c: use __GFP_NOWARN for stack allocations
BACKPORT: mm, kasan: switch SLUB to stackdepot, enable memory quarantine for SLUB
UPSTREAM: kasan/quarantine: fix bugs on qlist_move_cache()
UPSTREAM: mm: mempool: kasan: don't put mempool objects in quarantine
UPSTREAM: kasan: change memory hot-add error messages to info messages
BACKPORT: mm/kasan: add API to check memory regions
UPSTREAM: mm/kasan: print name of mem[set,cpy,move]() caller in report
UPSTREAM: mm: kasan: initial memory quarantine implementation
UPSTREAM: lib/stackdepot: avoid to return 0 handle
UPSTREAM: lib/stackdepot.c: allow the stack trace hash to be zero
UPSTREAM: mm, kasan: fix compilation for CONFIG_SLAB
BACKPORT: mm, kasan: stackdepot implementation. Enable stackdepot for SLAB
BACKPORT: mm, kasan: add GFP flags to KASAN API
UPSTREAM: mm, kasan: SLAB support
UPSTREAM: mm/slab: align cache size first before determination of OFF_SLAB candidate
UPSTREAM: mm/slab: use more appropriate condition check for debug_pagealloc
UPSTREAM: mm/slab: factor out debugging initialization in cache_init_objs()
UPSTREAM: mm/slab: remove object status buffer for DEBUG_SLAB_LEAK
UPSTREAM: mm/slab: alternative implementation for DEBUG_SLAB_LEAK
UPSTREAM: mm/slab: clean up DEBUG_PAGEALLOC processing code
UPSTREAM: mm/slab: activate debug_pagealloc in SLAB when it is actually enabled
sched: EAS/WALT: Don't take into account running task's util
BACKPORT: schedutil: Reset cached freq if it is not in sync with next_freq
UPSTREAM: kasan: add functions to clear stack poison
ANDROID: initramfs: call free_initrd() when skipping init
Documentation: tee subsystem and op-tee driver
tee: add OP-TEE driver
tee: generic TEE subsystem
dt/bindings: add bindings for optee
kernel.h: add u64_to_user_ptr()
ARM: 8481/2: drivers: psci: replace psci firmware calls
ARM: 8480/2: arm64: add implementation for arm-smccc
ARM: 8479/2: add implementation for arm-smccc
ARM: 8478/2: arm/arm64: add arm-smccc
UPSTREAM: net: xfrm: allow clearing socket xfrm policies.
BACKPORT: time: Clean up CLOCK_MONOTONIC_RAW time handling
BACKPORT: time: Fix CLOCK_MONOTONIC_RAW sub-nanosecond accounting
UPSTREAM: arm64: vdso: fix clock_getres for 4GiB-aligned res
f2fs: updates on 4.15-rc1
UPSTREAM: android: binder: fix type mismatch warning
BACKPORT: arm64: Use __pa_symbol for empty_zero_page
BACKPORT: arm64: Use __pa_symbol for kernel symbols
UPSTREAM: mm: Introduce lm_alias
FROMLIST: binder: fix proc->files use-after-free
ANDROID: binder: clarify deferred thread work.
FROMLIST: arm64: Avoid aligning normal memory pointers in __memcpy_{to,from}io
BACKPORT: xfrm: Clear sk_dst_cache when applying per-socket policy.
Revert "ANDROID: sched/rt: schedtune: Add boost retention to RT"
cpufreq: Drop schedfreq governor
ANDROID: sched/rt: schedtune: Add boost retention to RT
ANDROID: sched/rt: add schedtune accounting
ANDROID: Revert "arm64: move ELF_ET_DYN_BASE to 4GB / 4MB"
ANDROID: Revert "arm: move ELF_ET_DYN_BASE to 4MB"
sched: EAS: Fix the calculation of group util in group_idle_state()
sched: EAS: update trg_cpu to backup_cpu if no energy saving for target_cpu
sched: EAS: Fix the condition to distinguish energy before/after
sched: EAS: upmigrate misfit current task
sched: avoid pushing tasks to an offline CPU
sched: Extend active balance to accept 'push_task' argument
Revert "sched/core: Warn if ENERGY_AWARE is enabled but data is missing"
Revert "sched/core: fix have_sched_energy_data build warning"
FROMLIST: kbuild: clang: fix build failures with sparse check
Revert "Revert "UPSTREAM: efi/libstub/arm64: Set -fpie when building the EFI stub""
BACKPORT: efi/libstub: Unify command line param parsing
BACKPORT: arm64: relocatable: suppress R_AARCH64_ABS64 relocations in vmlinux
sched/core: fix have_sched_energy_data build warning
sched/core: Warn if ENERGY_AWARE is enabled but data is missing
sched: walt: Correct WALT window size initialization
FROMLIST: sched/fair: Use wake_q length as a hint for wake_wide
sched: WALT: account cumulative window demand
sched/fair: remove useless variable in find_best_target
sched/tune: access schedtune_initialized under CGROUP_SCHEDTUNE
sched/fair: consider task utilization in group_max_util()
sched/fair: consider task utilization in group_norm_util()
sched/fair: enforce EAS mode
sched/fair: ignore backup CPU when not valid
sched/fair: trace energy_diff for non boosted tasks
UPSTREAM: sched/fair: Sync task util before slow-path wakeup
UPSTREAM: sched/fair: Fix usage of find_idlest_group() when the local group is idlest
UPSTREAM: sched/fair: Fix usage of find_idlest_group() when no groups are allowed
BACKPORT: sched/fair: Fix find_idlest_group when local group is not allowed
UPSTREAM: sched/fair: Remove unnecessary comparison with -1
BACKPORT: sched/fair: Move select_task_rq_fair slow-path into its own function
UPSTREAM: sched/fair: Force balancing on nohz balance if local group has capacity
UPSTREAM: sched/core: Add missing update_rq_clock() call in set_user_nice()
UPSTREAM: sched/core: Add missing update_rq_clock() call for task_hot()
UPSTREAM: sched/core: Add missing update_rq_clock() in detach_task_cfs_rq()
UPSTREAM: sched/core: Add missing update_rq_clock() in post_init_entity_util_avg()
UPSTREAM: sched/core: Fix find_idlest_group() for fork
BACKPORT: sched/fair: Fix PELT integrity for new tasks
BACKPORT: sched/cgroup: Fix cpu_cgroup_fork() handling
UPSTREAM: sched/fair: Fix and optimize the fork() path
BACKPORT: sched/fair: Make it possible to account fair load avg consistently
cpufreq/sched: Consider max cpu capacity when choosing frequencies
ANDROID: binder: show high watermark of alloc->pages.
ANDROID: binder: Add thread->process_todo flag.
UPSTREAM: arm64: compat: Remove leftover variable declaration
ANDROID: sched/fair: Select correct capacity state for energy_diff
Revert "UPSTREAM: efi/libstub/arm64: Set -fpie when building the EFI stub"
cpufreq: schedutil: clamp util to CPU maximum capacity
FROMLIST: android: binder: Fix null ptr dereference in debug msg
FROMLIST: android: binder: Change binder_shrinker to static
cpufreq/sched: Use cpu max freq rather than policy max
trace: sched: Fix util_avg_walt in sched_load_avg_cpu trace
sched/fair: remove erroneous RCU_LOCKDEP_WARN from start_cpu()
sched: EAS/WALT: finish accounting prior to task_tick
cpufreq: sched: update capacity request upon tick always
sched/fair: prevent meaningless active migration
sched: walt: Leverage existing helper APIs to apply invariance
ANDROID: HACK: arm64: use -mno-implicit-float instead of -mgeneral-regs-only
sched: Update task->on_rq when tasks are moving between runqueues
FROMLIST: f2fs: expose some sectors to user in inline data or dentry case
crypto: Work around deallocated stack frame reference gcc bug on sparc.
UPSTREAM: f2fs: fix potential panic during fstrim
ANDROID: fscrypt: remove unnecessary fscrypto.h
ANDROID: binder: fix node sched policy calculation
ANDROID: binder: init desired_prio.sched_policy before use it
BACKPORT: net: xfrm: support setting an output mark.
UPSTREAM: xfrm: Only add l3mdev oif to dst lookups
UPSTREAM: net: l3mdev: Add master device lookup by index
ANDROID: Kbuild, LLVMLinux: allow overriding clang target triple
CHROMIUM: arm64: Disable asm-operand-width warning for clang
CHROMIUM: kbuild: clang: Disable the 'duplicate-decl-specifier' warning
UPSTREAM: x86/build: Use cc-option to validate stack alignment parameter
UPSTREAM: x86/build: Fix stack alignment for CLang
UPSTREAM: efi/libstub/arm64: Set -fpie when building the EFI stub
BACKPORT: efi/libstub/arm64: Force 'hidden' visibility for section markers
UPSTREAM: compiler, clang: always inline when CONFIG_OPTIMIZE_INLINING is disabled
UPSTREAM: x86/boot: #undef memcpy() et al in string.c
UPSTREAM: crypto: arm64/sha - avoid non-standard inline asm tricks
UPSTREAM: kbuild: clang: Disable 'address-of-packed-member' warning
UPSTREAM: x86/build: Specify stack alignment for clang
UPSTREAM: x86/build: Use __cc-option for boot code compiler options
BACKPORT: kbuild: Add __cc-option macro
UPSTREAM: x86/hweight: Don't clobber %rdi
BACKPORT: x86/hweight: Get rid of the special calling convention
BACKPORT: x86/mm/kaslr: Use the _ASM_MUL macro for multiplication to work around Clang incompatibility
UPSTREAM: crypto, x86: aesni - fix token pasting for clang
UPSTREAM: x86/kbuild: Use cc-option to enable -falign-{jumps/loops}
UPSTREAM: compiler, clang: properly override 'inline' for clang
UPSTREAM: compiler, clang: suppress warning for unused static inline functions
UPSTREAM: Kbuild: provide a __UNIQUE_ID for clang
UPSTREAM: modules: mark __inittest/__exittest as __maybe_unused
BACKPORT: kbuild: Add support to generate LLVM assembly files
UPSTREAM: kbuild: use -Oz instead of -Os when using clang
BACKPORT: kbuild, LLVMLinux: Add -Werror to cc-option to support clang
UPSTREAM: kbuild: drop -Wno-unknown-warning-option from clang options
UPSTREAM: kbuild: fix asm-offset generation to work with clang
UPSTREAM: kbuild: consolidate redundant sed script ASM offset generation
UPSTREAM: kbuild: Consolidate header generation from ASM offset information
UPSTREAM: kbuild: clang: add -no-integrated-as to KBUILD_[AC]FLAGS
UPSTREAM: kbuild: Add better clang cross build support
FROMLIST: tracing: Add support for preempt and irq enable/disable events
FROMLIST: tracing: Prepare to add preempt and irq trace events
ANDROID: binder: fix transaction leak.
ANDROID: binder: Add tracing for binder priority inheritance.
f2fs: catch up to v4.14-rc1
UPSTREAM: cpufreq: schedutil: use now as reference when aggregating shared policy requests
ANDROID: add script to fetch android kernel config fragments
f2fs: reorganize stat information
f2fs: clean up flush/discard command namings
f2fs: check in-memory sit version bitmap
f2fs: check in-memory nat version bitmap
f2fs: check in-memory block bitmap
f2fs: introduce FI_ATOMIC_COMMIT
f2fs: clean up with list_{first, last}_entry
f2fs: return fs_trim if there is no candidate
f2fs: avoid needless checkpoint in f2fs_trim_fs
f2fs: relax async discard commands more
f2fs: drop exist_data for inline_data when truncated to 0
f2fs: don't allow encrypted operations without keys
f2fs: show the max number of atomic operations
f2fs: get io size bit from mount option
f2fs: support IO alignment for DATA and NODE writes
f2fs: add submit_bio tracepoint
f2fs: reassign new segment for mode=lfs
f2fs: fix a missing discard prefree segments
f2fs: use rb_entry_safe
f2fs: add a case of no need to read a page in write begin
f2fs: fix a problem of using memory after free
f2fs: remove unneeded condition
f2fs: don't cache nat entry if out of memory
f2fs: remove unused values in recover_fsync_data
f2fs: support async discard based on v4.9
f2fs: resolve op and op_flags conflicts
f2fs: remove wrong backported code
FROMLIST: binder: fix use-after-free in binder_transaction()
UPSTREAM: ipv6: fib: Unlink replaced routes from their nodes
f2fs: fix a missing size change in f2fs_setattr
f2fs: fix to access nullified flush_cmd_control pointer
f2fs: free meta pages if sanity check for ckpt is failed
f2fs: detect wrong layout
f2fs: call sync_fs when f2fs is idle
Revert "f2fs: use percpu_counter for # of dirty pages in inode"
f2fs: return AOP_WRITEPAGE_ACTIVATE for writepage
f2fs: do not activate auto_recovery for fallocated i_size
f2fs: fix 32-bit build
f2fs: fix incorrect free inode count in ->statfs
f2fs: drop duplicate header timer.h
f2fs: fix wrong AUTO_RECOVER condition
f2fs: do not recover i_size if it's valid
f2fs: fix fdatasync
f2fs: fix to account total free nid correctly
f2fs: fix an infinite loop when flush nodes in cp
f2fs: don't wait writeback for datas during checkpoint
f2fs: fix wrong written_valid_blocks counting
f2fs: avoid BG_GC in f2fs_balance_fs
f2fs: fix redundant block allocation
f2fs: use err for f2fs_preallocate_blocks
f2fs: support multiple devices
f2fs: allow dio read for LFS mode
f2fs: revert segment allocation for direct IO
f2fs: return directly if block has been removed from the victim
Revert "f2fs: do not recover from previous remained wrong dnodes"
f2fs: remove checkpoint in f2fs_freeze
f2fs: assign segments correctly for direct_io
f2fs: fix wrong i_atime recovery
f2fs: record inode updating status correctly
f2fs: Trace reset zone events
f2fs: Reset sequential zones on zoned block devices
f2fs: Cache zoned block devices zone type
f2fs: Do not allow adaptive mode for host-managed zoned block devices
f2fs: Always enable discard for zoned block devices
f2fs: Suppress discard warning message for zoned block devices
f2fs: Check zoned block feature for host-managed zoned block devices
f2fs: Use generic zoned block device terminology
f2fs: Add missing break in switch-case
f2fs: avoid infinite loop in the EIO case on recover_orphan_inodes
f2fs: report error of f2fs_fill_dentries
fs/crypto: catch up 4.9-rc6
f2fs: hide a maybe-uninitialized warning
f2fs: remove percpu_count due to performance regression
f2fs: make clean inodes when flushing inode page
f2fs: keep dirty inodes selectively for checkpoint
f2fs: Replace CURRENT_TIME_SEC with current_time() for inode timestamps
f2fs: use BIO_MAX_PAGES for bio allocation
f2fs: declare static function for __build_free_nids
f2fs: call f2fs_balance_fs for setattr
f2fs: count dirty inodes to flush node pages during checkpoint
f2fs: avoid casted negative value as shrink count
f2fs: don't interrupt free nids building during nid allocation
f2fs: clean up free nid list operations
f2fs: split free nid list
f2fs: clear nlink if fail to add_link
f2fs: fix sparse warnings
f2fs: fix error handling in fsync_node_pages
f2fs: fix to update largest extent under lock
f2fs: be aware of extent beyond EOF in fiemap
f2fs: don't miss any f2fs_balance_fs cases
f2fs: add missing f2fs_balance_fs in f2fs_zero_range
f2fs: give a chance to detach from dirty list
f2fs: fix to release discard entries during checkpoint
f2fs: exclude free nids building and allocation
f2fs: fix to determine start_cp_addr by sbi->cur_cp_pack
f2fs: fix overflow due to condition check order
posix_acl: Clear SGID bit when setting file permissions
f2fs: fix wrong sum_page pointer in f2fs_gc
f2fs: backport from (4c1fad64 - Merge tag 'for-f2fs-4.9' of git://git.kernel.org/pub/scm/linux/kernel/git/jaegeuk/f2fs)
Revert "ANDROID: sched/tune: Initialize raw_spin_lock in boosted_groups"
BACKPORT: partial: mm, oom_reaper: do not mmput synchronously from the oom reaper context
FROMLIST: android: binder: Don't get mm from task
FROMLIST: android: binder: Remove unused vma argument
FROMLIST: android: binder: Drop lru lock in isolate callback
ANDROID: configs: remove config fragments
drivers: cpufreq_interactive: handle error for module load fail
UPSTREAM: Fix build break in fork.c when THREAD_SIZE < PAGE_SIZE
ANDROID: sdcardfs: Add missing break
ANDROID: Sdcardfs: Move gid derivation under flag
ANDROID: mnt: Fix freeing of mount data
drivers: cpufreq: checks to avoid kernel crash in cpufreq_interactive
ANDROID: Use sk_uid to replace uid get from socket file
ANDROID: nf: xt_qtaguid: fix handling for cases where tunnels are used.
Revert "ANDROID: Use sk_uid to replace uid get from socket file"
ANDROID: fiq_debugger: Fix minor bug in code
ANDROID: cpufreq-dt: Set sane defaults for schedutil rate limits
BACKPORT: cpufreq: schedutil: Use policy-dependent transition delays
FROMLIST: binder: fix an ret value override
FROMLIST: binder: fix memory corruption in binder_transaction binder
sched: WALT: fix window mis-alignment
sched: EAS: kill incorrect nohz idle cpu kick
sched: EAS: fix incorrect energy delta calculation due to rounding error
sched: EAS/WALT: take into account waking task's load
cpufreq: sched: WALT: don't apply capacity margin twice
sched: WALT: fix potential overflow
sched: EAS: schedfreq: fix CPU util over estimation
sched: EAS/WALT: use cr_avg instead of prev_runnable_sum
sched: WALT: fix broken cumulative runnable average accounting
sched: deadline: WALT: account cumulative runnable avg
FROMLIST: android: binder: Add page usage in binder stats
FROMLIST: android: binder: Add shrinker tracepoints
FROMLIST: android: binder: Add global lru shrinker to binder
FROMLIST: android: binder: Move buffer out of area shared with user space
FROMLIST: android: binder: Add allocator selftest
FROMLIST: android: binder: Refactor prev and next buffer into a helper function
android: android-base.config: enable IP6_NF_MATCH_RPFILTER
UPSTREAM: cpufreq: schedutil: Use unsigned int for iowait boost
UPSTREAM: cpufreq: schedutil: Make iowait boost more energy efficient
ANDROID: NFC: st21nfca: Fix memory OOB and leak issues in connectivity events handler
ANDROID: check dir value of xfrm_userpolicy_id
ANDROID: NFC: Fix possible memory corruption when handling SHDLC I-Frame commands
ANDROID: nfc: fdp: Fix possible buffer overflow in WCS4000 NFC driver
ANDROID: NFC: st21nfca: Fix out of bounds kernel access when handling ATR_REQ
UPSTREAM: usb: dwc3: gadget: don't send extra ZLP
BACKPORT: usb: dwc3: gadget: handle request->zero
ANDROID: usb: gadget: assign no-op request complete callbacks
ANDROID: usb: gadget: configfs: fix null ptr in android_disconnect
ANDROID: uid_sys_stats: Fix implicit declaration of get_cmdline()
uid_sys_stats: log task io with a debug flag
ANDROID: Use sk_uid to replace uid get from socket file
UPSTREAM: arm64: smp: Prevent raw_smp_processor_id() recursion
UPSTREAM: arm64: restore get_current() optimisation
ANDROID: arm64: Fix a copy-paste error in prior init_thread_info build fix
UPSTREAM: locking: avoid passing around 'thread_info' in mutex debugging code
ANDROID: arm64: fix undeclared 'init_thread_info' error
UPSTREAM: kdb: use task_cpu() instead of task_thread_info()->cpu
ANDROID: keychord: Fix for a memory leak in keychord.
ANDROID: keychord: Fix races in keychord_write.
Use %zu to print resid (size_t).
ANDROID: keychord: Fix a slab out-of-bounds read.
ANDROID: binder: don't queue async transactions to thread.
ANDROID: binder: don't enqueue death notifications to thread todo.
ANDROID: binder: call poll_wait() unconditionally.
android: configs: move quota-related configs to recommended
BACKPORT: arm64: split thread_info from task stack
UPSTREAM: arm64: assembler: introduce ldr_this_cpu
UPSTREAM: arm64: make cpu number a percpu variable
UPSTREAM: arm64: smp: prepare for smp_processor_id() rework
BACKPORT: arm64: move sp_el0 and tpidr_el1 into cpu_suspend_ctx
UPSTREAM: arm64: prep stack walkers for THREAD_INFO_IN_TASK
UPSTREAM: arm64: unexport walk_stackframe
UPSTREAM: arm64: traps: simplify die() and __die()
UPSTREAM: arm64: factor out current_stack_pointer
BACKPORT: arm64: asm-offsets: remove unused definitions
UPSTREAM: arm64: thread_info remove stale items
UPSTREAM: thread_info: include <current.h> for THREAD_INFO_IN_TASK
UPSTREAM: thread_info: factor out restart_block
UPSTREAM: kthread: Pin the stack via try_get_task_stack()/put_task_stack() in to_live_kthread() function
UPSTREAM: sched/core: Add try_get_task_stack() and put_task_stack()
UPSTREAM: sched/core: Allow putting thread_info into task_struct
UPSTREAM: printk: when dumping regs, show the stack, not thread_info
UPSTREAM: fix up initial thread stack pointer vs thread_info confusion
UPSTREAM: Clarify naming of thread info/stack allocators
ANDROID: sdcardfs: override credential for ioctl to lower fs
ANDROID: binder: Don't BUG_ON(!spin_is_locked()).
sched/fair: Add a backup_cpu to find_best_target
sched/fair: Try to estimate possible idle states.
sched/fair: Sync task util before EAS wakeup
Revert "sched/fair: ensure utilization signals are synchronized before use"
sched/fair: kick nohz idle balance for misfit task
sched/fair: Update signals of nohz cpus if we are going idle
events: add tracepoint for find_best_target
sched/fair: streamline find_best_target heuristics
UPSTREAM: af_key: Fix sadb_x_ipsecrequest parsing
ANDROID: lowmemorykiller: Add tgid to kill message
Revert "proc: smaps: Allow smaps access for CAP_SYS_RESOURCE"
ANDROID: android-verity: mark dev as rw for linear target
ANDROID: sdcardfs: Remove unnecessary lock
ANDROID: binder: don't check prio permissions on restore.
Add BINDER_GET_NODE_DEBUG_INFO ioctl
UPSTREAM: cpufreq: schedutil: Trace frequency only if it has changed
UPSTREAM: cpufreq: schedutil: Avoid reducing frequency of busy CPUs prematurely
UPSTREAM: cpufreq: schedutil: Refactor sugov_next_freq_shared()
UPSTREAM: cpufreq: schedutil: Fix per-CPU structure initialization in sugov_start()
UPSTREAM: cpufreq: schedutil: Pass sg_policy to get_next_freq()
UPSTREAM: cpufreq: schedutil: move cached_raw_freq to struct sugov_policy
UPSTREAM: cpufreq: schedutil: Rectify comment in sugov_irq_work() function
UPSTREAM: cpufreq: schedutil: irq-work and mutex are only used in slow path
UPSTREAM: cpufreq: schedutil: enable fast switch earlier
UPSTREAM: cpufreq: schedutil: Avoid indented labels
ANDROID: binder: add RT inheritance flag to node.
ANDROID: binder: improve priority inheritance.
ANDROID: binder: add min sched_policy to node.
ANDROID: binder: add support for RT prio inheritance.
ANDROID: binder: push new transactions to waiting threads.
ANDROID: binder: remove proc waitqueue
FROMLIST: binder: remove global binder lock
FROMLIST: binder: fix death race conditions
FROMLIST: binder: protect against stale pointers in print_binder_transaction
FROMLIST: binder: protect binder_ref with outer lock
FROMLIST: binder: use inner lock to protect thread accounting
FROMLIST: binder: protect transaction_stack with inner lock.
FROMLIST: binder: protect proc->threads with inner_lock
FROMLIST: binder: protect proc->nodes with inner lock
FROMLIST: binder: add spinlock to protect binder_node
FROMLIST: binder: add spinlocks to protect todo lists
FROMLIST: binder: use inner lock to sync work dq and node counts
FROMLIST: binder: introduce locking helper functions
FROMLIST: binder: use node->tmp_refs to ensure node safety
FROMLIST: binder: refactor binder ref inc/dec for thread safety
FROMLIST: binder: make sure accesses to proc/thread are safe
FROMLIST: binder: make sure target_node has strong ref
FROMLIST: binder: guarantee txn complete / errors delivered in-order
FROMLIST: binder: refactor binder_pop_transaction
FROMLIST: binder: use atomic for transaction_log index
FROMLIST: binder: add more debug info when allocation fails.
FROMLIST: binder: protect against two threads freeing buffer
FROMLIST: binder: remove dead code in binder_get_ref_for_node
FROMLIST: binder: don't modify thread->looper from other threads
FROMLIST: binder: avoid race conditions when enqueuing txn
FROMLIST: binder: refactor queue management in binder_thread_read
FROMLIST: binder: add log information for binder transaction failures
FROMLIST: binder: make binder_last_id an atomic
FROMLIST: binder: change binder_stats to atomics
FROMLIST: binder: add protection for non-perf cases
FROMLIST: binder: remove binder_debug_no_lock mechanism
FROMLIST: binder: move binder_alloc to separate file
FROMLIST: binder: separate out binder_alloc functions
FROMLIST: binder: remove unneeded cleanup code
FROMLIST: binder: separate binder allocator structure from binder proc
FROMLIST: binder: Use wake up hint for synchronous transactions.
Revert "android: binder: move global binder state into context struct."
sched: walt: fix window misalignment when HZ=300
ANDROID: android-base.cfg: remove CONFIG_CGROUP_DEBUG
ANDROID: sdcardfs: use mount_nodev and fix a issue in sdcardfs_kill_sb
UPSTREAM: selinux: enable genfscon labeling for tracefs
UPSTREAM: drivers/perf: arm-pmu: fix RCU usage on pmu resume from low-power
UPSTREAM: drivers/perf: arm_pmu: implement CPU_PM notifier
ANDROID: squashfs: Fix endianness issue
ANDROID: squashfs: Fix signed division issue
UPSTREAM: usb: gadget: f_fs: avoid out of bounds access on comp_desc
UPSTREAM: bpf: don't let ldimm64 leak map addresses on unprivileged
BACKPORT: ext4: fix data exposure after a crash
ANDROID: sdcardfs: remove dead function open_flags_to_access_mode()
ANDROID: android-base.cfg: split out arm64-specific configs
usb: gadget: f_fs: Fix possibe deadlock
ANDROID: uid_sys_stats: check previous uid_entry before call find_or_register_uid
ANDROID: sdcardfs: d_splice_alias can return error values
android: base-cfg: disable CONFIG_NFS_FS and CONFIG_NFSD
schedstats/eas: guard properly to avoid breaking non-smp schedstats users
BACKPORT: f2fs: sanity check size of nat and sit cache
FROMLIST: f2fs: sanity check checkpoint segno and blkoff
sched/tune: don't use schedtune before it is ready
sched/fair: use SCHED_CAPACITY_SCALE for energy normalization
sched/{fair,tune}: use reciprocal_value to compute boost margin
sched/tune: Initialize raw_spin_lock in boosted_groups
sched/tune: report when SchedTune has not been initialized
sched/tune: fix sched_energy_diff tracepoint
sched/tune: increase group count to 5
cpufreq/schedutil: use boosted_cpu_util for PELT to match WALT
sched/fair: Fix sched_group_energy() to support per-cpu capacity states
sched/fair: discount task contribution to find CPU with lowest utilization
sched/fair: ensure utilization signals are synchronized before use
sched/fair: remove task util from own cpu when placing waking task
trace:sched: Make util_avg in load_avg trace reflect PELT/WALT as used
sched/fair: Add eas (& cas) specific rq, sd and task stats
sched/core: Fix PELT jump to max OPP upon util increase
sched: EAS & 'single cpu per cluster'/cpu hotplug interoperability
UPSTREAM: sched/core: Fix group_entity's share update
UPSTREAM: sched/fair: Fix calc_cfs_shares() fixed point arithmetics width confusion
UPSTREAM: sched/fair: Fix incorrect task group ->load_avg
UPSTREAM: sched/fair: Fix effective_load() to consistently use smoothed load
UPSTREAM: sched/fair: Propagate asynchrous detach
UPSTREAM: sched/fair: Propagate load during synchronous attach/detach
UPSTREAM: sched/fair: Fix hierarchical order in rq->leaf_cfs_rq_list
BACKPORT: sched/fair: Factorize PELT update
UPSTREAM: sched/fair: Factorize attach/detach entity
UPSTREAM: sched/fair: Improve PELT stuff some more
UPSTREAM: sched/fair: Apply more PELT fixes
UPSTREAM: sched/fair: Fix post_init_entity_util_avg() serialization
BACKPORT: sched/fair: Initiate a new task's util avg to a bounded value
sched/fair: Simplify idle_idx handling in select_idle_sibling()
sched/fair: refactor find_best_target() for simplicity
sched/fair: Change cpu iteration order in find_best_target()
sched/core: Add first cpu w/ max/min orig capacity to root domain
sched/core: Remove remnants of commit fd5c98da1a42
sched: Remove sysctl_sched_is_big_little
sched/fair: Code !is_big_little path into select_energy_cpu_brute()
EAS: sched/fair: Re-integrate 'honor sync wakeups' into wakeup path
Fixup!: sched/fair.c: Set SchedTune specific struct energy_env.task
sched/fair: Energy-aware wake-up task placement
sched/fair: Add energy_diff dead-zone margin
sched/fair: Decommission energy_aware_wake_cpu()
sched/fair: Do not force want_affine eq. true if EAS is enabled
arm64: Set SD_ASYM_CPUCAPACITY sched_domain flag on DIE level
UPSTREAM: sched/fair: Fix incorrect comment for capacity_margin
UPSTREAM: sched/fair: Avoid pulling tasks from non-overloaded higher capacity groups
UPSTREAM: sched/fair: Add per-CPU min capacity to sched_group_capacity
UPSTREAM: sched/fair: Consider spare capacity in find_idlest_group()
UPSTREAM: sched/fair: Compute task/cpu utilization at wake-up correctly
UPSTREAM: sched/fair: Let asymmetric CPU configurations balance at wake-up
UPSTREAM: sched/core: Enable SD_BALANCE_WAKE for asymmetric capacity systems
UPSTREAM: sched/core: Pass child domain into sd_init()
UPSTREAM: sched/core: Introduce SD_ASYM_CPUCAPACITY sched_domain topology flag
UPSTREAM: sched/core: Remove unnecessary NULL-pointer check
UPSTREAM: sched/fair: Optimize find_idlest_cpu() when there is no choice
BACKPORT: sched/fair: Make the use of prev_cpu consistent in the wakeup path
UPSTREAM: sched/core: Fix power to capacity renaming in comment
Partial Revert: "WIP: sched: Add cpu capacity awareness to wakeup balancing"
Revert "WIP: sched: Consider spare cpu capacity at task wake-up"
FROM-LIST: cpufreq: schedutil: Redefine the rate_limit_us tunable
cpufreq: schedutil: add up/down frequency transition rate limits
trace/sched: add rq utilization signal for WALT
sched/cpufreq: make schedutil use WALT signal
sched: cpufreq: use rt_avg as estimate of required RT CPU capacity
cpufreq: schedutil: move slow path from workqueue to SCHED_FIFO task
BACKPORT: kthread: allow to cancel kthread work
sched/cpufreq: fix tunables for schedfreq governor
BACKPORT: cpufreq: schedutil: New governor based on scheduler utilization data
sched: backport cpufreq hooks from 4.9-rc4
ANDROID: Kconfig: add depends for UID_SYS_STATS
ANDROID: hid: uhid: implement refcount for open and close
Revert "ext4: require encryption feature for EXT4_IOC_SET_ENCRYPTION_POLICY"
ANDROID: mnt: Fix next_descendent
ANDROID: uid_sys_stats: defer io stats calulation for dead tasks
ANDROID: AVB: Fix linter errors.
ANDROID: AVB: Fix invalidate_vbmeta_submit().
ANDROID: sdcardfs: Check for NULL in revalidate
ANDROID: AVB: Only invalidate vbmeta when told to do so.
ANDROID: sdcardfs: Move top to its own struct
ANDROID: lowmemorykiller: account for unevictable pages
ANDROID: usb: gadget: fix NULL pointer issue in mtp_read()
ANDROID: usb: f_mtp: return error code if transfer error in receive_file_work function
ANDROID: android-base.cfg: remove spurious CONFIG_MODULES line
ANDROID: memory_state_time: fix undefined behavior with missing DT properties
ANDROID: rfkill: fix unused function warning
ANDROID: make PF_KEY SHA256 use RFC-compliant truncation.
ANDROID: sdcardfs: fix sdcardfs_destroy_inode for the inode RCU approach
ANDROID: android-base.cfg: remove NETFILTER_XT_MATCH_QUOTA2_LOG
ANDROID: sdcardfs: Don't iput if we didn't igrab
ANDROID: Add untag hacks to inet_release function
ANDROID: android-base.cfg: remove USB_OTG_WAKELOCK
ANDROID: android-base.cfg: remove defunct options
ANDROID: arm64: suspend: Restore the UAO state
ANDROID: usb: gadget: f_audio_source: disable the CPU C-states upon playback
ANDROID: usb: gadget: f_mtp: Set 0xFFFFFFFF in mtp header ContainerLength field
net: pppolac/pppopns: Add back the msg_flags
ANDROID: uid_sys_stats: fix access of task_uid(task)
BACKPORT: f2fs: sanity check log_blocks_per_seg
ANDROID: sdcardfs: Call lower fs's revalidate
ANDROID: sdcardfs: Avoid setting GIDs outside of valid ranges
ANDROID: sdcardfs: Copy meta-data from lower inode
Revert "Revert "Android: sdcardfs: Don't do d_add for lower fs""
ANDROID: sdcardfs: Use filesystem specific hash
ANDROID: AVB error handler to invalidate vbmeta partition.
ANDROID: Update init/do_mounts_dm.c to the latest ChromiumOS version.
Revert "[RFC]cgroup: Change from CAP_SYS_NICE to CAP_SYS_RESOURCE for cgroup migration permissions"
Revert "USB: gadget: u_ether: Fix data stall issue in RNDIS tethering mode"
ANDROID: uid_sys_stats: reduce update_io_stats overhead
UPSTREAM: char: lack of bool string made CONFIG_DEVPORT always on
UPSTREAM: char: Drop bogus dependency of DEVPORT on !M68K
Revert "Android: sdcardfs: Don't do d_add for lower fs"
ANDROID: usb: gadget: fix MTP enumeration issue under super speed mode
Android: sdcardfs: Don't complain in fixup_lower_ownership
Android: sdcardfs: Don't do d_add for lower fs
ANDROID: sdcardfs: ->iget fixes
Android: sdcardfs: Change cache GID value
BACKPORT: [UPSTREAM] ext2: convert to mbcache2
BACKPORT [UPSTREAM] ext4: convert to mbcache2
BACKPORT: [UPSTREAM] mbcache2: reimplement mbcache
UPSTREAM: net: socket: Make unnecessarily global sockfs_setattr() static
UPSTREAM: net: ipv4: Don't crash if passing a null sk to ip_do_redirect.
UPSTREAM: net/packet: fix overflow in check for priv area size
Revert "Revert "Revert "CHROMIUM: android: binder: Fix potential scheduling-while-atomic"""
ANDROID: sdcardfs: Directly pass lower file for mmap
UPSTREAM: checkpatch: special audit for revert commit line
UPSTREAM: PM / sleep: make PM notifiers called symmetrically
Revert "Revert "CHROMIUM: android: binder: Fix potential scheduling-while-atomic""
BACKPORT: arm64: dts: juno: fix cluster sleep state entry latency on all SoC versions
staging: android: ashmem: lseek failed due to no FMODE_LSEEK.
ANDROID: sdcardfs: update module info
ANDROID: sdcardfs: use d_splice_alias
ANDROID: sdcardfs: add read_iter/write_iter opeations
ANDROID: sdcardfs: fix ->llseek to update upper and lower offset
ANDROID: sdcardfs: copy lower inode attributes in ->ioctl
ANDROID: sdcardfs: remove unnecessary call to do_munmap
Merge 4.4.59 into android-4.4
UPSTREAM: ipv6 addrconf: implement RFC7559 router solicitation backoff
android: base-cfg: enable CONFIG_INET_DIAG_DESTROY
ANDROID: android-base.cfg: add CONFIG_MODULES option
ANDROID: android-base.cfg: add CONFIG_IKCONFIG option
ANDROID: android-base.cfg: properly sort the file
ANDROID: binder: add hwbinder,vndbinder to BINDER_DEVICES.
ANDROID: sort android-recommended.cfg
UPSTREAM: config/android: Remove CONFIG_IPV6_PRIVACY
UPSTREAM: config: android: set SELinux as default security mode
config: android: move device mapper options to recommended
ANDROID: ARM64: Allow to choose appended kernel image
UPSTREAM: arm64: vdso: constify vm_special_mapping used for aarch32 vectors page
UPSTREAM: arm64: vdso: add __init section marker to alloc_vectors_page
UPSTREAM: ARM: 8597/1: VDSO: put RO and RO after init objects into proper sections
UPSTREAM: arm64: Add support for CLOCK_MONOTONIC_RAW in clock_gettime() vDSO
UPSTREAM: arm64: Refactor vDSO time functions
UPSTREAM: arm64: fix vdso-offsets.h dependency
UPSTREAM: kbuild: drop FORCE from PHONY targets
UPSTREAM: mm: add PHYS_PFN, use it in __phys_to_pfn()
UPSTREAM: ARM: 8476/1: VDSO: use PTR_ERR_OR_ZERO for vma check
ANDROID: sdcardfs: Fix style issues in macros
ANDROID: sdcardfs: Use seq_puts over seq_printf
ANDROID: sdcardfs: Use to kstrout
ANDROID: sdcardfs: Use pr_[...] instead of printk
ANDROID: sdcardfs: remove unneeded null check
ANDROID: sdcardfs: Fix style issues with comments
ANDROID: sdcardfs: Fix formatting
ANDROID: sdcardfs: correct order of descriptors
fix the deadlock in xt_qtaguid when enable DDEBUG
net: ipv6: Add sysctl for minimum prefix len acceptable in RIOs.
ANDROID: mmc: core: export emmc revision
BACKPORT: mmc: core: Export device lifetime information through sysfs
ANDROID: android-verity: do not compile as independent module
ANDROID: sched: fix duplicate sched_group_energy const specifiers
config: disable CONFIG_USELIB and CONFIG_FHANDLE
ANDROID: power: align wakeup_sources format
ANDROID: dm: android-verity: allow disable dm-verity for Treble VTS
uid_sys_stats: change to use rt_mutex
ANDROID: vfs: user permission2 in notify_change2
ANDROID: sdcardfs: Fix gid issue
ANDROID: sdcardfs: Use tabs instead of spaces in multiuser.h
ANDROID: sdcardfs: Remove uninformative prints
ANDROID: sdcardfs: move path_put outside of spinlock
ANDROID: sdcardfs: Use case insensitive hash function
ANDROID: sdcardfs: declare MODULE_ALIAS_FS
ANDROID: sdcardfs: Get the blocksize from the lower fs
ANDROID: sdcardfs: Use d_invalidate instead of drop_recurisve
ANDROID: sdcardfs: Switch to internal case insensitive compare
ANDROID: sdcardfs: Use spin_lock_nested
ANDROID: sdcardfs: Replace get/put with d_lock
ANDROID: sdcardfs: rate limit warning print
ANDROID: sdcardfs: Fix case insensitive lookup
ANDROID: uid_sys_stats: account for fsync syscalls
ANDROID: sched: add a counter to track fsync
ANDROID: uid_sys_stats: fix negative write bytes.
ANDROID: uid_sys_stats: allow writing same state
ANDROID: uid_sys_stats: rename uid_cputime.c to uid_sys_stats.c
ANDROID: uid_cputime: add per-uid IO usage accounting
DTB: Add EAS compatible Juno Energy model to 'juno.dts'
arm64: dts: juno: Add idle-states to device tree
ANDROID: Replace spaces by '_' for some android filesystem tracepoints.
usb: gadget: f_accessory: Fix for UsbAccessory clean unbind.
android: binder: move global binder state into context struct.
android: binder: add padding to binder_fd_array_object.
binder: use group leader instead of open thread
nf: IDLETIMER: Use fullsock when querying uid
nf: IDLETIMER: Fix use after free condition during work
ANDROID: dm: android-verity: fix table_make_digest() error handling
ANDROID: usb: gadget: function: Fix commenting style
cpufreq: interactive governor drops bits in time calculation
ANDROID: sdcardfs: support direct-IO (DIO) operations
ANDROID: sdcardfs: implement vm_ops->page_mkwrite
ANDROID: sdcardfs: Don't bother deleting freelist
ANDROID: sdcardfs: Add missing path_put
ANDROID: sdcardfs: Fix incorrect hash
ANDROID: ext4 crypto: Disables zeroing on truncation when there's no key
ANDROID: ext4: add a non-reversible key derivation method
ANDROID: ext4: allow encrypting filenames using HEH algorithm
ANDROID: arm64/crypto: add ARMv8-CE optimized poly_hash algorithm
ANDROID: crypto: heh - factor out poly_hash algorithm
ANDROID: crypto: heh - Add Hash-Encrypt-Hash (HEH) algorithm
ANDROID: crypto: gf128mul - Add ble multiplication functions
ANDROID: crypto: gf128mul - Refactor gf128 overflow macros and tables
UPSTREAM: crypto: gf128mul - Zero memory when freeing multiplication table
ANDROID: crypto: shash - Add crypto_grab_shash() and crypto_spawn_shash_alg()
ANDROID: crypto: allow blkcipher walks over ablkcipher data
UPSTREAM: arm/arm64: crypto: assure that ECB modes don't require an IV
ANDROID: Refactor fs readpage/write tracepoints.
ANDROID: export security_path_chown
Squashfs: optimize reading uncompressed data
Squashfs: implement .readpages()
Squashfs: replace buffer_head with BIO
Squashfs: refactor page_actor
Squashfs: remove the FILE_CACHE option
ANDROID: android-recommended.cfg: CONFIG_CPU_SW_DOMAIN_PAN=y
FROMLIST: 9p: fix a potential acl leak
BACKPORT: posix_acl: Clear SGID bit when setting file permissions
UPSTREAM: udp: properly support MSG_PEEK with truncated buffers
UPSTREAM: arm64: Allow hw watchpoint of length 3,5,6 and 7
BACKPORT: arm64: hw_breakpoint: Handle inexact watchpoint addresses
UPSTREAM: arm64: Allow hw watchpoint at varied offset from base address
BACKPORT: hw_breakpoint: Allow watchpoint of length 3,5,6 and 7
ANDROID: sdcardfs: Switch strcasecmp for internal call
ANDROID: sdcardfs: switch to full_name_hash and qstr
ANDROID: sdcardfs: Add GID Derivation to sdcardfs
ANDROID: sdcardfs: Remove redundant operation
ANDROID: sdcardfs: add support for user permission isolation
ANDROID: sdcardfs: Refactor configfs interface
ANDROID: sdcardfs: Allow non-owners to touch
ANDROID: binder: fix format specifier for type binder_size_t
ANDROID: fs: Export vfs_rmdir2
ANDROID: fs: Export free_fs_struct and set_fs_pwd
BACKPORT: Input: xpad - validate USB endpoint count during probe
BACKPORT: Input: xpad - fix oops when attaching an unknown Xbox One gamepad
ANDROID: mnt: remount should propagate to slaves of slaves
ANDROID: sdcardfs: Switch ->d_inode to d_inode()
ANDROID: sdcardfs: Fix locking issue with permision fix up
ANDROID: sdcardfs: Change magic value
ANDROID: sdcardfs: Use per mount permissions
ANDROID: sdcardfs: Add gid and mask to private mount data
ANDROID: sdcardfs: User new permission2 functions
ANDROID: vfs: Add setattr2 for filesystems with per mount permissions
ANDROID: vfs: Add permission2 for filesystems with per mount permissions
ANDROID: vfs: Allow filesystems to access their private mount data
ANDROID: mnt: Add filesystem private data to mount points
ANDROID: sdcardfs: Move directory unlock before touch
ANDROID: sdcardfs: fix external storage exporting incorrect uid
ANDROID: sdcardfs: Added top to sdcardfs_inode_info
ANDROID: sdcardfs: Switch package list to RCU
ANDROID: sdcardfs: Fix locking for permission fix up
ANDROID: sdcardfs: Check for other cases on path lookup
ANDROID: sdcardfs: override umask on mkdir and create
arm64: kernel: Fix build warning
DEBUG: sched/fair: Fix sched_load_avg_cpu events for task_groups
DEBUG: sched/fair: Fix missing sched_load_avg_cpu events
UPSTREAM: l2tp: fix racy SOCK_ZAPPED flag check in l2tp_ip{,6}_bind()
UPSTREAM: packet: fix race condition in packet_set_ring
UPSTREAM: netlink: Fix dump skb leak/double free
UPSTREAM: net: avoid signed overflows for SO_{SND|RCV}BUFFORCE
MIPS: Prevent "restoration" of MSA context in non-MSA kernels
net: socket: don't set sk_uid to garbage value in ->setattr()
ANDROID: configs: CONFIG_ARM64_SW_TTBR0_PAN=y
UPSTREAM: arm64: Disable PAN on uaccess_enable()
UPSTREAM: arm64: Enable CONFIG_ARM64_SW_TTBR0_PAN
UPSTREAM: arm64: xen: Enable user access before a privcmd hvc call
UPSTREAM: arm64: Handle faults caused by inadvertent user access with PAN enabled
BACKPORT: arm64: Disable TTBR0_EL1 during normal kernel execution
BACKPORT: arm64: Introduce uaccess_{disable,enable} functionality based on TTBR0_EL1
BACKPORT: arm64: Factor out TTBR0_EL1 post-update workaround into a specific asm macro
BACKPORT: arm64: Factor out PAN enabling/disabling into separate uaccess_* macros
UPSTREAM: arm64: alternative: add auto-nop infrastructure
UPSTREAM: arm64: barriers: introduce nops and __nops macros for NOP sequences
Revert "FROMLIST: arm64: Factor out PAN enabling/disabling into separate uaccess_* macros"
Revert "FROMLIST: arm64: Factor out TTBR0_EL1 post-update workaround into a specific asm macro"
Revert "FROMLIST: arm64: Introduce uaccess_{disable,enable} functionality based on TTBR0_EL1"
Revert "FROMLIST: arm64: Disable TTBR0_EL1 during normal kernel execution"
Revert "FROMLIST: arm64: Handle faults caused by inadvertent user access with PAN enabled"
Revert "FROMLIST: arm64: xen: Enable user access before a privcmd hvc call"
Revert "FROMLIST: arm64: Enable CONFIG_ARM64_SW_TTBR0_PAN"
ANDROID: sched/walt: fix build failure if FAIR_GROUP_SCHED=n
ANDROID: trace: net: use %pK for kernel pointers
ANDROID: android-base: Enable QUOTA related configs
net: ipv4: Don't crash if passing a null sk to ip_rt_update_pmtu.
net: inet: Support UID-based routing in IP protocols.
net: core: add UID to flows, rules, and routes
net: core: Add a UID field to struct sock.
Revert "net: core: Support UID-based routing."
UPSTREAM: efi/arm64: Don't apply MEMBLOCK_NOMAP to UEFI memory map mapping
UPSTREAM: arm64: mm: always take dirty state from new pte in ptep_set_access_flags
UPSTREAM: arm64: Implement pmdp_set_access_flags() for hardware AF/DBM
UPSTREAM: arm64: Fix typo in the pmdp_huge_get_and_clear() definition
UPSTREAM: arm64: enable CONFIG_DEBUG_RODATA by default
goldfish: enable CONFIG_INET_DIAG_DESTROY
sched/walt: kill {min,max}_capacity
sched: fix wrong truncation of walt_avg
build: fix build config kernel_dir
ANDROID: dm verity: add minimum prefetch size
build: add build server configs for goldfish
usb: gadget: Fix compilation problem with tx_qlen field
Conflicts:
Documentation/kasan.txt
Makefile
arch/arm64/Makefile
arch/arm64/boot/Makefile
arch/arm64/include/asm/assembler.h
arch/arm64/include/asm/cputype.h
arch/arm64/include/asm/efi.h
arch/arm64/include/asm/mmu_context.h
arch/arm64/include/asm/signal32.h
arch/arm64/include/asm/suspend.h
arch/arm64/include/asm/vdso_datapage.h
arch/arm64/kernel/asm-offsets.c
arch/arm64/kernel/cpufeature.c
arch/arm64/kernel/entry.S
arch/arm64/kernel/head.S
arch/arm64/kernel/insn.c
arch/arm64/kernel/io.c
arch/arm64/kernel/psci.c
arch/arm64/kernel/setup.c
arch/arm64/kernel/smp.c
arch/arm64/kernel/suspend.c
arch/arm64/kernel/traps.c
arch/arm64/kernel/vdso.c
arch/arm64/kernel/vdso/gettimeofday.S
arch/arm64/kernel/vmlinux.lds.S
arch/arm64/mm/cache.S
arch/arm64/mm/fault.c
arch/arm64/mm/kasan_init.c
arch/arm64/mm/mmu.c
arch/arm64/mm/proc.S
arch/x86/include/asm/thread_info.h
arch/x86/kernel/Makefile
arch/x86/kernel/kprobes/core.c
block/blk-core.c
build.config.common
drivers/Kconfig
drivers/Makefile
drivers/android/Makefile
drivers/android/binder.c
drivers/base/power/main.c
drivers/block/loop.c
drivers/clocksource/Kconfig
drivers/cpufreq/Kconfig
drivers/cpufreq/cpufreq-dt.c
drivers/gpu/drm/msm/msm_gem_submit.c
drivers/irqchip/Kconfig
drivers/irqchip/Makefile
drivers/md/Kconfig
drivers/md/Makefile
drivers/misc/Kconfig
drivers/misc/Makefile
drivers/misc/uid_sys_stats.c
drivers/scsi/ufs/ufshcd.c
drivers/scsi/ufs/ufshcd.h
drivers/staging/android/Kconfig
drivers/staging/android/Makefile
drivers/staging/android/fiq_debugger/fiq_watchdog.h
drivers/staging/android/ion/ion.c
drivers/staging/android/ion/ion_cma_heap.c
drivers/tee/optee/shm_pool.h
drivers/usb/dwc3/gadget.c
drivers/usb/gadget/configfs.c
drivers/usb/gadget/function/f_accessory.c
fs/exec.c
fs/ext4/crypto.c
fs/ext4/crypto_fname.c
fs/ext4/crypto_key.c
fs/ext4/ext4.h
fs/ext4/ext4_crypto.h
fs/f2fs/crypto_policy.c
fs/f2fs/data.c
fs/f2fs/f2fs.h
fs/f2fs/super.c
fs/proc/task_mmu.c
fs/sdcardfs/derived_perm.c
fs/sdcardfs/inode.c
fs/sdcardfs/main.c
fs/sdcardfs/sdcardfs.h
fs/squashfs/lz4_wrapper.c
include/linux/blkdev.h
include/linux/cpufreq.h
include/linux/dcache.h
include/linux/mmc/card.h
include/linux/mmc/mmc.h
include/linux/msm_mdp.h
include/linux/sched.h
include/linux/slab_def.h
include/linux/slub_def.h
include/linux/thread_info.h
include/trace/events/android_fs.h
include/trace/events/sched.h
include/uapi/linux/android/binder.h
include/uapi/linux/ipv6.h
include/uapi/linux/prctl.h
kernel/configs/android-base.config
kernel/configs/android-recommended.config
kernel/cpu.c
kernel/fork.c
kernel/sched/Makefile
kernel/sched/core.c
kernel/sched/cpufreq_sched.c
kernel/sched/cpufreq_schedutil.c
kernel/sched/fair.c
kernel/sched/rt.c
kernel/sched/sched.h
kernel/sched/stats.c
kernel/sched/walt.c
kernel/sys.c
kernel/sysctl.c
kernel/time/timekeeping.c
lib/Kconfig
lib/test_kasan.c
mm/kasan/Makefile
mm/kasan/kasan.c
mm/kasan/kasan.h
mm/kasan/report.c
mm/slab.c
mm/slab.h
mm/slub.c
net/ipv4/route.c
net/ipv4/tcp_ipv4.c
net/ipv4/xfrm4_policy.c
net/ipv6/route.c
net/netfilter/xt_IDLETIMER.c
net/netfilter/xt_qtaguid.c
net/netfilter/xt_quota2.c
net/socket.c
net/wireless/scan.c
scripts/Makefile.lib
scripts/checkpatch.pl
security/selinux/nlmsgtab.c
Skipped commit:
a08cafa7e0 ANDROID: ARM64: Allow to choose appended kernel image
Change-Id: I306e14a74d75f56cd39b5ad344f0f4440c26b52a
kernel/sched/rt.c (2694 lines, 65 KiB):

/*
 * Real-Time Scheduling Class (mapped to the SCHED_FIFO and SCHED_RR
 * policies)
 */

#include "sched.h"

#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/irq_work.h>
#include <linux/hrtimer.h>

#include "walt.h"
#include "tune.h"

int sched_rr_timeslice = RR_TIMESLICE;
int sysctl_sched_rr_timeslice = (MSEC_PER_SEC / HZ) * RR_TIMESLICE;

static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun);

struct rt_bandwidth def_rt_bandwidth;
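
/*
 * Bandwidth replenishment timer: each expiry forwards the timer by one
 * rt_period and replenishes runtime via do_sched_rt_period_timer(); once
 * every serviced runqueue reports idle, the timer stops rearming itself.
 */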
static enum hrtimer_restart sched_rt_period_timer(struct hrtimer *timer)
{
        struct rt_bandwidth *rt_b =
                container_of(timer, struct rt_bandwidth, rt_period_timer);
        int idle = 0;
        int overrun;

        raw_spin_lock(&rt_b->rt_runtime_lock);
        for (;;) {
                overrun = hrtimer_forward_now(timer, rt_b->rt_period);
                if (!overrun)
                        break;

                raw_spin_unlock(&rt_b->rt_runtime_lock);
                idle = do_sched_rt_period_timer(rt_b, overrun);
                raw_spin_lock(&rt_b->rt_runtime_lock);
        }
        if (idle)
                rt_b->rt_period_active = 0;
        raw_spin_unlock(&rt_b->rt_runtime_lock);

        return idle ? HRTIMER_NORESTART : HRTIMER_RESTART;
}

void init_rt_bandwidth(struct rt_bandwidth *rt_b, u64 period, u64 runtime)
{
        rt_b->rt_period = ns_to_ktime(period);
        rt_b->rt_runtime = runtime;

        raw_spin_lock_init(&rt_b->rt_runtime_lock);

        hrtimer_init(&rt_b->rt_period_timer,
                        CLOCK_MONOTONIC, HRTIMER_MODE_REL);
        rt_b->rt_period_timer.function = sched_rt_period_timer;
}

static void start_rt_bandwidth(struct rt_bandwidth *rt_b)
{
        if (!rt_bandwidth_enabled() || rt_b->rt_runtime == RUNTIME_INF)
                return;

        raw_spin_lock(&rt_b->rt_runtime_lock);
        if (!rt_b->rt_period_active) {
                rt_b->rt_period_active = 1;
                hrtimer_forward_now(&rt_b->rt_period_timer, rt_b->rt_period);
                hrtimer_start_expires(&rt_b->rt_period_timer, HRTIMER_MODE_ABS_PINNED);
        }
        raw_spin_unlock(&rt_b->rt_runtime_lock);
}

void init_rt_rq(struct rt_rq *rt_rq)
{
        struct rt_prio_array *array;
        int i;

        array = &rt_rq->active;
        for (i = 0; i < MAX_RT_PRIO; i++) {
                INIT_LIST_HEAD(array->queue + i);
                __clear_bit(i, array->bitmap);
        }
        /* delimiter for bitsearch: */
        __set_bit(MAX_RT_PRIO, array->bitmap);

#if defined CONFIG_SMP
        rt_rq->highest_prio.curr = MAX_RT_PRIO;
        rt_rq->highest_prio.next = MAX_RT_PRIO;
        rt_rq->rt_nr_migratory = 0;
        rt_rq->overloaded = 0;
        plist_head_init(&rt_rq->pushable_tasks);
#endif /* CONFIG_SMP */
        /* We start in dequeued state, because no RT tasks are queued */
        rt_rq->rt_queued = 0;

        rt_rq->rt_time = 0;
        rt_rq->rt_throttled = 0;
        rt_rq->rt_runtime = 0;
        raw_spin_lock_init(&rt_rq->rt_runtime_lock);
}

#ifdef CONFIG_RT_GROUP_SCHED
static void destroy_rt_bandwidth(struct rt_bandwidth *rt_b)
{
        hrtimer_cancel(&rt_b->rt_period_timer);
}

#define rt_entity_is_task(rt_se) (!(rt_se)->my_q)

static inline struct task_struct *rt_task_of(struct sched_rt_entity *rt_se)
{
#ifdef CONFIG_SCHED_DEBUG
        WARN_ON_ONCE(!rt_entity_is_task(rt_se));
#endif
        return container_of(rt_se, struct task_struct, rt);
}

static inline struct rq *rq_of_rt_rq(struct rt_rq *rt_rq)
{
        return rt_rq->rq;
}

static inline struct rt_rq *rt_rq_of_se(struct sched_rt_entity *rt_se)
{
        return rt_se->rt_rq;
}

static inline struct rq *rq_of_rt_se(struct sched_rt_entity *rt_se)
{
        struct rt_rq *rt_rq = rt_se->rt_rq;

        return rt_rq->rq;
}

void free_rt_sched_group(struct task_group *tg)
{
        int i;

        if (tg->rt_se)
                destroy_rt_bandwidth(&tg->rt_bandwidth);

        for_each_possible_cpu(i) {
                if (tg->rt_rq)
                        kfree(tg->rt_rq[i]);
                if (tg->rt_se)
                        kfree(tg->rt_se[i]);
        }

        kfree(tg->rt_rq);
        kfree(tg->rt_se);
}

void init_tg_rt_entry(struct task_group *tg, struct rt_rq *rt_rq,
                struct sched_rt_entity *rt_se, int cpu,
                struct sched_rt_entity *parent)
{
        struct rq *rq = cpu_rq(cpu);

        rt_rq->highest_prio.curr = MAX_RT_PRIO;
        rt_rq->rt_nr_boosted = 0;
        rt_rq->rq = rq;
        rt_rq->tg = tg;

        tg->rt_rq[cpu] = rt_rq;
        tg->rt_se[cpu] = rt_se;

        if (!rt_se)
                return;

        if (!parent)
                rt_se->rt_rq = &rq->rt;
        else
                rt_se->rt_rq = parent->my_q;

        rt_se->my_q = rt_rq;
        rt_se->parent = parent;
        INIT_LIST_HEAD(&rt_se->run_list);
}

int alloc_rt_sched_group(struct task_group *tg, struct task_group *parent)
{
        struct rt_rq *rt_rq;
        struct sched_rt_entity *rt_se;
        int i;

        tg->rt_rq = kzalloc(sizeof(rt_rq) * nr_cpu_ids, GFP_KERNEL);
        if (!tg->rt_rq)
                goto err;
        tg->rt_se = kzalloc(sizeof(rt_se) * nr_cpu_ids, GFP_KERNEL);
        if (!tg->rt_se)
                goto err;

        init_rt_bandwidth(&tg->rt_bandwidth,
                        ktime_to_ns(def_rt_bandwidth.rt_period), 0);

        for_each_possible_cpu(i) {
                rt_rq = kzalloc_node(sizeof(struct rt_rq),
                                     GFP_KERNEL, cpu_to_node(i));
                if (!rt_rq)
                        goto err;

                rt_se = kzalloc_node(sizeof(struct sched_rt_entity),
                                     GFP_KERNEL, cpu_to_node(i));
                if (!rt_se)
                        goto err_free_rq;

                init_rt_rq(rt_rq);
                rt_rq->rt_runtime = tg->rt_bandwidth.rt_runtime;
                init_tg_rt_entry(tg, rt_rq, rt_se, i, parent->rt_se[i]);
        }

        return 1;

err_free_rq:
        kfree(rt_rq);
err:
        return 0;
}

#else /* CONFIG_RT_GROUP_SCHED */

#define rt_entity_is_task(rt_se) (1)

static inline struct task_struct *rt_task_of(struct sched_rt_entity *rt_se)
{
        return container_of(rt_se, struct task_struct, rt);
}

static inline struct rq *rq_of_rt_rq(struct rt_rq *rt_rq)
{
        return container_of(rt_rq, struct rq, rt);
}

static inline struct rq *rq_of_rt_se(struct sched_rt_entity *rt_se)
{
        struct task_struct *p = rt_task_of(rt_se);

        return task_rq(p);
}

static inline struct rt_rq *rt_rq_of_se(struct sched_rt_entity *rt_se)
{
        struct rq *rq = rq_of_rt_se(rt_se);

        return &rq->rt;
}

void free_rt_sched_group(struct task_group *tg) { }

int alloc_rt_sched_group(struct task_group *tg, struct task_group *parent)
{
        return 1;
}
#endif /* CONFIG_RT_GROUP_SCHED */

#ifdef CONFIG_SMP

static void pull_rt_task(struct rq *this_rq);

static inline bool need_pull_rt_task(struct rq *rq, struct task_struct *prev)
{
        /* Try to pull RT tasks here if we lower this rq's prio */
        return rq->rt.highest_prio.curr > prev->prio;
}

static inline int rt_overloaded(struct rq *rq)
{
        return atomic_read(&rq->rd->rto_count);
}

static inline void rt_set_overload(struct rq *rq)
{
        if (!rq->online)
                return;

        cpumask_set_cpu(rq->cpu, rq->rd->rto_mask);
        /*
         * Make sure the mask is visible before we set
         * the overload count. That is checked to determine
         * if we should look at the mask. It would be a shame
         * if we looked at the mask, but the mask was not
         * updated yet.
         *
         * Matched by the barrier in pull_rt_task().
         */
        smp_wmb();
        atomic_inc(&rq->rd->rto_count);
}

static inline void rt_clear_overload(struct rq *rq)
{
        if (!rq->online)
                return;

        /* the order here really doesn't matter */
        atomic_dec(&rq->rd->rto_count);
        cpumask_clear_cpu(rq->cpu, rq->rd->rto_mask);
}
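
/*
 * Track whether this runqueue is "RT overloaded": it has more than one
 * runnable RT task and at least one of them can migrate. Peers consult
 * rd->rto_mask to decide whether pulling from us is worthwhile.
 */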
static void update_rt_migration(struct rt_rq *rt_rq)
{
        if (rt_rq->rt_nr_migratory && rt_rq->rt_nr_total > 1) {
                if (!rt_rq->overloaded) {
                        rt_set_overload(rq_of_rt_rq(rt_rq));
                        rt_rq->overloaded = 1;
                }
        } else if (rt_rq->overloaded) {
                rt_clear_overload(rq_of_rt_rq(rt_rq));
                rt_rq->overloaded = 0;
        }
}

static void inc_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
{
        struct task_struct *p;

        if (!rt_entity_is_task(rt_se))
                return;

        p = rt_task_of(rt_se);
        rt_rq = &rq_of_rt_rq(rt_rq)->rt;

        rt_rq->rt_nr_total++;
        if (p->nr_cpus_allowed > 1)
                rt_rq->rt_nr_migratory++;

        update_rt_migration(rt_rq);
}

static void dec_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
{
        struct task_struct *p;

        if (!rt_entity_is_task(rt_se))
                return;

        p = rt_task_of(rt_se);
        rt_rq = &rq_of_rt_rq(rt_rq)->rt;

        rt_rq->rt_nr_total--;
        if (p->nr_cpus_allowed > 1)
                rt_rq->rt_nr_migratory--;

        update_rt_migration(rt_rq);
}

static inline int has_pushable_tasks(struct rq *rq)
{
        return !plist_head_empty(&rq->rt.pushable_tasks);
}

static DEFINE_PER_CPU(struct callback_head, rt_push_head);
static DEFINE_PER_CPU(struct callback_head, rt_pull_head);

static void push_rt_tasks(struct rq *);
static void pull_rt_task(struct rq *);

static inline void queue_push_tasks(struct rq *rq)
{
        if (!has_pushable_tasks(rq))
                return;

        queue_balance_callback(rq, &per_cpu(rt_push_head, rq->cpu), push_rt_tasks);
}

static inline void queue_pull_task(struct rq *rq)
{
        queue_balance_callback(rq, &per_cpu(rt_pull_head, rq->cpu), pull_rt_task);
}

static void enqueue_pushable_task(struct rq *rq, struct task_struct *p)
{
        plist_del(&p->pushable_tasks, &rq->rt.pushable_tasks);
        plist_node_init(&p->pushable_tasks, p->prio);
        plist_add(&p->pushable_tasks, &rq->rt.pushable_tasks);

        /* Update the highest prio pushable task */
        if (p->prio < rq->rt.highest_prio.next)
                rq->rt.highest_prio.next = p->prio;
}

static void dequeue_pushable_task(struct rq *rq, struct task_struct *p)
{
        plist_del(&p->pushable_tasks, &rq->rt.pushable_tasks);

        /* Update the new highest prio pushable task */
        if (has_pushable_tasks(rq)) {
                p = plist_first_entry(&rq->rt.pushable_tasks,
                                      struct task_struct, pushable_tasks);
                rq->rt.highest_prio.next = p->prio;
        } else
                rq->rt.highest_prio.next = MAX_RT_PRIO;
}

#else

static inline void enqueue_pushable_task(struct rq *rq, struct task_struct *p)
{
}

static inline void dequeue_pushable_task(struct rq *rq, struct task_struct *p)
{
}

static inline
void inc_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
{
}

static inline
void dec_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
{
}

static inline bool need_pull_rt_task(struct rq *rq, struct task_struct *prev)
{
        return false;
}

static inline void pull_rt_task(struct rq *this_rq)
{
}

static inline void queue_push_tasks(struct rq *rq)
{
}
#endif /* CONFIG_SMP */

static void enqueue_top_rt_rq(struct rt_rq *rt_rq);
static void dequeue_top_rt_rq(struct rt_rq *rt_rq);

static inline int on_rt_rq(struct sched_rt_entity *rt_se)
{
        return !list_empty(&rt_se->run_list);
}

#ifdef CONFIG_RT_GROUP_SCHED

static inline u64 sched_rt_runtime(struct rt_rq *rt_rq)
{
        if (!rt_rq->tg)
                return RUNTIME_INF;

        return rt_rq->rt_runtime;
}

static inline u64 sched_rt_period(struct rt_rq *rt_rq)
{
        return ktime_to_ns(rt_rq->tg->rt_bandwidth.rt_period);
}

typedef struct task_group *rt_rq_iter_t;

static inline struct task_group *next_task_group(struct task_group *tg)
{
        do {
                tg = list_entry_rcu(tg->list.next,
                        typeof(struct task_group), list);
        } while (&tg->list != &task_groups && task_group_is_autogroup(tg));

        if (&tg->list == &task_groups)
                tg = NULL;

        return tg;
}

#define for_each_rt_rq(rt_rq, iter, rq)                                  \
        for (iter = container_of(&task_groups, typeof(*iter), list);     \
                (iter = next_task_group(iter)) &&                        \
                (rt_rq = iter->rt_rq[cpu_of(rq)]);)

#define for_each_sched_rt_entity(rt_se) \
        for (; rt_se; rt_se = rt_se->parent)

static inline struct rt_rq *group_rt_rq(struct sched_rt_entity *rt_se)
{
        return rt_se->my_q;
}

static void enqueue_rt_entity(struct sched_rt_entity *rt_se, bool head);
static void dequeue_rt_entity(struct sched_rt_entity *rt_se);

static void sched_rt_rq_enqueue(struct rt_rq *rt_rq)
{
        struct task_struct *curr = rq_of_rt_rq(rt_rq)->curr;
        struct rq *rq = rq_of_rt_rq(rt_rq);
        struct sched_rt_entity *rt_se;

        int cpu = cpu_of(rq);

        rt_se = rt_rq->tg->rt_se[cpu];

        if (rt_rq->rt_nr_running) {
                if (!rt_se)
                        enqueue_top_rt_rq(rt_rq);
                else if (!on_rt_rq(rt_se))
                        enqueue_rt_entity(rt_se, false);

                if (rt_rq->highest_prio.curr < curr->prio)
                        resched_curr(rq);
        }
}

static void sched_rt_rq_dequeue(struct rt_rq *rt_rq)
{
        struct sched_rt_entity *rt_se;
        int cpu = cpu_of(rq_of_rt_rq(rt_rq));

        rt_se = rt_rq->tg->rt_se[cpu];

        if (!rt_se)
                dequeue_top_rt_rq(rt_rq);
        else if (on_rt_rq(rt_se))
                dequeue_rt_entity(rt_se);
}

static inline int rt_rq_throttled(struct rt_rq *rt_rq)
{
        return rt_rq->rt_throttled && !rt_rq->rt_nr_boosted;
}

static int rt_se_boosted(struct sched_rt_entity *rt_se)
{
        struct rt_rq *rt_rq = group_rt_rq(rt_se);
        struct task_struct *p;

        if (rt_rq)
                return !!rt_rq->rt_nr_boosted;

        p = rt_task_of(rt_se);
        return p->prio != p->normal_prio;
}

#ifdef CONFIG_SMP
static inline const struct cpumask *sched_rt_period_mask(void)
{
        return this_rq()->rd->span;
}
#else
static inline const struct cpumask *sched_rt_period_mask(void)
{
        return cpu_online_mask;
}
#endif

static inline
struct rt_rq *sched_rt_period_rt_rq(struct rt_bandwidth *rt_b, int cpu)
{
        return container_of(rt_b, struct task_group, rt_bandwidth)->rt_rq[cpu];
}

static inline struct rt_bandwidth *sched_rt_bandwidth(struct rt_rq *rt_rq)
{
        return &rt_rq->tg->rt_bandwidth;
}

#else /* !CONFIG_RT_GROUP_SCHED */

static inline u64 sched_rt_runtime(struct rt_rq *rt_rq)
{
        return rt_rq->rt_runtime;
}

static inline u64 sched_rt_period(struct rt_rq *rt_rq)
{
        return ktime_to_ns(def_rt_bandwidth.rt_period);
}

typedef struct rt_rq *rt_rq_iter_t;

#define for_each_rt_rq(rt_rq, iter, rq) \
        for ((void) iter, rt_rq = &rq->rt; rt_rq; rt_rq = NULL)

#define for_each_sched_rt_entity(rt_se) \
        for (; rt_se; rt_se = NULL)

static inline struct rt_rq *group_rt_rq(struct sched_rt_entity *rt_se)
{
        return NULL;
}

static inline void sched_rt_rq_enqueue(struct rt_rq *rt_rq)
{
        struct rq *rq = rq_of_rt_rq(rt_rq);

        if (!rt_rq->rt_nr_running)
                return;

        enqueue_top_rt_rq(rt_rq);
        resched_curr(rq);
}

static inline void sched_rt_rq_dequeue(struct rt_rq *rt_rq)
{
        dequeue_top_rt_rq(rt_rq);
}

static inline int rt_rq_throttled(struct rt_rq *rt_rq)
{
        return rt_rq->rt_throttled;
}

static inline const struct cpumask *sched_rt_period_mask(void)
{
        return cpu_online_mask;
}

static inline
struct rt_rq *sched_rt_period_rt_rq(struct rt_bandwidth *rt_b, int cpu)
{
        return &cpu_rq(cpu)->rt;
}

static inline struct rt_bandwidth *sched_rt_bandwidth(struct rt_rq *rt_rq)
{
        return &def_rt_bandwidth;
}

#endif /* CONFIG_RT_GROUP_SCHED */

bool sched_rt_bandwidth_account(struct rt_rq *rt_rq)
{
        struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);

        return (hrtimer_active(&rt_b->rt_period_timer) ||
                rt_rq->rt_time < rt_b->rt_runtime);
}

#ifdef CONFIG_SMP
/*
 * We ran out of runtime, see if we can borrow some from our neighbours.
 */
static void do_balance_runtime(struct rt_rq *rt_rq)
{
        struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
        struct root_domain *rd = rq_of_rt_rq(rt_rq)->rd;
        int i, weight;
        u64 rt_period;

        weight = cpumask_weight(rd->span);

        raw_spin_lock(&rt_b->rt_runtime_lock);
        rt_period = ktime_to_ns(rt_b->rt_period);
        for_each_cpu(i, rd->span) {
                struct rt_rq *iter = sched_rt_period_rt_rq(rt_b, i);
                s64 diff;

                if (iter == rt_rq)
                        continue;

                raw_spin_lock(&iter->rt_runtime_lock);
                /*
                 * Either all rqs have inf runtime and there's nothing to steal
                 * or __disable_runtime() below sets a specific rq to inf to
                 * indicate it's been disabled and disallow stealing.
                 */
                if (iter->rt_runtime == RUNTIME_INF)
                        goto next;

                /*
                 * From runqueues with spare time, take 1/n part of their
                 * spare time, but no more than our period.
                 */
                diff = iter->rt_runtime - iter->rt_time;
                if (diff > 0) {
                        diff = div_u64((u64)diff, weight);
                        if (rt_rq->rt_runtime + diff > rt_period)
                                diff = rt_period - rt_rq->rt_runtime;
                        iter->rt_runtime -= diff;
                        rt_rq->rt_runtime += diff;
                        if (rt_rq->rt_runtime == rt_period) {
                                raw_spin_unlock(&iter->rt_runtime_lock);
                                break;
                        }
                }
next:
                raw_spin_unlock(&iter->rt_runtime_lock);
        }
        raw_spin_unlock(&rt_b->rt_runtime_lock);
}

/*
 * Ensure this RQ takes back all the runtime it lent to its neighbours.
 */
static void __disable_runtime(struct rq *rq)
{
        struct root_domain *rd = rq->rd;
        rt_rq_iter_t iter;
        struct rt_rq *rt_rq;

        if (unlikely(!scheduler_running))
                return;

        for_each_rt_rq(rt_rq, iter, rq) {
                struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
                s64 want;
                int i;

                raw_spin_lock(&rt_b->rt_runtime_lock);
                raw_spin_lock(&rt_rq->rt_runtime_lock);
                /*
                 * Either we're all inf and nobody needs to borrow, or we're
                 * already disabled and thus have nothing to do, or we have
                 * exactly the right amount of runtime to take out.
                 */
                if (rt_rq->rt_runtime == RUNTIME_INF ||
                                rt_rq->rt_runtime == rt_b->rt_runtime)
                        goto balanced;
                raw_spin_unlock(&rt_rq->rt_runtime_lock);

                /*
                 * Calculate the difference between what we started out with
                 * and what we currently have; that's the amount of runtime
                 * we lent out and now have to reclaim.
                 */
                want = rt_b->rt_runtime - rt_rq->rt_runtime;

                /*
                 * Greedy reclaim, take back as much as we can.
                 */
                for_each_cpu(i, rd->span) {
                        struct rt_rq *iter = sched_rt_period_rt_rq(rt_b, i);
                        s64 diff;

                        /*
                         * Can't reclaim from ourselves or disabled runqueues.
                         */
                        if (iter == rt_rq || iter->rt_runtime == RUNTIME_INF)
                                continue;

                        raw_spin_lock(&iter->rt_runtime_lock);
                        if (want > 0) {
                                diff = min_t(s64, iter->rt_runtime, want);
                                iter->rt_runtime -= diff;
                                want -= diff;
                        } else {
                                iter->rt_runtime -= want;
                                want -= want;
                        }
                        raw_spin_unlock(&iter->rt_runtime_lock);

                        if (!want)
                                break;
                }

                raw_spin_lock(&rt_rq->rt_runtime_lock);
                /*
                 * We cannot be left wanting - that would mean some runtime
                 * leaked out of the system.
                 */
                BUG_ON(want);
balanced:
                /*
                 * Disable all the borrow logic by pretending we have inf
                 * runtime - in which case borrowing doesn't make sense.
                 */
                rt_rq->rt_runtime = RUNTIME_INF;
                rt_rq->rt_throttled = 0;
                raw_spin_unlock(&rt_rq->rt_runtime_lock);
                raw_spin_unlock(&rt_b->rt_runtime_lock);

                /* Make rt_rq available for pick_next_task() */
                sched_rt_rq_enqueue(rt_rq);
        }
}

static void __enable_runtime(struct rq *rq)
{
        rt_rq_iter_t iter;
        struct rt_rq *rt_rq;

        if (unlikely(!scheduler_running))
                return;

        /*
         * Reset each runqueue's bandwidth settings
         */
        for_each_rt_rq(rt_rq, iter, rq) {
                struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);

                raw_spin_lock(&rt_b->rt_runtime_lock);
                raw_spin_lock(&rt_rq->rt_runtime_lock);
                rt_rq->rt_runtime = rt_b->rt_runtime;
                rt_rq->rt_time = 0;
                rt_rq->rt_throttled = 0;
                raw_spin_unlock(&rt_rq->rt_runtime_lock);
                raw_spin_unlock(&rt_b->rt_runtime_lock);
        }
}

static void balance_runtime(struct rt_rq *rt_rq)
{
        if (!sched_feat(RT_RUNTIME_SHARE))
                return;

        if (rt_rq->rt_time > rt_rq->rt_runtime) {
                raw_spin_unlock(&rt_rq->rt_runtime_lock);
                do_balance_runtime(rt_rq);
                raw_spin_lock(&rt_rq->rt_runtime_lock);
        }
}
#else /* !CONFIG_SMP */
static inline void balance_runtime(struct rt_rq *rt_rq) {}
#endif /* CONFIG_SMP */
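
/*
 * Runs once per rt_period from the bandwidth timer: decay each rt_rq's
 * accrued rt_time by up to overrun * runtime, unthrottle runqueues that
 * are back under their quota, and report whether the timer may stop
 * rearming (all serviced runqueues idle, or enforcement effectively off).
 */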
static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun)
{
        int i, idle = 1, throttled = 0;
        const struct cpumask *span;

        span = sched_rt_period_mask();
#ifdef CONFIG_RT_GROUP_SCHED
        /*
         * FIXME: isolated CPUs should really leave the root task group,
         * whether they are isolcpus or were isolated via cpusets, lest
         * the timer run on a CPU which does not service all runqueues,
         * potentially leaving other CPUs indefinitely throttled.  If
         * isolation is really required, the user will turn the throttle
         * off to kill the perturbations it causes anyway.  Meanwhile,
         * this maintains functionality for boot and/or troubleshooting.
         */
        if (rt_b == &root_task_group.rt_bandwidth)
                span = cpu_online_mask;
#endif
        for_each_cpu(i, span) {
                int enqueue = 0;
                struct rt_rq *rt_rq = sched_rt_period_rt_rq(rt_b, i);
                struct rq *rq = rq_of_rt_rq(rt_rq);

                raw_spin_lock(&rq->lock);
                update_rq_clock(rq);

                if (rt_rq->rt_time) {
                        u64 runtime;

                        raw_spin_lock(&rt_rq->rt_runtime_lock);
                        if (rt_rq->rt_throttled)
                                balance_runtime(rt_rq);
                        runtime = rt_rq->rt_runtime;
                        rt_rq->rt_time -= min(rt_rq->rt_time, overrun*runtime);
                        if (rt_rq->rt_throttled && rt_rq->rt_time < runtime) {
                                rt_rq->rt_throttled = 0;
                                enqueue = 1;

                                /*
                                 * When we're idle and a woken (rt) task is
                                 * throttled check_preempt_curr() will set
                                 * skip_update and the time between the wakeup
                                 * and this unthrottle will get accounted as
                                 * 'runtime'.
                                 */
                                if (rt_rq->rt_nr_running && rq->curr == rq->idle)
                                        rq_clock_skip_update(rq, false);
                        }
                        if (rt_rq->rt_time || rt_rq->rt_nr_running)
                                idle = 0;
                        raw_spin_unlock(&rt_rq->rt_runtime_lock);
                } else if (rt_rq->rt_nr_running) {
                        idle = 0;
                        if (!rt_rq_throttled(rt_rq))
                                enqueue = 1;
                }
                if (rt_rq->rt_throttled)
                        throttled = 1;

                if (enqueue)
                        sched_rt_rq_enqueue(rt_rq);
                raw_spin_unlock(&rq->lock);
        }

        if (!throttled && (!rt_bandwidth_enabled() || rt_b->rt_runtime == RUNTIME_INF))
                return 1;

        return idle;
}

static inline int rt_se_prio(struct sched_rt_entity *rt_se)
{
#ifdef CONFIG_RT_GROUP_SCHED
        struct rt_rq *rt_rq = group_rt_rq(rt_se);

        if (rt_rq)
                return rt_rq->highest_prio.curr;
#endif

        return rt_task_of(rt_se)->prio;
}
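
/*
 * On a throttling event, log the RT tasks queued on this rt_rq as
 * potential CPU hogs; with CONFIG_PANIC_ON_RT_THROTTLING the report is
 * printed with pr_err() and followed by BUG().
 */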
static void dump_throttled_rt_tasks(struct rt_rq *rt_rq)
{
        struct rt_prio_array *array = &rt_rq->active;
        struct sched_rt_entity *rt_se;
        char buf[500];
        char *pos = buf;
        char *end = buf + sizeof(buf);
        int idx;

        pos += snprintf(pos, sizeof(buf),
                "sched: RT throttling activated for rt_rq %p (cpu %d)\n",
                rt_rq, cpu_of(rq_of_rt_rq(rt_rq)));

        if (bitmap_empty(array->bitmap, MAX_RT_PRIO))
                goto out;

        pos += snprintf(pos, end - pos, "potential CPU hogs:\n");
        idx = sched_find_first_bit(array->bitmap);
        while (idx < MAX_RT_PRIO) {
                list_for_each_entry(rt_se, array->queue + idx, run_list) {
                        struct task_struct *p;

                        if (!rt_entity_is_task(rt_se))
                                continue;

                        p = rt_task_of(rt_se);
                        if (pos < end)
                                pos += snprintf(pos, end - pos, "\t%s (%d)\n",
                                        p->comm, p->pid);
                }
                idx = find_next_bit(array->bitmap, MAX_RT_PRIO, idx + 1);
        }
out:
#ifdef CONFIG_PANIC_ON_RT_THROTTLING
        /*
         * Use pr_err() in the BUG() case since printk_sched() will
         * not get flushed and deadlock is not a concern.
         */
        pr_err("%s", buf);
        BUG();
#else
        printk_deferred("%s", buf);
#endif
}
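
/*
 * Charge path: called with rt_rq->rt_runtime_lock held from
 * update_curr_rt(). Returns 1 when this rt_rq has exhausted its budget
 * (after trying to borrow via balance_runtime()) and ends up throttled
 * and dequeued.
 */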
static int sched_rt_runtime_exceeded(struct rt_rq *rt_rq)
{
        u64 runtime = sched_rt_runtime(rt_rq);

        if (rt_rq->rt_throttled)
                return rt_rq_throttled(rt_rq);

        if (runtime >= sched_rt_period(rt_rq))
                return 0;

        balance_runtime(rt_rq);
        runtime = sched_rt_runtime(rt_rq);
        if (runtime == RUNTIME_INF)
                return 0;

        if (rt_rq->rt_time > runtime) {
                struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);

                /*
                 * Don't actually throttle groups that have no runtime assigned
                 * but accrue some time due to boosting.
                 */
                if (likely(rt_b->rt_runtime)) {
                        static bool once = false;

                        rt_rq->rt_throttled = 1;

                        if (!once) {
                                once = true;
                                dump_throttled_rt_tasks(rt_rq);
                        }
                } else {
                        /*
                         * In case we did anyway, make it go away,
                         * replenishment is a joke, since it will replenish us
                         * with exactly 0 ns.
                         */
                        rt_rq->rt_time = 0;
                }

                if (rt_rq_throttled(rt_rq)) {
                        sched_rt_rq_dequeue(rt_rq);
                        return 1;
                }
        }

        return 0;
}
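
/*
 * SchedTune boost holdoff for RT tasks: instead of dropping the boost as
 * soon as an RT task dequeues, a pinned 50 ms hrtimer is armed and the
 * boost is only removed in rt_schedtune_timer() if the task has not been
 * enqueued again in the meantime.
 */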
/* TODO: Make configurable */
|
|
#define RT_SCHEDTUNE_INTERVAL 50000000ULL
|
|
|
|
static enum hrtimer_restart rt_schedtune_timer(struct hrtimer *timer)
{
	struct sched_rt_entity *rt_se = container_of(timer,
			struct sched_rt_entity,
			schedtune_timer);
	struct task_struct *p = rt_task_of(rt_se);
	struct rq *rq = task_rq(p);

	raw_spin_lock(&rq->lock);

	/*
	 * Nothing to do if:
	 * - task has switched runqueues
	 * - task isn't RT anymore
	 */
	if (rq != task_rq(p) || (p->sched_class != &rt_sched_class))
		goto out;

	/*
	 * If the task got enqueued back during callback time, we raced with
	 * the enqueue on another cpu. That's OK: just do nothing, as the
	 * enqueue path would have tried to cancel us and we shouldn't run.
	 * Also check the schedtune_enqueued flag, as a class switch on a
	 * sleeping task may have already canceled the timer and done the
	 * dequeue.
	 */
	if (p->on_rq || !rt_se->schedtune_enqueued)
		goto out;

	/*
	 * RT task is no longer active, cancel boost
	 */
	rt_se->schedtune_enqueued = false;
	schedtune_dequeue_task(p, cpu_of(rq));
	cpufreq_update_this_cpu(rq, SCHED_CPUFREQ_RT);
out:
	raw_spin_unlock(&rq->lock);

	/*
	 * This can free the task_struct if no more references.
	 */
	put_task_struct(p);

	return HRTIMER_NORESTART;
}

void init_rt_schedtune_timer(struct sched_rt_entity *rt_se)
{
	struct hrtimer *timer = &rt_se->schedtune_timer;

	hrtimer_init(timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	timer->function = rt_schedtune_timer;
	rt_se->schedtune_enqueued = false;
}

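/*
 * Arm the deboost timer, pinned to the current cpu. Callers must hold a
 * task reference across the timer window (see the get_task_struct() in
 * dequeue_task_rt()); rt_schedtune_timer() drops it when it fires.
 */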
static void start_schedtune_timer(struct sched_rt_entity *rt_se)
{
	struct hrtimer *timer = &rt_se->schedtune_timer;

	hrtimer_start(timer, ns_to_ktime(RT_SCHEDTUNE_INTERVAL),
		      HRTIMER_MODE_REL_PINNED);
}

/*
 * Update the current task's runtime statistics. Skip current tasks that
 * are not in our scheduling class.
 */
static void update_curr_rt(struct rq *rq)
{
	struct task_struct *curr = rq->curr;
	struct sched_rt_entity *rt_se = &curr->rt;
	u64 delta_exec;

	if (curr->sched_class != &rt_sched_class)
		return;

	delta_exec = rq_clock_task(rq) - curr->se.exec_start;
	if (unlikely((s64)delta_exec <= 0))
		return;

	/* Kick cpufreq (see the comment in kernel/sched/sched.h). */
	cpufreq_update_this_cpu(rq, SCHED_CPUFREQ_RT);

	schedstat_set(curr->se.statistics.exec_max,
		      max(curr->se.statistics.exec_max, delta_exec));

	curr->se.sum_exec_runtime += delta_exec;
	account_group_exec_runtime(curr, delta_exec);

	curr->se.exec_start = rq_clock_task(rq);
	cpuacct_charge(curr, delta_exec);

	sched_rt_avg_update(rq, delta_exec);

	if (!rt_bandwidth_enabled())
		return;

	for_each_sched_rt_entity(rt_se) {
		struct rt_rq *rt_rq = rt_rq_of_se(rt_se);

		if (sched_rt_runtime(rt_rq) != RUNTIME_INF) {
			raw_spin_lock(&rt_rq->rt_runtime_lock);
			rt_rq->rt_time += delta_exec;
			if (sched_rt_runtime_exceeded(rt_rq))
				resched_curr(rq);
			raw_spin_unlock(&rt_rq->rt_runtime_lock);
		}
	}
}

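/*
 * The two helpers below keep rq->nr_running in sync with the root rt_rq:
 * the root rt_rq's tasks are only counted in rq->nr_running while
 * rt_queued is set, so throttling can remove them all in one step.
 */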
static void
dequeue_top_rt_rq(struct rt_rq *rt_rq)
{
	struct rq *rq = rq_of_rt_rq(rt_rq);

	BUG_ON(&rq->rt != rt_rq);

	if (!rt_rq->rt_queued)
		return;

	BUG_ON(!rq->nr_running);

	sub_nr_running(rq, rt_rq->rt_nr_running);
	rt_rq->rt_queued = 0;
}

static void
enqueue_top_rt_rq(struct rt_rq *rt_rq)
{
	struct rq *rq = rq_of_rt_rq(rt_rq);

	BUG_ON(&rq->rt != rt_rq);

	if (rt_rq->rt_queued)
		return;
	if (rt_rq_throttled(rt_rq) || !rt_rq->rt_nr_running)
		return;

	add_nr_running(rq, rt_rq->rt_nr_running);
	rt_rq->rt_queued = 1;
}

#if defined CONFIG_SMP

static void
inc_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio)
{
	struct rq *rq = rq_of_rt_rq(rt_rq);

#ifdef CONFIG_RT_GROUP_SCHED
	/*
	 * Change rq's cpupri only if rt_rq is the top queue.
	 */
	if (&rq->rt != rt_rq)
		return;
#endif
	if (rq->online && prio < prev_prio)
		cpupri_set(&rq->rd->cpupri, rq->cpu, prio);
}

static void
dec_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio)
{
	struct rq *rq = rq_of_rt_rq(rt_rq);

#ifdef CONFIG_RT_GROUP_SCHED
	/*
	 * Change rq's cpupri only if rt_rq is the top queue.
	 */
	if (&rq->rt != rt_rq)
		return;
#endif
	if (rq->online && rt_rq->highest_prio.curr != prev_prio)
		cpupri_set(&rq->rd->cpupri, rq->cpu, rt_rq->highest_prio.curr);
}

#else /* CONFIG_SMP */

static inline
void inc_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio) {}
static inline
void dec_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio) {}

#endif /* CONFIG_SMP */

#if defined CONFIG_SMP || defined CONFIG_RT_GROUP_SCHED
static void
inc_rt_prio(struct rt_rq *rt_rq, int prio)
{
	int prev_prio = rt_rq->highest_prio.curr;

	if (prio < prev_prio)
		rt_rq->highest_prio.curr = prio;

	inc_rt_prio_smp(rt_rq, prio, prev_prio);
}

static void
dec_rt_prio(struct rt_rq *rt_rq, int prio)
{
	int prev_prio = rt_rq->highest_prio.curr;

	if (rt_rq->rt_nr_running) {

		WARN_ON(prio < prev_prio);

		/*
		 * This may have been our highest task, and therefore
		 * we may have some recomputation to do
		 */
		if (prio == prev_prio) {
			struct rt_prio_array *array = &rt_rq->active;

			rt_rq->highest_prio.curr =
				sched_find_first_bit(array->bitmap);
		}

	} else
		rt_rq->highest_prio.curr = MAX_RT_PRIO;

	dec_rt_prio_smp(rt_rq, prio, prev_prio);
}

#else

static inline void inc_rt_prio(struct rt_rq *rt_rq, int prio) {}
static inline void dec_rt_prio(struct rt_rq *rt_rq, int prio) {}

#endif /* CONFIG_SMP || CONFIG_RT_GROUP_SCHED */

#ifdef CONFIG_RT_GROUP_SCHED

static void
inc_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
{
	if (rt_se_boosted(rt_se))
		rt_rq->rt_nr_boosted++;

	if (rt_rq->tg)
		start_rt_bandwidth(&rt_rq->tg->rt_bandwidth);
}

static void
dec_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
{
	if (rt_se_boosted(rt_se))
		rt_rq->rt_nr_boosted--;

	WARN_ON(!rt_rq->rt_nr_running && rt_rq->rt_nr_boosted);
}

#else /* CONFIG_RT_GROUP_SCHED */

static void
inc_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
{
	start_rt_bandwidth(&def_rt_bandwidth);
}

static inline
void dec_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq) {}

#endif /* CONFIG_RT_GROUP_SCHED */

static inline
unsigned int rt_se_nr_running(struct sched_rt_entity *rt_se)
{
	struct rt_rq *group_rq = group_rt_rq(rt_se);

	if (group_rq)
		return group_rq->rt_nr_running;
	else
		return 1;
}

static inline
void inc_rt_tasks(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
{
	int prio = rt_se_prio(rt_se);

	WARN_ON(!rt_prio(prio));
	rt_rq->rt_nr_running += rt_se_nr_running(rt_se);

	inc_rt_prio(rt_rq, prio);
	inc_rt_migration(rt_se, rt_rq);
	inc_rt_group(rt_se, rt_rq);
}

static inline
void dec_rt_tasks(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
{
	WARN_ON(!rt_prio(rt_se_prio(rt_se)));
	WARN_ON(!rt_rq->rt_nr_running);
	rt_rq->rt_nr_running -= rt_se_nr_running(rt_se);

	dec_rt_prio(rt_rq, rt_se_prio(rt_se));
	dec_rt_migration(rt_se, rt_rq);
	dec_rt_group(rt_se, rt_rq);
}

static void __enqueue_rt_entity(struct sched_rt_entity *rt_se, bool head)
{
	struct rt_rq *rt_rq = rt_rq_of_se(rt_se);
	struct rt_prio_array *array = &rt_rq->active;
	struct rt_rq *group_rq = group_rt_rq(rt_se);
	struct list_head *queue = array->queue + rt_se_prio(rt_se);

	/*
	 * Don't enqueue the group if it's throttled, or when empty.
	 * The latter is a consequence of the former when a child group
	 * gets throttled and the current group doesn't have any other
	 * active members.
	 */
	if (group_rq && (rt_rq_throttled(group_rq) || !group_rq->rt_nr_running))
		return;

	if (head)
		list_add(&rt_se->run_list, queue);
	else
		list_add_tail(&rt_se->run_list, queue);
	__set_bit(rt_se_prio(rt_se), array->bitmap);

	inc_rt_tasks(rt_se, rt_rq);
}

static void __dequeue_rt_entity(struct sched_rt_entity *rt_se)
{
	struct rt_rq *rt_rq = rt_rq_of_se(rt_se);
	struct rt_prio_array *array = &rt_rq->active;

	list_del_init(&rt_se->run_list);
	if (list_empty(array->queue + rt_se_prio(rt_se)))
		__clear_bit(rt_se_prio(rt_se), array->bitmap);

	dec_rt_tasks(rt_se, rt_rq);
}

/*
 * Because the prio of an upper entry depends on the lower
 * entries, we must remove entries top-down.
 */
static void dequeue_rt_stack(struct sched_rt_entity *rt_se)
{
	struct sched_rt_entity *back = NULL;

	for_each_sched_rt_entity(rt_se) {
		rt_se->back = back;
		back = rt_se;
	}

	dequeue_top_rt_rq(rt_rq_of_se(back));

	for (rt_se = back; rt_se; rt_se = rt_se->back) {
		if (on_rt_rq(rt_se))
			__dequeue_rt_entity(rt_se);
	}
}

static void enqueue_rt_entity(struct sched_rt_entity *rt_se, bool head)
{
	struct rq *rq = rq_of_rt_se(rt_se);

	dequeue_rt_stack(rt_se);
	for_each_sched_rt_entity(rt_se)
		__enqueue_rt_entity(rt_se, head);
	enqueue_top_rt_rq(&rq->rt);
}

static void dequeue_rt_entity(struct sched_rt_entity *rt_se)
{
	struct rq *rq = rq_of_rt_se(rt_se);

	dequeue_rt_stack(rt_se);

	for_each_sched_rt_entity(rt_se) {
		struct rt_rq *rt_rq = group_rt_rq(rt_se);

		if (rt_rq && rt_rq->rt_nr_running)
			__enqueue_rt_entity(rt_se, false);
	}
	enqueue_top_rt_rq(&rq->rt);
}

/*
 * Keep track of whether each cpu has an RT task that will
 * soon be scheduled on that cpu. The problem this is intended
 * to address is that we want to avoid entering a non-preemptible
 * softirq handler if we are about to schedule a real-time
 * task on that cpu. Ideally, we could just check whether
 * the RT runqueue on that cpu had a runnable task, but the
 * window between choosing to schedule a real-time task
 * on a cpu and actually enqueueing it on that runqueue
 * is large enough to lose races at an unacceptably high rate.
 *
 * This variable attempts to reduce that window by indicating
 * when we have decided to schedule an RT task on a cpu
 * but not yet enqueued it.
 * This variable is a heuristic only: it is not guaranteed
 * to be correct and may be updated without synchronization.
 */
DEFINE_PER_CPU(bool, incoming_rt_task);

/*
 * Adding/removing a task to/from a priority array:
 */
static void
enqueue_task_rt(struct rq *rq, struct task_struct *p, int flags)
{
	struct sched_rt_entity *rt_se = &p->rt;

	if (flags & ENQUEUE_WAKEUP)
		rt_se->timeout = 0;

	enqueue_rt_entity(rt_se, flags & ENQUEUE_HEAD);
	walt_inc_cumulative_runnable_avg(rq, p);

	if (!task_current(rq, p) && p->nr_cpus_allowed > 1)
		enqueue_pushable_task(rq, p);

	*per_cpu_ptr(&incoming_rt_task, cpu_of(rq)) = false;

	if (!schedtune_task_boost(p))
		return;

	/*
	 * If the schedtune timer is active, a boost was already done;
	 * just cancel the timer so that a deboost doesn't happen.
	 * Otherwise, increase the boost. If an enqueued timer was
	 * cancelled, put the task reference.
	 */
	if (hrtimer_try_to_cancel(&rt_se->schedtune_timer) == 1)
		put_task_struct(p);

	/*
	 * schedtune_enqueued can be true in the following situation:
	 * enqueue_task_rt grabs the rq lock before the timer fires,
	 * or before its callback acquires the rq lock.
	 * schedtune_enqueued can be false if the timer callback is
	 * running and has just released the rq lock, or if the timer
	 * finished running and canceled the boost.
	 */
	if (rt_se->schedtune_enqueued)
		return;

	rt_se->schedtune_enqueued = true;
	schedtune_enqueue_task(p, cpu_of(rq));
	cpufreq_update_this_cpu(rq, SCHED_CPUFREQ_RT);
}

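/*
 * On a sleep dequeue, the schedtune deboost is deferred by
 * RT_SCHEDTUNE_INTERVAL via the pinned hrtimer rather than done
 * immediately; a task reference is held across that window.
 */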
static void dequeue_task_rt(struct rq *rq, struct task_struct *p, int flags)
{
	struct sched_rt_entity *rt_se = &p->rt;

	update_curr_rt(rq);
	dequeue_rt_entity(rt_se);
	walt_dec_cumulative_runnable_avg(rq, p);

	dequeue_pushable_task(rq, p);

	if (!rt_se->schedtune_enqueued)
		return;

	if (flags == DEQUEUE_SLEEP) {
		get_task_struct(p);
		start_schedtune_timer(rt_se);
		return;
	}

	rt_se->schedtune_enqueued = false;
	schedtune_dequeue_task(p, cpu_of(rq));
	cpufreq_update_this_cpu(rq, SCHED_CPUFREQ_RT);
}

/*
 * Put task to the head or the end of the run list without the overhead of
 * dequeue followed by enqueue.
 */
static void
requeue_rt_entity(struct rt_rq *rt_rq, struct sched_rt_entity *rt_se, int head)
{
	if (on_rt_rq(rt_se)) {
		struct rt_prio_array *array = &rt_rq->active;
		struct list_head *queue = array->queue + rt_se_prio(rt_se);

		if (head)
			list_move(&rt_se->run_list, queue);
		else
			list_move_tail(&rt_se->run_list, queue);
	}
}

static void requeue_task_rt(struct rq *rq, struct task_struct *p, int head)
{
	struct sched_rt_entity *rt_se = &p->rt;
	struct rt_rq *rt_rq;

	for_each_sched_rt_entity(rt_se) {
		rt_rq = rt_rq_of_se(rt_se);
		requeue_rt_entity(rt_rq, rt_se, head);
	}
}

static void yield_task_rt(struct rq *rq)
{
	requeue_task_rt(rq, rq->curr, 0);
}

/*
 * Return whether the given cpu has (or will shortly have) an RT task
 * ready to run. NB: This is a heuristic and is subject to races.
 */
bool
cpu_has_rt_task(int cpu)
{
	struct rq *rq = cpu_rq(cpu);

	return rq->rt.rt_nr_running > 0 || per_cpu(incoming_rt_task, cpu);
}

#ifdef CONFIG_SMP
static int find_lowest_rq(struct task_struct *task, int sync);

/*
 * Return whether the task on the given cpu is currently non-preemptible
 * while handling a potentially long softint, or if the task is likely
 * to block preemptions soon because (a) it is a ksoftirqd thread that is
 * handling slow softints, (b) it is idle and therefore likely to start
 * processing the irqs immediately, or (c) the cpu is currently handling
 * hard irqs and will soon move on to the softirq handler.
 */
bool
task_may_not_preempt(struct task_struct *task, int cpu)
{
	__u32 softirqs = per_cpu(active_softirqs, cpu) |
			 __IRQ_STAT(cpu, __softirq_pending);
	struct task_struct *cpu_ksoftirqd = per_cpu(ksoftirqd, cpu);

	return ((softirqs & LONG_SOFTIRQ_MASK) &&
		(task == cpu_ksoftirqd || is_idle_task(task) ||
		 (task_thread_info(task)->preempt_count
		  & (HARDIRQ_MASK | SOFTIRQ_MASK))));
}

/*
 * Perform a schedtune dequeue and cancellation of boost timers if needed.
 * Should be called only with the rq->lock held.
 */
static void schedtune_dequeue_rt(struct rq *rq, struct task_struct *p)
{
	struct sched_rt_entity *rt_se = &p->rt;

	BUG_ON(!raw_spin_is_locked(&rq->lock));

	if (!rt_se->schedtune_enqueued)
		return;

	/*
	 * In case of a class change, cancel any active timers. If an
	 * enqueued timer was cancelled, put the task ref.
	 */
	if (hrtimer_try_to_cancel(&rt_se->schedtune_timer) == 1)
		put_task_struct(p);

	/* schedtune_enqueued is true, deboost it */
	rt_se->schedtune_enqueued = false;
	schedtune_dequeue_task(p, task_cpu(p));
	cpufreq_update_this_cpu(rq, SCHED_CPUFREQ_RT);
}

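/*
 * Wakeup placement for RT tasks. Beyond the stock "lowest priority rq"
 * policy, this variant also avoids cpus whose current task may not be
 * preemptible (see task_may_not_preempt() above) and records the chosen
 * cpu in incoming_rt_task before the task is actually enqueued.
 */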
static int
select_task_rq_rt(struct task_struct *p, int cpu, int sd_flag, int flags,
		  int sibling_count_hint)
{
	struct task_struct *curr, *tgt_task;
	struct rq *rq;
	bool may_not_preempt;
	int target;
	int sync = flags & WF_SYNC;

	/* For anything but wake ups, just return the task_cpu */
	if (sd_flag != SD_BALANCE_WAKE && sd_flag != SD_BALANCE_FORK)
		goto out;

	rq = cpu_rq(cpu);

	rcu_read_lock();
	curr = READ_ONCE(rq->curr); /* unlocked access */

	may_not_preempt = task_may_not_preempt(curr, cpu);
	target = find_lowest_rq(p, sync);

	/*
	 * Check once for losing a race with the other core's irq handler.
	 * This does not happen frequently, but it can avoid delaying
	 * the execution of the RT task in those cases.
	 */
	if (target != -1) {
		tgt_task = READ_ONCE(cpu_rq(target)->curr);
		if (task_may_not_preempt(tgt_task, target))
			target = find_lowest_rq(p, sync);
	}
	/*
	 * Possible race. If the current cpu is non-preemptible, prefer the
	 * remote cpu even if it is running a higher-priority task.
	 * Otherwise, don't bother moving p unless the destination CPU is
	 * running a lower priority task.
	 */
	if (target != -1 &&
	    (may_not_preempt || p->prio < cpu_rq(target)->rt.highest_prio.curr))
		cpu = target;
	*per_cpu_ptr(&incoming_rt_task, cpu) = true;
	rcu_read_unlock();

out:
	/*
	 * If the previous CPU was different, make sure to cancel any active
	 * schedtune timers and deboost.
	 */
	if (task_cpu(p) != cpu) {
		unsigned long fl;
		struct rq *prq = task_rq(p);

		raw_spin_lock_irqsave(&prq->lock, fl);
		schedtune_dequeue_rt(prq, p);
		raw_spin_unlock_irqrestore(&prq->lock, fl);
	}

	return cpu;
}

static void check_preempt_equal_prio(struct rq *rq, struct task_struct *p)
{
	/*
	 * Current can't be migrated; it is useless to reschedule,
	 * so let's hope p can move out.
	 */
	if (rq->curr->nr_cpus_allowed == 1 ||
	    !cpupri_find(&rq->rd->cpupri, rq->curr, NULL))
		return;

	/*
	 * p is migratable, so let's not schedule it and
	 * see if it is pushed or pulled somewhere else.
	 */
	if (p->nr_cpus_allowed != 1
	    && cpupri_find(&rq->rd->cpupri, p, NULL))
		return;

	/*
	 * There appear to be other cpus that can accept
	 * current and none to run 'p', so let's reschedule
	 * to try and push current away:
	 */
	requeue_task_rt(rq, p, 1);
	resched_curr(rq);
}
#endif /* CONFIG_SMP */

/*
 * Preempt the current task with a newly woken task if needed:
 */
static void check_preempt_curr_rt(struct rq *rq, struct task_struct *p, int flags)
{
	if (p->prio < rq->curr->prio) {
		resched_curr(rq);
		return;
	}

#ifdef CONFIG_SMP
	/*
	 * If:
	 *
	 * - the newly woken task is of equal priority to the current task
	 * - the newly woken task is non-migratable while current is migratable
	 * - current will be preempted on the next reschedule
	 *
	 * we should check to see if current can readily move to a different
	 * cpu. If so, we will reschedule to allow the push logic to try
	 * to move current somewhere else, making room for our non-migratable
	 * task.
	 */
	if (p->prio == rq->curr->prio && !test_tsk_need_resched(rq->curr))
		check_preempt_equal_prio(rq, p);
#endif
}

static struct sched_rt_entity *pick_next_rt_entity(struct rq *rq,
						   struct rt_rq *rt_rq)
{
	struct rt_prio_array *array = &rt_rq->active;
	struct sched_rt_entity *next = NULL;
	struct list_head *queue;
	int idx;

	idx = sched_find_first_bit(array->bitmap);
	BUG_ON(idx >= MAX_RT_PRIO);

	queue = array->queue + idx;
	next = list_entry(queue->next, struct sched_rt_entity, run_list);

	return next;
}

static struct task_struct *_pick_next_task_rt(struct rq *rq)
{
	struct sched_rt_entity *rt_se;
	struct task_struct *p;
	struct rt_rq *rt_rq = &rq->rt;

	do {
		rt_se = pick_next_rt_entity(rq, rt_rq);
		BUG_ON(!rt_se);
		rt_rq = group_rt_rq(rt_se);
	} while (rt_rq);

	p = rt_task_of(rt_se);
	p->se.exec_start = rq_clock_task(rq);

	return p;
}

static struct task_struct *
pick_next_task_rt(struct rq *rq, struct task_struct *prev)
{
	struct task_struct *p;
	struct rt_rq *rt_rq = &rq->rt;

	if (need_pull_rt_task(rq, prev)) {
		/*
		 * This is OK, because current is on_cpu, which avoids it being
		 * picked for load-balance and preemption/IRQs are still
		 * disabled avoiding further scheduler activity on it and we're
		 * being very careful to re-start the picking loop.
		 */
		lockdep_unpin_lock(&rq->lock);
		pull_rt_task(rq);
		lockdep_pin_lock(&rq->lock);
		/*
		 * pull_rt_task() can drop (and re-acquire) rq->lock; this
		 * means a dl or stop task can slip in, in which case we need
		 * to re-start task selection.
		 */
		if (unlikely((rq->stop && task_on_rq_queued(rq->stop)) ||
			     rq->dl.dl_nr_running))
			return RETRY_TASK;
	}

	/*
	 * We may dequeue prev's rt_rq in put_prev_task().
	 * So, we update time before rt_nr_running check.
	 */
	if (prev->sched_class == &rt_sched_class)
		update_curr_rt(rq);

	if (!rt_rq->rt_queued)
		return NULL;

	put_prev_task(rq, prev);

	p = _pick_next_task_rt(rq);

	/* The running task is never eligible for pushing */
	dequeue_pushable_task(rq, p);

	queue_push_tasks(rq);

	return p;
}

static void put_prev_task_rt(struct rq *rq, struct task_struct *p)
{
	update_curr_rt(rq);

	/*
	 * The previous task needs to be made eligible for pushing
	 * if it is still active
	 */
	if (on_rt_rq(&p->rt) && p->nr_cpus_allowed > 1)
		enqueue_pushable_task(rq, p);
}

#ifdef CONFIG_SMP

/* Only try algorithms three times */
#define RT_MAX_TRIES 3

static int pick_rt_task(struct rq *rq, struct task_struct *p, int cpu)
{
	if (!task_running(rq, p) &&
	    cpumask_test_cpu(cpu, tsk_cpus_allowed(p)))
		return 1;
	return 0;
}

/*
 * Return the highest pushable rq's task, which is suitable to be executed
 * on the cpu, NULL otherwise
 */
static struct task_struct *pick_highest_pushable_task(struct rq *rq, int cpu)
{
	struct plist_head *head = &rq->rt.pushable_tasks;
	struct task_struct *p;

	if (!has_pushable_tasks(rq))
		return NULL;

	plist_for_each_entry(p, head, pushable_tasks) {
		if (pick_rt_task(rq, p, cpu))
			return p;
	}

	return NULL;
}

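/*
 * Per-cpu scratch cpumask for find_lowest_rq(); allocated for each
 * possible cpu in init_sched_rt_class() below.
 */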
static DEFINE_PER_CPU(cpumask_var_t, local_cpu_mask);

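/*
 * Pick a cpu for a boosted and/or prefer-idle RT task from lowest_mask.
 * Roughly: prefer-idle tasks take the first idle candidate; boosted tasks
 * are biased towards higher-capacity cpus (iterating from the top); and
 * otherwise the candidate whose post-placement utilization best fits the
 * policy wins, with the highest-capacity non-fitting cpu kept as backup.
 */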
static int find_best_rt_target(struct task_struct *task, int cpu,
			       struct cpumask *lowest_mask,
			       bool boosted, bool prefer_idle)
{
	int iter_cpu;
	int target_cpu = -1;
	int boosted_cpu = -1;
	int backup_cpu = -1;
	int boosted_orig_capacity = capacity_orig_of(0);
	int backup_capacity = 0;
	int best_idle_cpu = -1;
	unsigned long target_util = 0;
	unsigned long new_util;

	/*
	 * We want to elect the best one based on task class,
	 * idleness, and utilization.
	 */
	for (iter_cpu = 0; iter_cpu < NR_CPUS; iter_cpu++) {
		int cur_capacity;
		/*
		 * Iterate from higher cpus for boosted tasks.
		 */
		int i = boosted ? NR_CPUS - iter_cpu - 1 : iter_cpu;

		if (!cpu_online(i) || !cpumask_test_cpu(i, tsk_cpus_allowed(task)))
			continue;

		new_util = cpu_util(i) + task_util(task);

		if (new_util > capacity_orig_of(i))
			continue;

		/*
		 * Unconditionally favor tasks that prefer idle cpus to
		 * improve latency.
		 */
		if (idle_cpu(i) && prefer_idle &&
		    cpumask_test_cpu(i, lowest_mask) && best_idle_cpu < 0) {
			best_idle_cpu = i;
			continue;
		}

		if (cpumask_test_cpu(i, lowest_mask)) {
			/*
			 * Bias cpu selection towards the cpu with higher
			 * original capacity if the task is boosted.
			 * Assumption: higher cpus are exclusively allotted
			 * for boosted tasks.
			 */
			if (boosted && boosted_cpu < 0 &&
			    boosted_orig_capacity < capacity_orig_of(i)) {
				boosted_cpu = i;
				boosted_orig_capacity = capacity_orig_of(i);
			}
			cur_capacity = capacity_curr_of(i);
			if (new_util < cur_capacity && cpu_rq(i)->nr_running) {
				if (!boosted) {
					/* Find a target cpu with the highest utilization. */
					if (target_util < new_util) {
						target_cpu = i;
						target_util = new_util;
					}
				} else {
					/* Find a target cpu with the lowest utilization. */
					if (target_util == 0 || target_util > new_util) {
						target_cpu = i;
						target_util = new_util;
					}
				}
			} else if (backup_capacity == 0 || backup_capacity < cur_capacity) {
				/* Select a backup cpu with the highest capacity. */
				backup_capacity = cur_capacity;
				backup_cpu = i;
			}
		}
	}

	if (boosted && boosted_cpu >= 0 && boosted_cpu > best_idle_cpu)
		target_cpu = boosted_cpu;
	else if (prefer_idle && best_idle_cpu >= 0)
		target_cpu = best_idle_cpu;

	if (target_cpu < 0) {
		if (backup_cpu >= 0)
			return backup_cpu;

		/* Select the current cpu if it is present in the mask. */
		if (cpumask_test_cpu(cpu, lowest_mask))
			return cpu;

		/* Otherwise pick any cpu from lowest_mask. */
		target_cpu = cpumask_any(lowest_mask);
		if (target_cpu < nr_cpu_ids)
			return target_cpu;
		return -1;
	}

	return target_cpu;
}

static int find_lowest_rq(struct task_struct *task, int sync)
{
	struct sched_domain *sd;
	struct cpumask *lowest_mask = this_cpu_cpumask_var_ptr(local_cpu_mask);
	int this_cpu = smp_processor_id();
	int cpu = task_cpu(task);
	bool boosted, prefer_idle;

	/* Make sure the mask is initialized first */
	if (unlikely(!lowest_mask))
		return -1;

	if (task->nr_cpus_allowed == 1)
		return -1; /* No other targets possible */

	/* Constructing the cpumask of lowest priorities */
	if (!cpupri_find(&task_rq(task)->rd->cpupri, task, lowest_mask))
		return -1; /* No targets found */

	/*
	 * Return the current cpu if the WF_SYNC hint is set and the cpu is
	 * present in lowest_mask. Improves data locality.
	 */
	if (sysctl_sched_sync_hint_enable && sync) {
		cpumask_t search_cpus;

		cpumask_and(&search_cpus, tsk_cpus_allowed(task), lowest_mask);
		if (cpumask_test_cpu(cpu, &search_cpus))
			return cpu;
	}

	/*
	 * At this point we have built a mask of cpus representing the
	 * lowest priority tasks in the system.
	 */

	boosted = schedtune_task_boost(task) > 0;
	prefer_idle = schedtune_prefer_idle(task) > 0;
	if (boosted || prefer_idle) {
		return find_best_rt_target(task, cpu, lowest_mask, boosted, prefer_idle);
	} else {
		/*
		 * Now we want to elect the best one based on our affinity
		 * and topology.
		 * We prioritize the last cpu that the task executed on since
		 * it is most likely cache-hot in that location.
		 */
		struct task_struct *curr;

		if (!cpumask_test_cpu(this_cpu, lowest_mask))
			this_cpu = -1; /* Skip this_cpu opt if not among lowest */

		rcu_read_lock();
		for_each_domain(cpu, sd) {
			if (sd->flags & SD_WAKE_AFFINE) {
				int best_cpu;

				/*
				 * "this_cpu" is cheaper to preempt than a
				 * remote processor.
				 */
				if (this_cpu != -1 &&
				    cpumask_test_cpu(this_cpu, sched_domain_span(sd))) {
					curr = cpu_rq(this_cpu)->curr;
					/*
					 * Ensure that boosted/prefer-idle
					 * tasks are not preempted, even if
					 * they are low priority.
					 */
					if (!curr || (schedtune_task_boost(curr) == 0 &&
					    schedtune_prefer_idle(curr) == 0)) {
						rcu_read_unlock();
						return this_cpu;
					}
				}

				best_cpu = cpumask_first_and(lowest_mask,
							     sched_domain_span(sd));
				if (best_cpu < nr_cpu_ids) {
					curr = cpu_rq(best_cpu)->curr;
					/*
					 * Ensure that boosted/prefer-idle
					 * tasks are not preempted, even if
					 * they are low priority.
					 */
					if (!curr || (schedtune_task_boost(curr) == 0 &&
					    schedtune_prefer_idle(curr) == 0)) {
						rcu_read_unlock();
						return best_cpu;
					}
				}
			}
		}
		rcu_read_unlock();

		/*
		 * And finally, if there were no matches within the domains,
		 * just give the caller *something* to work with from the
		 * compatible locations.
		 */
		if (this_cpu != -1)
			return this_cpu;

		cpu = cpumask_any(lowest_mask);
		if (cpu < nr_cpu_ids)
			return cpu;
		return -1;
	}
}

/* Will lock the rq it finds */
static struct rq *find_lock_lowest_rq(struct task_struct *task, struct rq *rq)
{
	struct rq *lowest_rq = NULL;
	int tries;
	int cpu;

	for (tries = 0; tries < RT_MAX_TRIES; tries++) {
		cpu = find_lowest_rq(task, 0);

		if ((cpu == -1) || (cpu == rq->cpu))
			break;

		lowest_rq = cpu_rq(cpu);

		if (lowest_rq->rt.highest_prio.curr <= task->prio) {
			/*
			 * Target rq has tasks of equal or higher priority,
			 * retrying does not release any lock and is unlikely
			 * to yield a different result.
			 */
			lowest_rq = NULL;
			break;
		}

		/* if the prio of this runqueue changed, try again */
		if (double_lock_balance(rq, lowest_rq)) {
			/*
			 * We had to unlock the run queue. In
			 * the mean time, task could have
			 * migrated already or had its affinity changed.
			 * Also make sure that it wasn't scheduled on its rq.
			 */
			if (unlikely(task_rq(task) != rq ||
				     !cpumask_test_cpu(lowest_rq->cpu,
						       tsk_cpus_allowed(task)) ||
				     task_running(rq, task) ||
				     !task_on_rq_queued(task))) {

				double_unlock_balance(rq, lowest_rq);
				lowest_rq = NULL;
				break;
			}
		}

		/* If this rq is still suitable use it. */
		if (lowest_rq->rt.highest_prio.curr > task->prio)
			break;

		/* try again */
		double_unlock_balance(rq, lowest_rq);
		lowest_rq = NULL;
	}

	return lowest_rq;
}

static struct task_struct *pick_next_pushable_task(struct rq *rq)
{
	struct task_struct *p;

	if (!has_pushable_tasks(rq))
		return NULL;

	p = plist_first_entry(&rq->rt.pushable_tasks,
			      struct task_struct, pushable_tasks);

	BUG_ON(rq->cpu != task_cpu(p));
	BUG_ON(task_current(rq, p));
	BUG_ON(p->nr_cpus_allowed <= 1);

	BUG_ON(!task_on_rq_queued(p));
	BUG_ON(!rt_task(p));

	return p;
}

/*
 * If the current CPU has more than one RT task, see if the non-running
 * task can migrate over to a CPU that is running a task of lesser
 * priority.
 */
static int push_rt_task(struct rq *rq)
{
	struct task_struct *next_task;
	struct rq *lowest_rq;
	int ret = 0;

	if (!rq->rt.overloaded)
		return 0;

	next_task = pick_next_pushable_task(rq);
	if (!next_task)
		return 0;

retry:
	if (unlikely(next_task == rq->curr)) {
		WARN_ON(1);
		return 0;
	}

	/*
	 * It's possible that the next_task slipped in of
	 * higher priority than current. If that's the case
	 * just reschedule current.
	 */
	if (unlikely(next_task->prio < rq->curr->prio)) {
		resched_curr(rq);
		return 0;
	}

	/* We might release rq lock */
	get_task_struct(next_task);

	/* find_lock_lowest_rq locks the rq if found */
	lowest_rq = find_lock_lowest_rq(next_task, rq);
	if (!lowest_rq) {
		struct task_struct *task;
		/*
		 * find_lock_lowest_rq releases rq->lock
		 * so it is possible that next_task has migrated.
		 *
		 * We need to make sure that the task is still on the same
		 * run-queue and is also still the next task eligible for
		 * pushing.
		 */
		task = pick_next_pushable_task(rq);
		if (task_cpu(next_task) == rq->cpu && task == next_task) {
			/*
			 * The task hasn't migrated, and is still the next
			 * eligible task, but we failed to find a run-queue
			 * to push it to. Do not retry in this case, since
			 * other cpus will pull from us when ready.
			 */
			goto out;
		}

		if (!task)
			/* No more tasks, just exit */
			goto out;

		/*
		 * Something has shifted, try again.
		 */
		put_task_struct(next_task);
		next_task = task;
		goto retry;
	}

	deactivate_task(rq, next_task, 0);
	next_task->on_rq = TASK_ON_RQ_MIGRATING;
	set_task_cpu(next_task, lowest_rq->cpu);
	next_task->on_rq = TASK_ON_RQ_QUEUED;
	activate_task(lowest_rq, next_task, 0);
	ret = 1;

	resched_curr(lowest_rq);

	double_unlock_balance(rq, lowest_rq);

out:
	put_task_struct(next_task);

	return ret;
}

static void push_rt_tasks(struct rq *rq)
{
	/* push_rt_task will return true if it moved an RT */
	while (push_rt_task(rq))
		;
}

#ifdef HAVE_RT_PUSH_IPI

/*
 * When a high priority task schedules out from a CPU and a lower priority
 * task is scheduled in, a check is made to see if there are any RT tasks
 * on other CPUs that are waiting to run because a higher priority RT task
 * is currently running on their CPU. In this case, the CPU with multiple RT
 * tasks queued on it (overloaded) needs to be notified that a CPU has opened
 * up that may be able to run one of its non-running queued RT tasks.
 *
 * All CPUs with overloaded RT tasks need to be notified as there is currently
 * no way to know which of these CPUs have the highest priority task waiting
 * to run. Instead of trying to take a spinlock on each of these CPUs,
 * which has shown to cause large latency when done on machines with many
 * CPUs, an IPI is sent to the CPUs to have them push off the overloaded
 * RT tasks waiting to run.
 *
 * Just sending an IPI to each of the CPUs is also an issue, as on large
 * count CPU machines, this can cause an IPI storm on a CPU, especially
 * if it's the only CPU with multiple RT tasks queued, and a large number
 * of CPUs scheduling a lower priority task at the same time.
 *
 * Each root domain has its own irq work function that can iterate over
 * all CPUs with RT overloaded tasks. Since all CPUs with overloaded RT
 * tasks must be checked if there's one or many CPUs that are lowering
 * their priority, there's a single irq work iterator that will try to
 * push off RT tasks that are waiting to run.
 *
 * When a CPU schedules a lower priority task, it will kick off the
 * irq work iterator that will jump to each CPU with overloaded RT tasks.
 * As it only takes the first CPU that schedules a lower priority task
 * to start the process, the rto_start variable is incremented and if
 * the atomic result is one, then that CPU will try to take the rto_lock.
 * This prevents high contention on the lock as the process handles all
 * CPUs scheduling lower priority tasks.
 *
 * All CPUs that are scheduling a lower priority task will increment the
 * rt_loop_next variable. This will make sure that the irq work iterator
 * checks all RT overloaded CPUs whenever a CPU schedules a new lower
 * priority task, even if the iterator is in the middle of a scan. Incrementing
 * the rt_loop_next will cause the iterator to perform another scan.
 */
static int rto_next_cpu(struct root_domain *rd)
{
	int next;
	int cpu;

	/*
	 * When starting the IPI RT pushing, the rto_cpu is set to -1,
	 * and rto_next_cpu() will simply return the first CPU found in
	 * the rto_mask.
	 *
	 * If rto_next_cpu() is called with rto_cpu being a valid cpu, it
	 * will return the next CPU found in the rto_mask.
	 *
	 * If there are no more CPUs left in the rto_mask, then a check is made
	 * against rto_loop and rto_loop_next. rto_loop is only updated with
	 * the rto_lock held, but any CPU may increment the rto_loop_next
	 * without any locking.
	 */
	for (;;) {

		/* When rto_cpu is -1 this acts like cpumask_first() */
		cpu = cpumask_next(rd->rto_cpu, rd->rto_mask);

		rd->rto_cpu = cpu;

		if (cpu < nr_cpu_ids)
			return cpu;

		rd->rto_cpu = -1;

		/*
		 * ACQUIRE ensures we see the @rto_mask changes
		 * made prior to the @next value observed.
		 *
		 * Matches WMB in rt_set_overload().
		 */
		next = atomic_read_acquire(&rd->rto_loop_next);

		if (rd->rto_loop == next)
			break;

		rd->rto_loop = next;
	}

	return -1;
}

static inline bool rto_start_trylock(atomic_t *v)
{
	return !atomic_cmpxchg_acquire(v, 0, 1);
}

static inline void rto_start_unlock(atomic_t *v)
{
	atomic_set_release(v, 0);
}

static void tell_cpu_to_push(struct rq *rq)
{
	int cpu = -1;

	/* Keep the loop going if the IPI is currently active */
	atomic_inc(&rq->rd->rto_loop_next);

	/* Only one CPU can initiate a loop at a time */
	if (!rto_start_trylock(&rq->rd->rto_loop_start))
		return;

	raw_spin_lock(&rq->rd->rto_lock);

	/*
	 * The rto_cpu is updated under the lock; if it has a valid cpu
	 * then the IPI is still running and will continue due to the
	 * update to loop_next, and nothing needs to be done here.
	 * Otherwise it is finishing up and an IPI needs to be sent.
	 */
	if (rq->rd->rto_cpu < 0)
		cpu = rto_next_cpu(rq->rd);

	raw_spin_unlock(&rq->rd->rto_lock);

	rto_start_unlock(&rq->rd->rto_loop_start);

	if (cpu >= 0) {
		/* Make sure the rd does not get freed while pushing */
		sched_get_rd(rq->rd);
		irq_work_queue_on(&rq->rd->rto_push_work, cpu);
	}
}

/* Called from hardirq context */
void rto_push_irq_work_func(struct irq_work *work)
{
	struct root_domain *rd =
		container_of(work, struct root_domain, rto_push_work);
	struct rq *rq;
	int cpu;

	rq = this_rq();

	/*
	 * We do not need to grab the lock to check for has_pushable_tasks.
	 * When it gets updated, a check is made if a push is possible.
	 */
	if (has_pushable_tasks(rq)) {
		raw_spin_lock(&rq->lock);
		push_rt_tasks(rq);
		raw_spin_unlock(&rq->lock);
	}

	raw_spin_lock(&rd->rto_lock);

	/* Pass the IPI to the next rt overloaded queue */
	cpu = rto_next_cpu(rd);

	raw_spin_unlock(&rd->rto_lock);

	if (cpu < 0) {
		sched_put_rd(rd);
		return;
	}

	/* Try the next RT overloaded CPU */
	irq_work_queue_on(&rd->rto_push_work, cpu);
}
#endif /* HAVE_RT_PUSH_IPI */

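/*
 * Pull side of RT balancing: when this rq drops below the priority of
 * waiting RT tasks elsewhere, try to migrate one over (or, with
 * RT_PUSH_IPI, ask the overloaded CPUs to push instead).
 */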
static void pull_rt_task(struct rq *this_rq)
{
	int this_cpu = this_rq->cpu, cpu;
	bool resched = false;
	struct task_struct *p;
	struct rq *src_rq;
	int rt_overload_count = rt_overloaded(this_rq);

	if (likely(!rt_overload_count))
		return;

	/*
	 * Match the barrier from rt_set_overload(); this guarantees that if
	 * we see overloaded we must also see the rto_mask bit.
	 */
	smp_rmb();

	/* If we are the only overloaded CPU do nothing */
	if (rt_overload_count == 1 &&
	    cpumask_test_cpu(this_rq->cpu, this_rq->rd->rto_mask))
		return;

#ifdef HAVE_RT_PUSH_IPI
	if (sched_feat(RT_PUSH_IPI)) {
		tell_cpu_to_push(this_rq);
		return;
	}
#endif

	for_each_cpu(cpu, this_rq->rd->rto_mask) {
		if (this_cpu == cpu)
			continue;

		src_rq = cpu_rq(cpu);

		/*
		 * Don't bother taking the src_rq->lock if the next highest
		 * task is known to be lower-priority than our current task.
		 * This may look racy, but if this value is about to go
		 * logically higher, the src_rq will push this task away.
		 * And if it's going logically lower, we do not care.
		 */
		if (src_rq->rt.highest_prio.next >=
		    this_rq->rt.highest_prio.curr)
			continue;

		/*
		 * We can potentially drop this_rq's lock in
		 * double_lock_balance, and another CPU could
		 * alter this_rq
		 */
		double_lock_balance(this_rq, src_rq);

		/*
		 * We can pull only a task that is pushable
		 * on its rq, and no others.
		 */
		p = pick_highest_pushable_task(src_rq, this_cpu);

		/*
		 * Do we have an RT task that preempts
		 * the to-be-scheduled task?
		 */
		if (p && (p->prio < this_rq->rt.highest_prio.curr)) {
			WARN_ON(p == src_rq->curr);
			WARN_ON(!task_on_rq_queued(p));

			/*
			 * There's a chance that p is higher in priority
			 * than what's currently running on its cpu.
			 * This is just because p is waking up and hasn't
			 * had a chance to schedule yet. We only pull
			 * p if it is lower in priority than the
			 * current task on the run queue
			 */
			if (p->prio < src_rq->curr->prio)
				goto skip;

			resched = true;

			deactivate_task(src_rq, p, 0);
			p->on_rq = TASK_ON_RQ_MIGRATING;
			set_task_cpu(p, this_cpu);
			p->on_rq = TASK_ON_RQ_QUEUED;
			activate_task(this_rq, p, 0);
			/*
			 * We continue with the search, just in
			 * case there's an even higher prio task
			 * in another runqueue. (low likelihood
			 * but possible)
			 */
		}
skip:
		double_unlock_balance(this_rq, src_rq);
	}

	if (resched)
		resched_curr(this_rq);
}

/*
 * If we are not running and we are not going to reschedule soon, we should
 * try to push tasks away now
 */
static void task_woken_rt(struct rq *rq, struct task_struct *p)
{
	if (!task_running(rq, p) &&
	    !test_tsk_need_resched(rq->curr) &&
	    p->nr_cpus_allowed > 1 &&
	    (dl_task(rq->curr) || rt_task(rq->curr)) &&
	    (rq->curr->nr_cpus_allowed < 2 ||
	     rq->curr->prio <= p->prio))
		push_rt_tasks(rq);
}

/* Assumes rq->lock is held */
static void rq_online_rt(struct rq *rq)
{
	if (rq->rt.overloaded)
		rt_set_overload(rq);

	__enable_runtime(rq);

	cpupri_set(&rq->rd->cpupri, rq->cpu, rq->rt.highest_prio.curr);
}

/* Assumes rq->lock is held */
static void rq_offline_rt(struct rq *rq)
{
	if (rq->rt.overloaded)
		rt_clear_overload(rq);

	__disable_runtime(rq);

	cpupri_set(&rq->rd->cpupri, rq->cpu, CPUPRI_INVALID);
}

/*
 * When switching from the rt queue, we bring ourselves to a position
 * where we might want to pull RT tasks from other runqueues.
 */
static void switched_from_rt(struct rq *rq, struct task_struct *p)
{
	/*
	 * On a class switch from rt, always cancel active schedtune timers;
	 * this handles the cases where we switch class for a task that is
	 * already rt-dequeued but has a running timer.
	 */
	schedtune_dequeue_rt(rq, p);

	/*
	 * If there are other RT tasks then we will reschedule
	 * and the scheduling of the other RT tasks will handle
	 * the balancing. But if we are the last RT task
	 * we may need to handle the pulling of RT tasks
	 * now.
	 */
	if (!task_on_rq_queued(p) || rq->rt.rt_nr_running)
		return;

	queue_pull_task(rq);
}

void __init init_sched_rt_class(void)
{
	unsigned int i;

	for_each_possible_cpu(i) {
		zalloc_cpumask_var_node(&per_cpu(local_cpu_mask, i),
					GFP_KERNEL, cpu_to_node(i));
	}
}
#endif /* CONFIG_SMP */

/*
 * When switching a task to RT, we may overload the runqueue
 * with RT tasks. In this case we try to push them off to
 * other runqueues.
 */
static void switched_to_rt(struct rq *rq, struct task_struct *p)
{
	/*
	 * If we are already running, then there's nothing
	 * that needs to be done. But if we are not running
	 * we may need to preempt the current running task.
	 * If that current running task is also an RT task
	 * then see if we can move to another run queue.
	 */
	if (task_on_rq_queued(p) && rq->curr != p) {
#ifdef CONFIG_SMP
		if (p->nr_cpus_allowed > 1 && rq->rt.overloaded)
			queue_push_tasks(rq);
#endif /* CONFIG_SMP */
		if (p->prio < rq->curr->prio && cpu_online(cpu_of(rq)))
			resched_curr(rq);
	}
}

/*
 * Priority of the task has changed. This may cause
 * us to initiate a push or pull.
 */
static void
prio_changed_rt(struct rq *rq, struct task_struct *p, int oldprio)
{
	if (!task_on_rq_queued(p))
		return;

	if (rq->curr == p) {
#ifdef CONFIG_SMP
		/*
		 * If our priority decreases while running, we
		 * may need to pull tasks to this runqueue.
		 */
		if (oldprio < p->prio)
			queue_pull_task(rq);

		/*
		 * If there's a higher priority task waiting to run
		 * then reschedule.
		 */
		if (p->prio > rq->rt.highest_prio.curr)
			resched_curr(rq);
#else
		/* For UP simply resched on drop of prio */
		if (oldprio < p->prio)
			resched_curr(rq);
#endif /* CONFIG_SMP */
	} else {
		/*
		 * This task is not running, but if its priority is
		 * higher than that of the current running task,
		 * then reschedule.
		 */
		if (p->prio < rq->curr->prio)
			resched_curr(rq);
	}
}

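/*
 * Enforce RLIMIT_RTTIME: count the ticks this task has spent running
 * without a sleep and, once the soft limit is exceeded, arm the cputime
 * expiry so the posix-cpu-timers code can act on it.
 */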
static void watchdog(struct rq *rq, struct task_struct *p)
{
	unsigned long soft, hard;

	/* max may change after cur was read, this will be fixed next tick */
	soft = task_rlimit(p, RLIMIT_RTTIME);
	hard = task_rlimit_max(p, RLIMIT_RTTIME);

	if (soft != RLIM_INFINITY) {
		unsigned long next;

		if (p->rt.watchdog_stamp != jiffies) {
			p->rt.timeout++;
			p->rt.watchdog_stamp = jiffies;
		}

		next = DIV_ROUND_UP(min(soft, hard), USEC_PER_SEC/HZ);
		if (p->rt.timeout > next)
			p->cputime_expires.sched_exp = p->se.sum_exec_runtime;
	}
}

static void task_tick_rt(struct rq *rq, struct task_struct *p, int queued)
{
	struct sched_rt_entity *rt_se = &p->rt;

	update_curr_rt(rq);

	watchdog(rq, p);

	/*
	 * RR tasks need a special form of timeslice management.
	 * FIFO tasks have no timeslices.
	 */
	if (p->policy != SCHED_RR)
		return;

	if (--p->rt.time_slice)
		return;

	p->rt.time_slice = sched_rr_timeslice;

	/*
	 * Requeue to the end of queue if we (and all of our ancestors) are not
	 * the only element on the queue
	 */
	for_each_sched_rt_entity(rt_se) {
		if (rt_se->run_list.prev != rt_se->run_list.next) {
			requeue_task_rt(rq, p, 0);
			resched_curr(rq);
			return;
		}
	}
}

static void set_curr_task_rt(struct rq *rq)
{
	struct task_struct *p = rq->curr;

	p->se.exec_start = rq_clock_task(rq);

	/* The running task is never eligible for pushing */
	dequeue_pushable_task(rq, p);
}

static unsigned int get_rr_interval_rt(struct rq *rq, struct task_struct *task)
{
	/*
	 * Time slice is 0 for SCHED_FIFO tasks
	 */
	if (task->policy == SCHED_RR)
		return sched_rr_timeslice;
	else
		return 0;
}

const struct sched_class rt_sched_class = {
	.next			= &fair_sched_class,
	.enqueue_task		= enqueue_task_rt,
	.dequeue_task		= dequeue_task_rt,
	.yield_task		= yield_task_rt,

	.check_preempt_curr	= check_preempt_curr_rt,

	.pick_next_task		= pick_next_task_rt,
	.put_prev_task		= put_prev_task_rt,

#ifdef CONFIG_SMP
	.select_task_rq		= select_task_rq_rt,

	.set_cpus_allowed	= set_cpus_allowed_common,
	.rq_online		= rq_online_rt,
	.rq_offline		= rq_offline_rt,
	.task_woken		= task_woken_rt,
	.switched_from		= switched_from_rt,
#endif

	.set_curr_task		= set_curr_task_rt,
	.task_tick		= task_tick_rt,

	.get_rr_interval	= get_rr_interval_rt,

	.prio_changed		= prio_changed_rt,
	.switched_to		= switched_to_rt,

	.update_curr		= update_curr_rt,
};

#ifdef CONFIG_SCHED_DEBUG
extern void print_rt_rq(struct seq_file *m, int cpu, struct rt_rq *rt_rq);

void print_rt_stats(struct seq_file *m, int cpu)
{
	rt_rq_iter_t iter;
	struct rt_rq *rt_rq;

	rcu_read_lock();
	for_each_rt_rq(rt_rq, iter, cpu_rq(cpu))
		print_rt_rq(m, cpu, rt_rq);
	rcu_read_unlock();
}
#endif /* CONFIG_SCHED_DEBUG */