This is the merge of the upstream LTS release of 5.15.94 into the android13-5.15 branch. It contains the following commits:

* 5448b2fda8 Merge 5.15.94 into android13-5.15-lts
|\
| * e2c1a934fd Linux 5.15.94
| * 17170acdc7 Documentation/hw-vuln: Add documentation for Cross-Thread Return Predictions
| * 5122e0e443 KVM: x86: Mitigate the cross-thread return address predictions bug
| * 8f12dcab90 x86/speculation: Identify processors vulnerable to SMT RSB predictions
| * e63c434de8 drm/i915: Fix VBT DSI DVO port handling
| * fc88c68381 drm/i915: Initialize the obj flags for shmem objects
| * 2e557c8ca2 drm/amdgpu/fence: Fix oops due to non-matching drm_sched init/fini
| * 3af734f3ea Fix page corruption caused by racy check in __free_pages
| * c94ce5ea68 arm64: dts: meson-axg: Make mmc host controller interrupts level-sensitive
| * b796c02df3 arm64: dts: meson-g12-common: Make mmc host controller interrupts level-sensitive
| * 5d9b771f53 arm64: dts: meson-gx: Make mmc host controller interrupts level-sensitive
| * ac39dce119 rtmutex: Ensure that the top waiter is always woken up
| * 86f7e42393 powerpc/64s/interrupt: Fix interrupt exit race with security mitigation switch
| * 2907cf3f2e riscv: Fixup race condition on PG_dcache_clean in flush_icache_pte
| * beb1cefa3c ceph: flush cap releases when the session is flushed
| * 86733ab239 clk: ingenic: jz4760: Update M/N/OD calculation algorithm
| * 239e927eb2 usb: typec: altmodes/displayport: Fix probe pin assign check
| * 48aecce116 usb: core: add quirk for Alcor Link AK9563 smartcard reader
| * a8178bb1c7 btrfs: free device in btrfs_close_devices for a single device filesystem
| * 8d13f2c3e2 mptcp: be careful on subflow status propagation on errors
| * 25141fb411 net: USB: Fix wrong-direction WARNING in plusb.c
| * d1fba1e096 cifs: Fix use-after-free in rdata->read_into_pages()
| * 1b83e7e174 pinctrl: intel: Restore the pins that used to be in Direct IRQ mode
| * f5f025b703 spi: dw: Fix wrong FIFO level setting for long xfers
| * 71668706fb pinctrl: single: fix potential NULL dereference
| * a2a1065739 pinctrl: aspeed: Fix confusing types in return value
| * 99450163bc pinctrl: mediatek: Fix the drive register definition of some Pins
| * 9f0d2c2684 ASoC: topology: Return -ENOMEM on memory allocation failure
| * 1a52ef89e3 riscv: stacktrace: Fix missing the first frame
| * 5fb8154334 ALSA: pci: lx6464es: fix a debug loop
| * 105ea562f6 selftests: forwarding: lib: quote the sysctl values
| * 528e3f3a4b rds: rds_rm_zerocopy_callback() use list_first_entry()
| * 48d6d8f2f6 igc: Add ndo_tx_timeout support
| * 62ff7dd961 net/mlx5: Serialize module cleanup with reload and remove
| * 95d2394f84 net/mlx5: fw_tracer, Zero consumer index when reloading the tracer
| * ab7f3f6a9d net/mlx5: fw_tracer, Clear load bit when freeing string DBs buffers
| * 193528646e net/mlx5e: IPoIB, Show unknown speed instead of error
| * 7c6e8eb617 net/mlx5: Bridge, fix ageing of peer FDB entries
| * 49ece61a07 net/mlx5e: Update rx ring hw mtu upon each rx-fcs flag change
| * 31172267ba net/mlx5e: Introduce the mlx5e_flush_rq function
| * e4e4e93d31 net/mlx5e: Move repeating clear_bit in mlx5e_rx_reporter_err_rq_cqe_recover
| * 3f18b9ed8c net: mscc: ocelot: fix VCAP filters not matching on MAC with "protocol 802.1Q"
| * 6acb5d853b net: dsa: mt7530: don't change PVC_EG_TAG when CPU port becomes VLAN-aware
| * ca834a0178 ice: Do not use WQ_MEM_RECLAIM flag for workqueue
| * 70d48c7992 uapi: add missing ip/ipv6 header dependencies for linux/stddef.h
| * 3cec44036f ionic: clean interrupt before enabling queue to avoid credit race
| * fad12afe87 net: phy: meson-gxl: use MMD access dummy stubs for GXL, internal PHY
| * d23385a200 bonding: fix error checking in bond_debug_reregister()
| * 11006d9d08 net: phylink: move phy_device_free() to correctly release phy device
| * fb022d7b1c xfrm: fix bug with DSCP copy to v6 from v4 tunnel
| * 6fe1ad42af RDMA/usnic: use iommu_map_atomic() under spin_lock()
| * 8f5fe1cd8e RDMA/irdma: Fix potential NULL-ptr-dereference
| * 1b4ef90cbc IB/IPoIB: Fix legacy IPoIB due to wrong number of queues
| * 5dc688fae6 xfrm/compat: prevent potential spectre v1 gadget in xfrm_xlate32_attr()
| * 9bae58d58b IB/hfi1: Restore allocated resources on failed copyout
| * 558b1fa01c xfrm: compat: change expression for switch in xfrm_xlate64
| * 238b38e89f can: j1939: do not wait 250 ms if the same addr was already claimed
| * d859184b60 of/address: Return an error when no valid dma-ranges are found
| * 70f37b3118 tracing: Fix poll() and select() do not work on per_cpu trace_pipe and trace_pipe_raw
| * df01749503 ALSA: hda/realtek: Enable mute/micmute LEDs on HP Elitebook, 645 G9
| * ca9d542203 ALSA: hda/realtek: Fix the speaker output on Samsung Galaxy Book2 Pro 360
| * 706b6d86a6 ALSA: emux: Avoid potential array out-of-bound in snd_emux_xg_control()
| * 731fc29de6 ALSA: hda/realtek: Add Positivo N14KP6-TG
| * b938059807 btrfs: zlib: zero-initialize zlib workspace
| * e65faa7e39 btrfs: limit device extents to the device size
| * 2e4dd07fda migrate: hugetlb: check for hugetlb shared PMD in node migration
| * 072e7412e8 mm/migration: return errno when isolate_huge_page failed
* | f977f92131 Revert "nvmem: core: remove nvmem_config wp_gpio"
* | 787413edad Merge 5.15.93 into android13-5.15-lts
|\|
| * 85d7786c66 Linux 5.15.93
| * 6e2fac197d bpf: Skip invalid kfunc call in backtrack_insn
| * 46c9088cab gfs2: Always check inode size of inline inodes
| * 8eb2e58a92 gfs2: Cosmetic gfs2_dinode_{in,out} cleanup
| * e4991910f1 wifi: brcmfmac: Check the count value of channel spec to prevent out-of-bounds reads
| * 97ccfffcc0 f2fs: fix to do sanity check on i_extra_isize in is_alive()
| * 64fa364ad3 fbdev: smscufx: fix error handling code in ufx_usb_probe
| * a77141a063 ovl: Use "buf" flexible array for memcpy() destination
| * 1692fedd0f fs/ntfs3: Validate attribute data and valid sizes
| * a5b9cb7276 powerpc/imc-pmu: Revert nest_init_lock to being a mutex
| * 3691f43a09 iio:adc:twl6030: Enable measurement of VAC
| * 8c84f50390 bpf: Do not reject when the stack read size is different from the tracked scalar size
| * 14b6198abb bpf: Fix incorrect state pruning for <8B spill/fill
| * 575a9f6fef phy: qcom-qmp-combo: fix runtime suspend
| * e58df87394 phy: qcom-qmp-combo: fix broken power on
| * 368ea32e0a phy: qcom-qmp-usb: fix memleak on probe deferral
| * 2f27d3811a phy: qcom-qmp-combo: fix memleak on probe deferral
| * 0cb10ddab7 phy: qcom-qmp-combo: disable runtime PM on unbind
| * 0ef5ffe116 serial: 8250_dma: Fix DMA Rx rearm race
| * e30328f599 serial: 8250_dma: Fix DMA Rx completion race
| * a5a171f61a nvmem: core: fix cell removal on error
| * 6d9fa3ff65 nvmem: core: remove nvmem_config wp_gpio
| * adf80e072c nvmem: core: initialise nvmem->id early
| * e3ebc3e23b drm/i915: Fix potential bit_17 double-free
| * 997bed0f3c Squashfs: fix handling and sanity checking of xattr_ids count
| * 7a0cfaf9d4 highmem: round down the address passed to kunmap_flush_on_unmap()
| * 5dbe1ebd56 mm/swapfile: add cond_resched() in get_swap_pages()
| * daf8241804 fpga: stratix10-soc: Fix return value check in s10_ops_write_init()
| * afd32b6831 x86/debug: Fix stack recursion caused by wrongly ordered DR7 accesses
| * 066ecbf1a5 kernel/irq/irqdomain.c: fix memory leak with using debugfs_lookup()
| * 481bf49f58 usb: gadget: f_uac2: Fix incorrect increment of bNumEndpoints
| * fdf40e5824 mm: hugetlb: proc: check for hugetlb shared PMD in /proc/PID/smaps
| * 6c300351c5 riscv: disable generation of unwind tables
| * a5c275add9 parisc: Wire up PTRACE_GETREGS/PTRACE_SETREGS for compat case
| * a964decd13 parisc: Fix return code of pdc_iodc_print()
| * 488eaf0625 nvmem: qcom-spmi-sdam: fix module autoloading
| * 8569beb66f iio: imu: fxos8700: fix MAGN sensor scale and unit
| * 8aa5cdcfaf iio: imu: fxos8700: remove definition FXOS8700_CTRL_ODR_MIN
| * 4112ba1ad5 iio: imu: fxos8700: fix failed initialization ODR mode assignment
| * abf7b2ba51 iio: imu: fxos8700: fix incorrect ODR mode readback
| * 412757741c iio: imu: fxos8700: fix swapped ACCEL and MAGN channels readback
| * 34909532b1 iio: imu: fxos8700: fix map label of channel type to MAGN sensor
| * 8346eb4987 iio: imu: fxos8700: fix IMU data bits returned to user space
| * 7567cdf3ce iio: imu: fxos8700: fix incomplete ACCEL and MAGN channels readback
| * 6969852220 iio: imu: fxos8700: fix ACCEL measurement range selection
| * cdacfb2205 iio:adc:twl6030: Enable measurements of VUSB, VBAT and others
| * 9988063dce iio: adc: berlin2-adc: Add missing of_node_put() in error path
| * c691a5c0fd iio: hid: fix the retval in gyro_3d_capture_sample
| * ef80a34699 iio: hid: fix the retval in accel_3d_capture_sample
| * c4eae85c73 efi: Accept version 2 of memory attributes table
| * 710db82063 ALSA: hda/realtek: Add Acer Predator PH315-54
| * 3fbddf86d9 watchdog: diag288_wdt: fix __diag288() inline assembly
| * 700dd5bc72 watchdog: diag288_wdt: do not use stack buffers for hardware data
| * 21bc51e29e net: qrtr: free memory on error path in radix_tree_insert()
| * dccbd062d7 fbcon: Check font dimension limits
| * 5d7500d991 Input: i8042 - add Clevo PCX0DX to i8042 quirk table
| * fc9e27f3ba vc_screen: move load of struct vc_data pointer in vcs_read() to avoid UAF
| * 9ba1188a71 usb: gadget: f_fs: Fix unbalanced spinlock in __ffs_ep0_queue_wait
| * fe86480e90 usb: dwc3: qcom: enable vbus override when in OTG dr-mode
| * a412fe7baf iio: adc: stm32-dfsdm: fill module aliases
| * 9944659398 drm/amd/display: Fix timing not changning when freesync video is enabled
| * a3967128bc net/x25: Fix to not accept on connected socket
| * 396ea318e7 platform/x86: gigabyte-wmi: add support for B450M DS3H WIFI-CF
| * 1577524633 platform/x86: dell-wmi: Add a keymap for KEY_MUTE in type 0x0010 table
| * 540cea9f9b i2c: rk3x: fix a bunch of kernel-doc warnings
| * 0aaabdb900 scsi: iscsi_tcp: Fix UAF during login when accessing the shost ipaddress
| * 17b738590b scsi: iscsi_tcp: Fix UAF during logout when accessing the shost ipaddress
| * 8cd0499f9c perf/x86/intel: Add Emerald Rapids
| * 7093515370 scsi: target: core: Fix warning on RT kernels
| * b7960f5436 i2c: mxs: suppress probe-deferral error message
| * b9b87fc34b i2c: designware-pci: Add new PCI IDs for AMD NAVI GPU
| * d8fc0b5fb3 efi: fix potential NULL deref in efi_mem_reserve_persistent
| * f423c2efd5 net: openvswitch: fix flow memory leak in ovs_flow_cmd_new
| * 7985028647 virtio-net: Keep stop() to follow mirror sequence of open()
| * 5d884f9e80 selftests: net: udpgso_bench_tx: Cater for pending datagrams zerocopy benchmarking
| * 63aa63af3a selftests: net: udpgso_bench: Fix racing bug between the rx/tx programs
| * d41a3f9cc2 selftests: net: udpgso_bench_rx/tx: Stop when wrong CLI args are provided
| * 5af98283e5 selftests: net: udpgso_bench_rx: Fix 'used uninitialized' compiler warning
| * 89e0701e03 ata: libata: Fix sata_down_spd_limit() when no link speed is reported
| * 9ab896775f can: j1939: fix errant WARN_ON_ONCE in j1939_session_deactivate
| * 02d77d98e0 igc: return an error if the mac type is unknown in igc_ptp_systim_to_hwtstamp()
| * 04a7355820 riscv: kprobe: Fixup kernel panic when probing an illegal position
| * 206c367b6a ip/ip6_gre: Fix non-point-to-point tunnel not generating IPv6 link local address
| * 90178bc0f2 ip/ip6_gre: Fix changing addr gen mode not generating IPv6 link local address
| * dfe2f0ea38 net: phy: meson-gxl: Add generic dummy stubs for MMD register access
| * b7398efe24 squashfs: harden sanity check in squashfs_read_xattr_id_table
| * 89a69216f1 netfilter: br_netfilter: disable sabotage_in hook after first suppression
| * cdb444e73f drm/i915/adlp: Fix typo for reference clock
| * 960f20d858 drm/i915/guc: Fix locking when searching for a hung request
| * c27e0eac56 netrom: Fix use-after-free caused by accept on already connected socket
| * 511c922c5b block, bfq: fix uaf for bfqq in bic_set_bfqq()
| * a62c129dcb block, bfq: replace 0/1 with false/true in bic apis
| * 37a744a068 block/bfq-iosched.c: use "false" rather than "BLK_RW_ASYNC"
| * 2cd1e9c013 net: phy: dp83822: Fix null pointer access on DP83825/DP83826 devices
| * 18c18c2110 sfc: correctly advertise tunneled IPv6 segmentation
| * 878b06f60a dpaa2-eth: execute xdp_do_flush() before napi_complete_done()
| * 3b5774cd6b dpaa_eth: execute xdp_do_flush() before napi_complete_done()
| * 5a7040a649 virtio-net: execute xdp_do_flush() before napi_complete_done()
| * 94add5b272 qede: execute xdp_do_flush() before napi_complete_done()
| * a273f8e3ab ice: Prevent set_channel from changing queues while RDMA active
| * b432e183c2 fix "direction" argument of iov_iter_kvec()
| * d8b8306e96 fix iov_iter_bvec() "direction" argument
| * 389c7c0ef9 READ is "data destination", not source...
| * 7a3649bf5b WRITE is "data source", not destination...
| * 83cc6a7bb7 vhost/net: Clear the pending messages when the backend is removed
| * 7c7d344bc3 scsi: Revert "scsi: core: map PQ=1, PDT=other values to SCSI_SCAN_TARGET_PRESENT"
| * 4b199dc094 drm/vc4: hdmi: make CEC adapter name unique
| * dc1f8ab25a arm64: dts: imx8mm: Fix pad control for UART1_DTE_RX
| * c681d7a4ed bpf, sockmap: Check for any of tcp_bpf_prots when cloning a listener
| * 34ad5d8885 bpf: Fix to preserve reg parent/live fields when copying range info
| * 7b86f9ab56 bpf: Support <8-byte scalar spill and refill
| * 1b9256c962 ALSA: hda/via: Avoid potential array out-of-bound in add_secret_dac_path()
| * b7abeb6916 bpf: Fix a possible task gone issue with bpf_send_signal[_thread]() helpers
| * cfcc2390db ASoC: Intel: bytcr_wm5102: Drop reference count of ACPI device after use
| * b4b204565a ASoC: Intel: bytcr_rt5640: Drop reference count of ACPI device after use
| * 1f1e7635c5 ASoC: Intel: bytcr_rt5651: Drop reference count of ACPI device after use
| * 41d323c352 ASoC: Intel: bytcht_es8316: Drop reference count of ACPI device after use
| * 6a9990e1d9 ASoC: Intel: bytcht_es8316: move comment to the right place
| * ffcdf35455 ASoC: Intel: boards: fix spelling in comments
| * bd0b17ab1b bus: sunxi-rsb: Fix error handling in sunxi_rsb_init()
| * 5f4543c938 firewire: fix memory leak for payload of request subaction to IEC 61883-1 FCP region
* | 5020746bff Merge 5.15.92 into android13-5.15-lts
|\|
| * e515b9902f Linux 5.15.92
| * c7caf669b8 net: mctp: purge receive queues on sk destruction
| * 046de74f9a net: fix NULL pointer in skb_segment_list
| * 7ab3376703 selftests: Provide local define of __cpuid_count()
| * e92e311ced selftests/vm: remove ARRAY_SIZE define from individual tests
| * c9e52db900 tools: fix ARRAY_SIZE defines in tools and selftests hdrs
| * c1aa0dd52d Bluetooth: fix null ptr deref on hci_sync_conn_complete_evt
| * 02e61196c5 ACPI: processor idle: Practically limit "Dummy wait" workaround to old Intel systems
| * 79dd676b44 extcon: usbc-tusb320: fix kernel-doc warning
| * c2bd60ef20 ext4: fix bad checksum after online resize
| * 4cd1e18bc0 cifs: fix return of uninitialized rc in dfs_cache_update_tgthint()
| * 43acd767bd dmaengine: imx-sdma: Fix a possible memory leak in sdma_transfer_init
| * a54c5ad007 HID: playstation: sanity check DualSense calibration data.
| * 6d7686cc11 blk-cgroup: fix missing pd_online_fn() while activating policy
| * 2144859229 erofs/zmap.c: Fix incorrect offset calculation
| * 0dfef50313 bpf: Skip task with pid=1 in send_signal_common()
| * e8bb772f74 firmware: arm_scmi: Clear stale xfer->hdr.status
| * 80cb9f1a76 arm64: dts: imx8mq-thor96: fix no-mmc property for SDHCI
| * 162fad24d2 arm64: dts: freescale: Fix pca954x i2c-mux node names
| * 82ad105e1a ARM: dts: vf610: Fix pca9548 i2c-mux node names
| * 5aee5f33e0 ARM: dts: imx: Fix pca9547 i2c-mux node name
* | 7e0097918f Revert "scsi: ufs: core: Fix devfreq deadlocks"
* | 6ce0fcdcc2 Revert "thermal/core: Rename 'trips' to 'num_trips'"
* | 49a5232dfb Revert "thermal: Validate new state in cur_state_store()"
* | be0ca2fc43 Revert "thermal/core: fix error code in __thermal_cooling_device_register()"
* | 9617a003cc Revert "thermal: core: call put_device() only after device_register() fails"
* | ccb2c48531 Revert "cpufreq: governor: Use kobject release() method to free dbs_data"
* | 0108f014a5 Revert "gpio: use raw spinlock for gpio chip shadowed data"
* | 1d2449f6be Revert "gpio: mxc: Protect GPIO irqchip RMW with bgpio spinlock"
* | 5f51aedcba Revert "gpio: mxc: Unlock on error path in mxc_flip_edge()"
* | 7622c50ba6 Merge 5.15.91 into android13-5.15-lts
|\|
| * 9cf4111cdf Linux 5.15.91
| * 14cc13e433 perf/x86/amd: fix potential integer overflow on shift of a int
| * 033636b322 netfilter: conntrack: unify established states for SCTP paths
| * 0b08201158 x86/i8259: Mark legacy PIC interrupts with IRQ_LEVEL
| * b577400367 block: fix and cleanup bio_check_ro
| * 1d152437e4 kbuild: Allow kernel installation packaging to override pkg-config
| * a196468858 cpufreq: governor: Use kobject release() method to free dbs_data
| * 7c513ced0d cpufreq: Move to_gov_attr_set() to cpufreq.h
| * cf7a08622d Revert "Input: synaptics - switch touchpad on HP Laptop 15-da3001TU to RMI mode"
| * 53c5d61198 tools: gpio: fix -c option of gpio-event-mon
| * a7d1a303ff treewide: fix up files incorrectly marked executable
| * 046fe53907 net: mdio-mux-meson-g12a: force internal PHY off on mux switch
| * 86bdccde78 net/tg3: resolve deadlock in tg3_reset_task() during EEH
| * 4364bf79d8 thermal: intel: int340x: Add locking to int340x_thermal_get_trip_type()
| * e69c3a0d9d net: mctp: mark socks as dead on unhash, prevent re-add
| * 954cc215cd net: ravb: Fix possible hang if RIS2_QFF1 happen
| * 0f7218bf0a net: ravb: Fix lack of register setting after system resumed for Gen3
| * 3db4ca2938 ravb: Rename "no_ptp_cfg_active" and "ptp_cfg_active" variables
| * 621f296f11 gpio: mxc: Unlock on error path in mxc_flip_edge()
| * 071a839286 nvme: fix passthrough csi check
| * 614471b7f7 riscv/kprobe: Fix instruction simulation of JALR
| * 3391bd4235 sctp: fail if no bound addresses can be used for a given scope
| * b0784860e1 net/sched: sch_taprio: do not schedule in taprio_reset()
| * d2d3ab1b1d netrom: Fix use-after-free of a listening socket.
| * 9df5ab02c6 netfilter: conntrack: fix vtag checks for ABORT/SHUTDOWN_COMPLETE
| * ca3cf94776 ipv4: prevent potential spectre v1 gadget in fib_metrics_match()
| * d50e7348b4 ipv4: prevent potential spectre v1 gadget in ip_metrics_convert()
| * ead06e3449 netlink: annotate data races around sk_state
| * c4eb423c6b netlink: annotate data races around dst_portid and dst_group
| * fac9b69a93 netlink: annotate data races around nlk->portid
| * 8a13595600 netfilter: nft_set_rbtree: skip elements in transaction from garbage collection
| * 2bf1435fa1 netfilter: nft_set_rbtree: Switch to node list walk for overlap detection
| * e481654426 drm/i915/selftest: fix intel_selftest_modify_policy argument types
| * 66689a72ba net: fix UaF in netns ops registration error path
| * 41b74e95f2 netlink: prevent potential spectre v1 gadgets
| * 2f29d780bd i2c: designware: use casting of u64 in clock multiplication to avoid overflow
| * b03f7ed9af scsi: ufs: core: Fix devfreq deadlocks
| * 858d7e9218 net: mana: Fix IRQ name - add PCI and queue number
| * bff5243bd3 EDAC/qcom: Do not pass llcc_driv_data as edac_device_ctl_info's pvt_info
| * 5eedf4568d EDAC/device: Respect any driver-supplied workqueue polling value
| * 4b7dfd0a68 ARM: 9280/1: mm: fix warning on phys_addr_t to void pointer assignment
| * 7807871f28 ipv6: fix reachability confirmation with proxy_ndp
| * f9a22f6fa1 thermal: intel: int340x: Protect trip temperature from concurrent updates
| * 036093c08d KVM: arm64: GICv4.1: Fix race with doorbell on VPE activation/deactivation
| * c56683c062 KVM: x86/vmx: Do not skip segment attributes if unusable bit is set
| * e91308e637 ovl: fail on invalid uid/gid mapping at copy up
| * 33a9657d67 ksmbd: limit pdu length size according to connection status
| * 8d83a758ee ksmbd: downgrade ndr version error message to debug
| * 87a7f38a90 ksmbd: do not sign response to session request for guest login
| * 4210c3555d ksmbd: add max connections parameter
| * cc6742b160 ksmbd: add smbd max io size parameter
| * 3c8a5648a5 i2c: mv64xxx: Add atomic_xfer method to driver
| * e619ab4fb3 i2c: mv64xxx: Remove shutdown method from driver
| * 4b83bc6f87 cifs: Fix oops due to uncleared server->smbd_conn in reconnect
| * 89042d3d85 ftrace/scripts: Update the instructions for ftrace-bisect.sh
| * 592ba7116f trace_events_hist: add check for return value of 'create_hist_field'
| * b0af180514 tracing: Make sure trace_printk() can output as soon as it can be used
| * 91135d7233 module: Don't wait for GOING modules
| * 85ee9919ad KVM: SVM: fix tsc scaling cache logic
| * f0227eca97 scsi: hpsa: Fix allocation size for scsi_host_alloc()
| * e5af9a458a drm/amdgpu: complete gfxoff allow signal during suspend without delay
| * 62b9e9f921 Bluetooth: hci_sync: cancel cmd_timer if hci_open failed
| * 21998acd31 exit: Use READ_ONCE() for all oops/warn limit reads
| * e82b1598eb docs: Fix path paste-o for /sys/kernel/warn_count
| * 1c51698ad6 panic: Expose "warn_count" to sysfs
| * 0691ddae56 panic: Introduce warn_limit
| * 7b98914a6c panic: Consolidate open-coded panic_on_warn checks
| * fc636b1362 exit: Allow oops_limit to be disabled
| * 339f8a8e52 exit: Expose "oops_count" to sysfs
| * f80fb0001f exit: Put an upper limit on how often we can oops
| * 2857ce7f47 panic: Separate sysctl logic from CONFIG_SMP
| * e156d4dcb0 ia64: make IA64_MCA_RECOVERY bool instead of tristate
| * 9024f77224 csky: Fix function name in csky_alignment() and die()
| * 2ea497d153 h8300: Fix build errors from do_exit() to make_task_dead() transition
| * a452ca0228 hexagon: Fix function name in die()
| * 3b39f47474 objtool: Add a missing comma to avoid string concatenation
| * 39a26d8721 exit: Add and use make_task_dead.
| * b5c1acaa43 kasan: no need to unset panic_on_warn in end_report()
| * b5c967dc68 ubsan: no need to unset panic_on_warn in ubsan_epilogue()
| * e4cd210032 panic: unset panic_on_warn inside panic()
| * 191f1f1f6a kernel/panic: move panic sysctls to its own file
| * 654f6e8512 sysctl: add a new register_sysctl_init() interface
| * 3aa991cde9 fs: reiserfs: remove useless new_opts in reiserfs_remount
| * d830531f8f x86: ACPI: cstate: Optimize C3 entry on AMD CPUs
| * 1f54762231 drm/i915: Remove unused variable
| * 6e10127093 Revert "selftests/bpf: check null propagation only neither reg is PTR_TO_BTF_ID"
| * 619ee31b96 drm/i915: Allow switching away via vga-switcheroo if uninitialized
| * ea435ba9eb firmware: coreboot: Check size of table entry and use flex-array
| * a4e70bcf2e lockref: stop doing cpu_relax in the cmpxchg loop
| * b0ee61f5ee platform/x86: asus-nb-wmi: Add alternate mapping for KEY_SCREENLOCK
| * e8d2f7f566 platform/x86: touchscreen_dmi: Add info for the CSL Panther Tab HD
| * 2e0a8bacbe r8152: add vendor/device ID pair for Microsoft Devkit
| * d4b717e34d scsi: hisi_sas: Set a port invalid only if there are no devices attached when refreshing port id
| * e15750aa28 KVM: s390: interrupt: use READ_ONCE() before cmpxchg()
| * 9300c65207 spi: spidev: remove debug messages that access spidev->spi without locking
| * 48ff5d3812 ASoC: fsl-asoc-card: Fix naming of AC'97 CODEC widgets
| * 5001ffb31d ASoC: fsl_ssi: Rename AC'97 streams to avoid collisions with AC'97 CODEC
| * b76120e206 cpufreq: armada-37xx: stop using 0 as NULL pointer
| * eda26fa856 perf/x86/intel/uncore: Add Emerald Rapids
| * 544f9d4e9d perf/x86/msr: Add Emerald Rapids
| * b1eb964d78 s390: expicitly align _edata and _end symbols on page boundary
| * fb45ec279b s390/debug: add _ASM_S390_ prefix to header guard
| * cd488abed9 drm: Add orientation quirk for Lenovo ideapad D330-10IGL
| * ff7ab370b8 net: usb: cdc_ether: add support for Thales Cinterion PLS62-W modem
| * d6935084e4 ASoC: fsl_micfil: Correct the number of steps on SX controls
| * ac07316b2d cpufreq: Add SM6375 to cpufreq-dt-platdev blocklist
| * f0e6dcae14 kcsan: test: don't put the expect array on the stack
| * c51c0b3754 cpufreq: Add Tegra234 to cpufreq-dt-platdev blocklist
| * 28e4e8ca9e scsi: iscsi: Fix multiple iSCSI session unbind events sent to userspace
| * 14b1df2004 tcp: fix rate_app_limited to default to 1
| * 120b8e527e net: stmmac: enable all safety features by default
| * a7d736cc3c thermal: core: call put_device() only after device_register() fails
| * ed08f958e4 thermal/core: fix error code in __thermal_cooling_device_register()
| * 108a6f91e2 thermal: Validate new state in cur_state_store()
| * bd0ea77edf thermal/core: Rename 'trips' to 'num_trips'
| * 521c6ebd4f thermal/core: Remove duplicate information when an error occurs
| * 6504afa263 net: dsa: microchip: ksz9477: port map correction in ALU table entry register
| * 18346db185 selftests/net: toeplitz: fix race on tpacket_v3 block close
| * caa28c7c83 driver core: Fix test_async_probe_init saves device in wrong array
| * 89c62cee5d w1: fix WARNING after calling w1_process()
| * 3d0eafe413 w1: fix deadloop in __w1_remove_master_device()
| * 7701a4bd45 device property: fix of node refcount leak in fwnode_graph_get_next_endpoint()
| * ed0d8f731e ptdma: pt_core_execute_cmd() should use spinlock
| * 29e9c67bf3 octeontx2-pf: Fix the use of GFP_KERNEL in atomic context on rt
| * 03bff5819a tcp: avoid the lookup process failing to get sk in ehash table
| * 5bd69d2ea8 nvme-pci: fix timeout request state check
| * 39178dfe86 drm/amd/display: fix issues with driver unload
| * 9a5a537e14 phy: phy-can-transceiver: Skip warning if no "max-bitrate"
| * 4095065b59 dmaengine: xilinx_dma: call of_node_put() when breaking out of for_each_child_of_node()
| * 5bd3c1c1bc cifs: fix potential deadlock in cache_refresh_path()
| * 1a2a47b85c HID: betop: check shape of output reports
| * b2a7309743 l2tp: prevent lockdep issue in l2tp_tunnel_register()
| * edf0e509ce virtio-net: correctly enable callback during start_xmit
| * d3401c7624 net: macb: fix PTP TX timestamp failure due to packet padding
| * 71c6019655 dmaengine: Fix double increment of client_count in dma_chan_get()
| * 1e7919f0b1 drm/panfrost: fix GENERIC_ATOMIC64 dependency
| * a1b3e50e21 net: mlx5: eliminate anonymous module_init & module_exit
| * 09e3fb6f53 net/mlx5: E-switch, Fix setting of reserved fields on MODIFY_SCHEDULING_ELEMENT
| * 01a6e10810 net: ipa: disable ipa interrupt during suspend
| * 98aec50ff7 Bluetooth: Fix possible deadlock in rfcomm_sk_state_change
| * 0e59f60b74 usb: gadget: f_fs: Ensure ep0req is dequeued before free_request
| * ae8e136bca usb: gadget: f_fs: Prevent race during ffs_ep0_queue_wait
| * f25cd2b731 HID: revert CHERRY_MOUSE_000C quirk
| * 39483511fd pinctrl: rockchip: fix mux route data for rk3568
| * 1dae88a0b4 net: stmmac: fix invalid call to mdiobus_get_phy()
| * 6716838bf8 HID: check empty report_list in bigben_probe()
| * 2b49568254 HID: check empty report_list in hid_validate_values()
| * ad67de330d net: mdio: validate parameter addr in mdiobus_get_phy()
| * 4869129379 net: usb: sr9700: Handle negative len
| * 2827c4eb42 octeontx2-pf: Avoid use of GFP_KERNEL in atomic context
| * 77e8ed776c l2tp: close all race conditions in l2tp_tunnel_register()
| * af22d2c0b4 l2tp: convert l2tp_tunnel_list to idr
| * 22c7d45ca3 l2tp: Don't sleep and disable BH under writer-side sk_callback_lock
| * 87d9205d9a l2tp: Serialize access to sk_user_data with sk_callback_lock
| * c53acbf2fa net/sched: sch_taprio: fix possible use-after-free
| * 40516d042b net: stmmac: Fix queue statistics reading
| * 620aa67f80 pinctrl: rockchip: fix reading pull type on rk3568
| * ddca674af1 pinctrl/rockchip: add error handling for pull/drive register getters
| * 259ab8fb8c pinctrl/rockchip: Use temporary variable for struct device
| * 8cbf932c5c wifi: rndis_wlan: Prevent buffer overflow in rndis_query_oid
| * f792d26e5c gpio: mxc: Always set GPIOs used as interrupt source to INPUT mode
| * 8335f877ef gpio: mxc: Protect GPIO irqchip RMW with bgpio spinlock
| * fb4fb3d267 gpio: use raw spinlock for gpio chip shadowed data
| * 52e3eebfe6 sch_htb: Avoid grafting on htb_destroy_class_offload when destroying htb
| * 8232e5a84d net: enetc: avoid deadlock in enetc_tx_onestep_tstamp()
| * 95347e41ca net: wan: Add checks for NULL for utdm in undo_uhdlc_init and unmap_si_regs
| * 7f129927fe net: nfc: Fix use-after-free in local_cleanup()
| * 397aaac884 phy: rockchip-inno-usb2: Fix missing clk_disable_unprepare() in rockchip_usb2phy_power_on()
| * 01bdcc73db bpf: Fix pointer-leak due to insufficient speculative store bypass mitigation
| * 261e2f12b6 amd-xgbe: Delay AN timeout during KR training
| * a8cf4af544 amd-xgbe: TX Flow Ctrl Registers are h/w ver dependent
| * 8e897cb674 ARM: dts: at91: sam9x60: fix the ddr clock for sam9x60
| * 0a27dcd534 NFSD: fix use-after-free in nfsd4_ssc_setup_dul()
| * 24af570c99 phy: ti: fix Kconfig warning and operator precedence
| * 631fc36685 arm64: dts: qcom: msm8992-libra: Fix the memory map
| * dda20ffec8 arm64: dts: qcom: msm8992-libra: Add CPU regulators
| * 37ba5e9293 arm64: dts: qcom: msm8992: Don't use sfpb mutex
| * bab87524f6 PM: AVS: qcom-cpr: Fix an error handling path in cpr_probe()
| * b7a479c764 affs: initialize fsdata in affs_truncate()
| * 623d111689 IB/hfi1: Remove user expected buffer invalidate race
| * 47d5fc0dcd IB/hfi1: Immediately remove invalid memory from hardware
| * 85caef2cfd IB/hfi1: Fix expected receive setup error exit issues
| * cb193984d4 IB/hfi1: Reserve user expected TIDs
| * 891ddfae39 IB/hfi1: Reject a zero-length user expected buffer
| * 362c948972 RDMA/core: Fix ib block iterator counter overflow
| * e26c571c3b tomoyo: fix broken dependency on *.conf.default
| * 7dfe83ecc3 firmware: arm_scmi: Harden shared memory access in fetch_notification
| * a653dbb70c firmware: arm_scmi: Harden shared memory access in fetch_response
| * caffa7fed1 EDAC/highbank: Fix memory leak in highbank_mc_probe()
| * 95de286200 reset: uniphier-glue: Fix possible null-ptr-deref
| * 4773a8cf9a reset: uniphier-glue: Use reset_control_bulk API
| * 7b33accc8f soc: imx8m: Fix incorrect check for of_clk_get_by_name()
| * f07427f8d9 arm64: dts: imx8mm-venice-gw7901: fix USB2 controller OC polarity
| * c4cb73febe HID: intel_ish-hid: Add check for ishtp_dma_tx_map
| * 25f97c9883 ARM: imx: add missing of_node_put()
| * 3e9d79ded9 arm64: dts: imx8mm-beacon: Fix ecspi2 pinmux
| * 5381350761 ARM: dts: imx6qdl-gw560x: Remove incorrect 'uart-has-rtscts'
| * 0e4bba1656 ARM: dts: imx7d-pico: Use 'clock-frequency'
| * 108cf4c6d5 ARM: dts: imx6ul-pico-dwarf: Use 'clock-frequency'
| * 207c9e64ed arm64: dts: imx8mp-phycore-som: Remove invalid PMIC property
| * 7ce380fe75 dmaengine: ti: k3-udma: Do conditional decrement of UDMA_CHAN_RT_PEER_BCNT_REG
| * edba9b7a70 memory: mvebu-devbus: Fix missing clk_disable_unprepare in mvebu_devbus_probe()
| * e66f6949da memory: atmel-sdramc: Fix missing clk_disable_unprepare in atmel_ramc_probe()
| * eda11ab556 memory: tegra: Remove clients SID override programming
* | cab35cbd71 Revert "xhci: Add update_hub_device override for PCI xHCI hosts"
* | 29e8f224d8 Revert "xhci: Detect lpm incapable xHC USB3 roothub ports from ACPI tables"
* | 5739b27e8f Revert "xhci: Add a flag to disable USB3 lpm on a xhci root port level."
* | 5b60fdf2e0 Merge 5.15.90 into android13-5.15-lts
|\|
| * aabd5ba7e9 Linux 5.15.90
| * 4b6f8263e9 io_uring/rw: remove leftover debug statement
| * b10acfcd61 io_uring/rw: ensure kiocb_end_write() is always called
| * 124fb13cc7 io_uring: fix double poll leak on repolling
| * e944f1e37b io_uring: Clean up a false-positive warning from GCC 9.3.0
| * 940e8922c1 mm/khugepaged: fix collapse_pte_mapped_thp() to allow anon_vma
| * e83cc8a780 soc: qcom: apr: Make qcom,protection-domain optional again
| * 982c8b1e95 Revert "wifi: mac80211: fix memory leak in ieee80211_if_add()"
| * 40a4797e08 block: mq-deadline: Rename deadline_is_seq_writes()
| * 3abf10b4c4 net/mlx5: fix missing mutex_unlock in mlx5_fw_fatal_reporter_err_work()
| * 1aab00aa41 net/ulp: use consistent error code when blocking ULP
| * 2e4c95a404 io_uring/net: fix fast_iov assignment in io_setup_async_msg()
| * 311b298a33 io_uring: io_kiocb_update_pos() should not touch file for non -1 offset
| * 487a086595 tracing: Use alignof__(struct {type b;}) instead of offsetof()
| * 430443f856 x86/fpu: Use _Alignof to avoid undefined behavior in TYPE_ALIGN
| * f114717dfa Revert "drm/amdgpu: make display pinning more flexible (v2)"
| * 7a993c1be5 efi: rt-wrapper: Add missing include
| * de2af657ca arm64: efi: Execute runtime services from a dedicated stack
| * 9cca110cf8 fs/ntfs3: Fix attr_punch_hole() null pointer derenference
| * d4d112e5c4 drm/amdgpu: drop experimental flag on aldebaran
| * c82fa690da drm/amd/display: Fix COLOR_SPACE_YCBCR2020_TYPE matrix
| * 88c3375224 drm/amd/display: Calculate output_color_space after pixel encoding adjustment
| * 87e605b161 drm/amd/display: Fix set scaling doesn's work
| * 8687b8cdc3 drm/i915/display: Check source height is > 0
| * 5d96179166 drm/i915: re-disable RC6p on Sandy Bridge
| * e9a7ec188b mei: me: add meteor lake point M DID
| * eb0421d90f gsmi: fix null-deref in gsmi_get_variable
| * b8d99cda52 serial: atmel: fix incorrect baudrate setup
| * b85498385a serial: amba-pl011: fix high priority character transmission in rs486 mode
| * 0f150134dd dmaengine: idxd: Let probe fail when workqueue cannot be enabled
| * 1e8c127c2e dmaengine: tegra210-adma: fix global intr clear
| * 473e2281f7 dmaengine: lgm: Move DT parsing after initialization
| * 73337724cb serial: pch_uart: Pass correct sg to dma_unmap_sg()
| * 4307a41cbc dt-bindings: phy: g12a-usb3-pcie-phy: fix compatible string documentation
| * c9d55f564a dt-bindings: phy: g12a-usb2-phy: fix compatible string documentation
| * 78aa45bb7a usb-storage: apply IGNORE_UAS only for HIKSEMI MD202 on RTL9210
| * a69c8dfb85 usb: gadget: f_ncm: fix potential NULL ptr deref in ncm_bitrate()
| * 1ab67e87b1 usb: gadget: g_webcam: Send color matching descriptor per frame
| * b08167d8f0 usb: typec: altmodes/displayport: Fix pin assignment calculation
| * 7fb1322e7a usb: typec: altmodes/displayport: Add pin assignment helper
| * 59f9ee3796 usb: typec: tcpm: Fix altmode re-registration causes sysfs create fail
| * a1c8a5c2f8 usb: host: ehci-fsl: Fix module alias
| * f073d10cd5 usb: cdns3: remove fetched trb from cache before dequeuing
| * 73f4bde973 USB: serial: cp210x: add SCALANCE LPE-9000 device id
| * a2e075f401 USB: gadgetfs: Fix race between mounting and unmounting
| * 2da67bff29 tty: fix possible null-ptr-defer in spk_ttyio_release
| * cb53a3366e tty: serial: qcom-geni-serial: fix slab-out-of-bounds on RX FIFO buffer
| * f322dd2e4a staging: mt7621-dts: change some node hex addresses to lower case
| * 6508788b2c bpf: restore the ebpf program ID for BPF_AUDIT_UNLOAD and PERF_BPF_EVENT_PROG_UNLOAD
| * 7b122c33bd riscv: dts: sifive: fu740: fix size of pcie 32bit memory
| * 701f9c3da6 thunderbolt: Use correct function to calculate maximum USB3 link rate
| * 5b1b03a3d3 cifs: do not include page data when checking signature
| * 64287cd456 btrfs: fix race between quota rescan and disable leading to NULL pointer deref
| * f2e0e1615d btrfs: do not abort transaction on failure to write log tree when syncing log
| * f653abe619 mmc: sdhci-esdhc-imx: correct the tuning start tap and step setting
| * 9881436f01 mmc: sunxi-mmc: Fix clock refcount imbalance during unbind
| * 33bd0db750 ACPI: PRM: Check whether EFI runtime is available
| * 87e1ee6058 comedi: adv_pci1760: Fix PWM instruction handling
| * b5d24a8e4a usb: core: hub: disable autosuspend for TI TUSB8041
| * 61a0890cb9 misc: fastrpc: Fix use-after-free race condition for maps
| * 1b7b7bb400 misc: fastrpc: Don't remove map on creater_process and device_release
| * e7e41fcf90 USB: misc: iowarrior: fix up header size for USB_DEVICE_ID_CODEMERCS_IOW100
| * f3de34d90d staging: vchiq_arm: fix enum vchiq_status return types
| * 16d09c4bc9 USB: serial: option: add Quectel EM05CN modem
| * 34d769f0c6 USB: serial: option: add Quectel EM05CN (SG) modem
| * 768d56ed24 USB: serial: option: add Quectel EC200U modem
| * 829916f069 USB: serial: option: add Quectel EM05-G (RS) modem
| * eb8808f769 USB: serial: option: add Quectel EM05-G (CS) modem
| * 6e0430db19 USB: serial: option: add Quectel EM05-G (GR) modem
| * f01aefe374 prlimit: do_prlimit needs to have a speculation check
| * 418e2c756d xhci: Detect lpm incapable xHC USB3 roothub ports from ACPI tables
| * 10cb7d53be usb: acpi: add helper to check port lpm capability using acpi _DSM
| * 1818e2a97d xhci: Add a flag to disable USB3 lpm on a xhci root port level.
| * 8911ff7963 xhci: Add update_hub_device override for PCI xHCI hosts
| * c462ac871f xhci: Fix null pointer dereference when host dies
| * f39c813af0 usb: xhci: Check endpoint is valid before dereferencing it
| * 0f175cebc4 xhci-pci: set the dma max_seg_size
| * 89a410dbd0 io_uring/rw: defer fsnotify calls to task context
| * 05d69b372b io_uring: do not recalculate ppos unnecessarily
| * ff8a070253 io_uring: update kiocb->ki_pos at execution time
| * b7958caf41 io_uring: remove duplicated calls to io_kiocb_ppos
| * 86e2d6901a io_uring: ensure that cached task references are always put on exit
| * 30b9068934 io_uring: fix async accept on O_NONBLOCK sockets
| * a79b13f249 io_uring: allow re-poll if we made progress
| * 3c1a3d0269 io_uring: support MSG_WAITALL for IORING_OP_SEND(MSG)
| * 390b881631 io_uring: add flag for disabling provided buffer recycling
| * 9b7b0f2116 io_uring: ensure recv and recvmsg handle MSG_WAITALL correctly
| * cdc68e714d io_uring: improve send/recv error handling
| * ccf06b5a98 io_uring: pass in EPOLL_URING_WAKE for eventfd signaling and wakeups
| * 77baf39227 eventfd: provide a eventfd_signal_mask() helper
| * a2d8ff00a7 eventpoll: add EPOLL_URING_WAKE poll wakeup flag
| * a9aa4aa7a5 io_uring: don't gate task_work run on TIF_NOTIFY_SIGNAL
| * bd9a23a4bb hugetlb: unshare some PMDs when splitting VMAs
| * 393d9e3ed1 drm/amd: Delay removal of the firmware framebuffer
| * 865e244e06 drm/amdgpu: disable runtime pm on several sienna cichlid cards(v2)
| * 560373fb1e ALSA: hda/realtek: fix mute/micmute LEDs don't work for a HP platform
| * 26264260a8 ALSA: hda/realtek: fix mute/micmute LEDs for a HP ProBook
| * 1026756321 efi: fix userspace infinite retry read efivars after EFI runtime services page fault
| * 45627a1a64 nilfs2: fix general protection fault in nilfs_btree_insert()
| * 350d66d9e7 zonefs: Detect append writes at invalid locations
| * 5054d001ff Add exception protection processing for vd in axi_chan_handle_err function
| * a12fd43bd1 wifi: mac80211: sdata can be NULL during AMPDU start
| * f96a6c009e wifi: brcmfmac: fix regression for Broadcom PCIe wifi devices
| * 908d1742b6 Bluetooth: hci_qca: Fix driver shutdown on closed serdev
| * 7530fbc05f fbdev: omapfb: avoid stack overflow warning
| * e1df7f0b27 perf/x86/rapl: Treat Tigerlake like Icelake
| * 2c129e8689 f2fs: let's avoid panic if extent_tree is not created
| * 58bac74402 x86/asm: Fix an assembler warning with current binutils
| * fdb4a70bb7 btrfs: always report error in run_one_delayed_ref()
| * f641067ea2 RDMA/srp: Move large values to a new enum for gcc13
| * 793f8ac218 r8169: move rtl_wol_enable_rx() and rtl_prepare_power_down()
| * dc072762f9 net/ethtool/ioctl: return -EOPNOTSUPP if we have no phy stats
| * 308d24d875 vduse: Validate vq_num in vduse_validate_config()
| * 8e1eb926a0 virtio_pci: modify ENOENT to EINVAL
| * 64a6f3689d tools/virtio: initialize spinlocks in vring_test.c
| * 95fc28a8e9 selftests/bpf: check null propagation only neither reg is PTR_TO_BTF_ID
| * d4a9d2944f pNFS/filelayout: Fix coalescing test for single DS
| * 6a3319af6b btrfs: fix trace event name typo for FLUSH_DELAYED_REFS
* | 52cea9ba91 Merge "Merge 5.15.89 into android13-5.15-lts" into android13-5.15-lts
|\ \
| * | de550d72f1 Merge 5.15.89 into android13-5.15-lts
| |\|
| | * 3bcc86eb3e Linux 5.15.89
| | * 37c18ef49e pinctrl: amd: Add dynamic debugging for active GPIOs
| | * a5841b81ad Revert "usb: ulpi: defer ulpi_register on ulpi_read_id timeout"
| | * 7ec9a45fc4 block: handle bio_split_to_limits() NULL return
| | * ba86db02d4 io_uring/io-wq: only free worker if it was allocated for creation
| | * bb135bcc94 io_uring/io-wq: free worker if task_work creation is canceled
| | * 63c2fa09b8 scsi: mpt3sas: Remove scsi_dma_map() error messages
| | * e2ea555642 efi: fix NULL-deref in init error path
| | * 94b6cf84db arm64: cmpxchg_double*: hazard against entire exchange variable
| | * 3891fa4982 arm64: atomics: remove LL/SC trampolines
| | * 61e86339af arm64: atomics: format whitespace consistently
| | * ed4629d1e9 io_uring: lock overflowing for IOPOLL
| | * fbf5015141 KVM: x86: Do not return host topology information from KVM_GET_SUPPORTED_CPUID
| | * ee16841134 Documentation: KVM: add API issues section
| | * b8f3b3cffb mm: Always release pages to the buddy allocator in memblock_free_late().
| | * d2dc110dea platform/surface: aggregator: Add missing call to ssam_request_sync_free()
| | * cfd5978411 igc: Fix PPS delta between two synchronized end-points
| | * 0bf52601ce perf build: Properly guard libbpf includes
| | * 205f35eee7 net/mlx5e: Don't support encap rules with gbp option
| | * 0526fc9330 net/mlx5: Fix ptp max frequency adjustment range
| | * 9e2c38827c net/sched: act_mpls: Fix warning during failed attribute validation
| | * e3bb44beaf tools/nolibc: fix the O_* fcntl/open macro definitions for riscv
| | * 1e6ec75bb3 tools/nolibc: restore mips branch ordering in the _start block
| | * bd0431a66c tools/nolibc: Remove .global _start from the entry point code
| | * a77c54f5b5 tools/nolibc/arch: mark the _start symbol as weak
| | * da51e086d1 tools/nolibc/arch: split arch-specific code into individual files
| | * 8591e788be tools/nolibc/types: split syscall-specific definitions into their own files
| | * 4fceecdeaa tools/nolibc/std: move the standard type definitions to std.h
| | * 1792136f22 tools/nolibc: use pselect6 on RISCV
| | * 487386a49e tools/nolibc: x86-64: Use `mov $60,%eax` instead of `mov $60,%rax`
| | * 27af4f2260 tools/nolibc: x86: Remove `r8`, `r9` and `r10` from the clobber list
| | * a60b24192b af_unix: selftest: Fix the size of the parameter to connect()
| | * 39ae73e581 nfc: pn533: Wait for out_urb's completion in pn533_usb_send_frame()
| | * f6003784b1 hvc/xen: lock console list traversal
| | * 79c58b7424 octeontx2-af: Fix LMAC config in cgx_lmac_rx_tx_enable
| | * 303d062881 tipc: fix unexpected link reset due to discovery messages
| | * e79d0f97cc ALSA: usb-audio: Relax hw constraints for implicit fb sync
| | * c9557906bd ALSA: usb-audio: Make sure to stop endpoints before closing EPs
| | * 83e758105b ASoC: wm8904: fix wrong outputs volume after power reactivation
| | * 7c26d21872 scsi: ufs: core: WLUN suspend SSU/enter hibern8 fail recovery
| | * 513fdf0b8e scsi: ufs: Stop using the clock scaling lock in the error handler
| | * 13259b60b7 scsi: mpi3mr: Refer CONFIG_SCSI_MPI3MR in Makefile
| | * 470f6a9175 regulator: da9211: Use irq handler when ready
| | * 24107ad469 x86/resctrl: Fix task CLOSID/RMID update race
| | * cd3da505fb EDAC/device: Fix period calculation in edac_device_reset_delay_period()
| | * ab0d02c53a x86/boot: Avoid using Intel mnemonics in AT&T syntax asm
| | * a90d339f1f powerpc/imc-pmu: Fix use of mutex in IRQs disabled section
| | * 511cf17b24 netfilter: ipset: Fix overflow before widen in the bitmap_ip_create() function.
| | * b22faa21b6 sched/core: Fix use-after-free bug in dup_user_cpus_ptr()
| | * d766ccadbe iommu/mediatek-v1: Fix an error handling path in mtk_iommu_v1_probe()
| | * c929a230c8 iommu/iova: Fix alloc iova overflows issue
| | * 4b51aa263a usb: ulpi: defer ulpi_register on ulpi_read_id timeout
| | * 9a8bf443f6 bus: mhi: host: Fix race between channel preparation and M0 event
| | * 456e3794e0 ipv6: raw: Deduct extension header length in rawv6_push_pending_frames
| | * 4c93422a54 ixgbe: fix pci device refcount leak
| | * e97da5d97a platform/x86: sony-laptop: Don't turn off 0x153 keyboard backlight during probe
| | * f3b1e04daf dt-bindings: msm/dsi: Don't require vcca-supply on 14nm PHY
| | * 52a5f596c6 dt-bindings: msm/dsi: Don't require vdds-supply on 10nm PHY
| | * 984ad875db drm/msm/dp: do not complete dp_aux_cmd_fifo_tx() if irq is not for aux transfer
| | * 92ae83665e platform/x86: ideapad-laptop: Add Legion 5 15ARH05 DMI id to set_fn_lock_led_list[]
| | * e38b5f81df dt-bindings: msm: dsi-phy-28nm: Add missing qcom, dsi-phy-regulator-ldo-mode
| | * bb32ab40cb dt-bindings: msm: dsi-controller-main: Fix description of core clock
| | * 3fb8d10bee dt-bindings: msm: dsi-controller-main: Fix power-domain constraint
| | * dc5b651cad drm/msm/adreno: Make adreno quirks not overwrite each other
| | * 757d665ee1 dt-bindings: msm: dsi-controller-main: Fix operating-points-v2 constraint
| | * c90cf47d30 platform/x86: dell-privacy: Fix SW_CAMERA_LENS_COVER reporting
| | * 25b5f693bc platform/surface: aggregator: Ignore command messages not intended for us
| | * ee7b8ce2cc platform/x86: dell-privacy: Only register SW_CAMERA_LENS_COVER if present
| | * e0072068ad cifs: Fix uninitialized memory read for smb311 posix symlink create
| | * f3495b5e9e net/mlx5e: Set action fwd flag when parsing tc action goto
| | * 1a8431cc20 drm/i915/gt: Reset twice
| | * 011ecdbcd5 drm/virtio: Fix GEM handle creation UAF
| | * 798dfeeae3 s390/percpu: add READ_ONCE() to arch_this_cpu_to_op_simple()
| | * a400593eb3 s390/cpum_sf: add READ_ONCE() semantics to compare and swap loops
| | * d4fa65960a ASoC: qcom: lpass-cpu: Fix fallback SD line index handling
| | * 8400b91c11 s390/kexec: fix ipl report address for kdump
| | * c07e0babd1 perf auxtrace: Fix address filter duplicate symbol selection
| | * e81d82da61 net: stmmac: add aux timestamps fifo clearance wait
| | * 44167b74a8 docs: Fix the docs build with Sphinx 6.0
| | * 24176bf2a1 efi: tpm: Avoid READ_ONCE() for accessing the event log
| | * 01b966b14c selftests: kvm: Fix a compile error in selftests/kvm/rseq_test.c
| | * c773ebe11c KVM: arm64: nvhe: Fix build with profile optimization
| | * c1d6a72fc8 KVM: arm64: Fix S1PTW handling on RO memslots
| | * e04e6cd883 ALSA: hda/realtek: Enable mute/micmute LEDs on HP Spectre x360 13-aw0xxx
| | * b983c9a971 ALSA: hda/realtek - Turn on power early
| | * 9ab3696881 ALSA: control-led: use strscpy in set_led_id()
| | * a8acfe2c6f netfilter: nft_payload: incorrect arithmetics when fetching VLAN header bits
* | | 2c4f6d72f1 Merge "Merge 5.15.88 into android13-5.15-lts" into android13-5.15-lts
|\| |
| * | 773ec50a8a Merge 5.15.88 into android13-5.15-lts
| |\|
| | * 90bb4f8f39 Linux 5.15.88
| | * cbd3e6d5e5 ALSA: hda - Enable headset mic on another Dell laptop with ALC3254
| | * b98dee4746 ALSA: hda/hdmi: Add a HP device 0x8715 to force connect list
| | * 26350c21bc ALSA: pcm: Move rwsem lock inside snd_ctl_elem_read to prevent UAF
| | * dadd0dcaa6 net/ulp: prevent ULP without clone op from entering the LISTEN status
| | * 04941c1d5b net: sched: disallow noqueue for qdisc classes
| | * 068b512193 serial: fixup backport of "serial: Deassert Transmit Enable on probe in driver-specific way"
| | * 46aa155758 selftests/vm/pkeys: Add a regression test for setting PKRU through ptrace
| | * 3c1940c549 x86/fpu: Emulate XRSTOR's behavior if the xfeatures PKRU bit is not set
| | * 3f1c81426a x86/fpu: Allow PKRU to be (once again) written by ptrace.
| | * b29773d6b0 x86/fpu: Add a pkru argument to copy_uabi_to_xstate()
| | * 9813c5fc22 x86/fpu: Add a pkru argument to copy_uabi_from_kernel_to_xstate().
| | * fea26e83a1 x86/fpu: Take task_struct* in copy_sigframe_from_user_to_xstate()
| | * d4d152017e parisc: Align parisc MADV_XXX constants with all other architectures
| * | 1867565896 Revert "ASoC/SoundWire: dai: expand 'stream' concept beyond SoundWire"
| * | 43064ed394 Revert "ASoC: Intel/SOF: use set_stream() instead of set_tdm_slots() for HDAudio"
| * | 959d50edd2 Revert "PM/devfreq: governor: Add a private governor_data for governor"
* | | c34c76a947 Revert "ASoC/SoundWire: dai: expand 'stream' concept beyond SoundWire"
* | | 33ef84070b Revert "ASoC: Intel/SOF: use set_stream() instead of set_tdm_slots() for HDAudio"
* | | e60641bdca Revert "PM/devfreq: governor: Add a private governor_data for governor"
* | | 793ec0a9cc Merge "Merge 5.15.87 into android13-5.15-lts" into android13-5.15-lts
|\| |
| * | fc4de343bd Merge 5.15.87 into android13-5.15-lts
| |\|
| | * d57287729e Linux 5.15.87
| | * 24186c6822 drm/mgag200: Fix PLL setup for G200_SE_A rev >=4
| | * e326ee018a io_uring: Fix unsigned 'res' comparison with zero in io_fixup_rw_res()
| | * b2b6eefab4 efi: random: combine bootloader provided RNG seed with RNG protocol output
| | * 99c0759495 mbcache: Avoid nesting of cache->c_list_lock under bit locks
| | * d50d6c193a net: hns3: fix return value check bug of rx copybreak
| | * d4e6a13eb9 btrfs: make thaw time super block check to also verify checksum
| | * 70a1dccd0e selftests: set the BUILD variable to absolute path
| | * 58fef3ebc8 ext4: don't allow journal inode to have encrypt flag
| | * bd5dc96fea mptcp: use proper req destructor for IPv6
| | * 78bd6ab52c mptcp: dedicated request sock for subflow in v6
| | * 6e9c1aef3e Revert "ACPI: PM: Add support for upcoming AMD uPEP HID AMDI007"
| | * e32f867b37 ksmbd: check nt_len to be at least CIFS_ENCPWD_SIZE in ksmbd_decode_ntlmssp_auth_blob
| | * 4136f1ac1e ksmbd: fix infinite loop in ksmbd_conn_handler_loop()
| | * f10defb0be hfs/hfsplus: avoid WARN_ON() for sanity check, use proper error handling
| | * 48d9e2e6de hfs/hfsplus: use WARN_ON for sanity check
| | * f5a9bbf962 drm/i915/gvt: fix vgpu debugfs clean in remove
| | * ae9a615117 drm/i915/gvt: fix gvt debugfs destroy
| | * eb3e943a32 riscv, kprobes: Stricter c.jr/c.jalr decoding
| | * 620a229f57 riscv: uaccess: fix type of 0 variable on error in get_user()
| | * 8e05a993f8 thermal: int340x: Add missing attribute for data rate base
| | * c3222fd282 io_uring: fix CQ waiting timeout handling
| | * b7b9bc9305 block: don't allow splitting of a REQ_NOWAIT bio
| | * e1358c8787 fbdev: matroxfb: G200eW: Increase max memory from 1 MB to 16 MB
| | * 682a7d064f nfsd: fix handling of readdir in v4root vs. mount upcall timeout
| | * cb42aa7b5f x86/bugs: Flush IBP in ib_prctl_set()
| | * 554a880a1f x86/kexec: Fix double-free of elf header buffer
| | * 264241a610 btrfs: check superblock to ensure the fs was not modified at thaw time
| | * 69f4bda5f4 nvme: also return I/O command effects from nvme_command_effects
| | * a6a4b057cd nvmet: use NVME_CMD_EFFECTS_CSUPP instead of open coding it
| | * f9309dcaa9 io_uring: check for valid register opcode earlier
| | * 4df413d469 nvme: fix multipath crash caused by flush request when blktrace is enabled
| | * 03ce792128 ASoC: Intel: bytcr_rt5640: Add quirk for the Advantech MICA-071 tablet
| | * 0dca7375e2 udf: Fix extension of the last extent in the file
| | * dc1bc90397 caif: fix memory leak in cfctrl_linkup_request()
| | * bce3680b48 drm/i915: unpin on error in intel_vgpu_shadow_mm_pin()
| | * da6a3653b8 perf stat: Fix handling of --for-each-cgroup with --bpf-counters to match non BPF mode
| | * 11cd4ec635 usb: rndis_host: Secure rndis_query check against int overflow
| | * 6ea5273c71 octeontx2-pf: Fix lmtst ID used in aura free
| | * 4e5f2c74cb drivers/net/bonding/bond_3ad: return when there's no aggregator
| | * 8414983c2e fs/ntfs3: don't hold ni_lock when calling truncate_setsize()
| | * a23e8376e6 drm/imx: ipuv3-plane: Fix overlay plane width
| | * a8f7fd322f perf tools: Fix resources leak in perf_data__open_dir()
| | * a1e1521b46 netfilter: ipset: Rework long task execution when adding/deleting entries
| | * 6f19a38483 netfilter: ipset: fix hash:net,port,net hang with /0 subnet
| | * 774d259749 net: sparx5: Fix reading of the MAC address
| | * 04dc4003e5 net: sched: cbq: dont intepret cls results when asked to drop
| | * f02327a487 net: sched: atm: dont intepret cls results when asked to drop
| | * 95da1882ce gpio: sifive: Fix refcount leak in sifive_gpio_probe
| | * da9c9883ec ceph: switch to vfs_inode_has_locks() to fix file lock bug
| | * 54e72ce5f1 filelock: new helper: vfs_inode_has_locks
| | * f34b03ce3a drm/meson: Reduce the FIFO lines held when AFBC is not used
| | * 05a8410b0f RDMA/mlx5: Fix validation of max_rd_atomic caps for DC
| | * 8d89870d63 RDMA/mlx5: Fix mlx5_ib_get_hw_stats when used for device
| | * 4d112f0016 net: phy: xgmiitorgmii: Fix refcount leak in xgmiitorgmii_probe
| | * e5fbeb3d16 net: ena: Update NUMA TPH hint register upon NUMA node update
| | * 7840b93cfd net: ena: Set default value for RX interrupt moderation
| | * d09b7a9d2f net: ena: Fix rx_copybreak value update
| | * 0e7ad9b006 net: ena: Use bitmask to indicate packet redirection
| | * 5d4964984b net: ena: Account for the number of processed bytes in XDP
| | * f17d9aec07 net: ena: Don't register memory info on XDP exchange
| | * a4aa727ad0 net: ena: Fix toeplitz initial hash value
| | * 0bec17f1ce net: amd-xgbe: add missed tasklet_kill
| | * cb2f74685f net/mlx5e: Fix hw mtu initializing at XDP SQ allocation
| | * 6c72abb78b net/mlx5e: Always clear dest encap in neigh-update-del
| | * b36783bc11 net/mlx5e: TC, Refactor mlx5e_tc_add_flow_mod_hdr() to get flow attr
| | * f8c10eeba3 net/mlx5e: IPoIB, Don't allow CQE compression to be turned on by default
| | * 7227bbb7c1 net/mlx5: Avoid recovery in probe flows
| | * 9369b9afa8 net/mlx5: Add forgotten cleanup calls into mlx5_init_once() error path
| | * d966f2ee4b net/mlx5: E-Switch, properly handle ingress tagged packets on VST
| | * 6a37a01aba vdpa_sim: fix vringh initialization in vdpasim_queue_ready()
| | * e3462410c3 vhost: fix range used in translate_desc()
| | * 13871f60ec vringh: fix range used in iotlb_translate()
| | * e05d4c8c28 vhost/vsock: Fix error handling in vhost_vsock_init()
| | * 586e6fd7d5 vdpa_sim: fix possible memory leak in vdpasim_net_init() and vdpasim_blk_init()
| | * b63bc2db24 nfc: Fix potential resource leaks
| | * 945e58bdaf net: dsa: mv88e6xxx: depend on PTP conditionally
| | * 95df720e64 qlcnic: prevent ->dcb use-after-free on qlcnic_dcb_enable() failure
| | * 6c55953e23 net: sched: fix memory leak in tcindex_set_parms
| | * d14a4b24d5 net: hns3: fix VF promisc mode not update when mac table full
| | * 7ed205b947 net: hns3: fix miss L3E checking for rx packet
| | * 47868cb77f net: hns3: extract macro to simplify ring stats update code
| | * 7457c5a776 net: hns3: refactor hns3_nic_reuse_page()
| | * 4a6e9fb534 net: hns3: add interrupts re-initialization while doing VF FLR
| | * 5e48ed805c nfsd: shut down the NFSv4 state objects before the filecache
| | * 7e2825f5fb veth: Fix race with AF_XDP exposing old or uninitialized descriptors
| | * ac95cdafac netfilter: nf_tables: honor set timeout and garbage collection updates
| | * 49677ea151 vmxnet3: correctly report csum_level for encapsulated packet
| | * 9d30cb4421 netfilter: nf_tables: perform type checking for existing sets
| | * c3bfb7784a netfilter: nf_tables: add function to create set stateful expressions
| | * 996cd779c2 netfilter: nf_tables: consolidate set description
| | * 4f1105ee72 drm/panfrost: Fix GEM handle creation ref-counting
| | * df493f676f bpf: pull before calling skb_postpull_rcsum()
| | * d7e817e689 btrfs: fix an error handling path in btrfs_defrag_leaves()
| | * 4d69cdba2c SUNRPC: ensure the matching upcall is in-flight upon downcall
| | * af0265dfef drm/i915/migrate: fix length calculation
| | * 8b25a526a5 drm/i915/migrate: fix offset calculation
| | * a3d1e6f9b6 drm/i915/migrate: don't check the scratch page
| | * 5bc0b2fda4 ext4: fix deadlock due to mbcache entry corruption
| | * a6e4094faf mbcache: automatically delete entries from cache on freeing
| | * 1872549129 ext4: correct inconsistent error msg in nojournal mode
| | * 761f88f82e ext4: goto right label 'failed_mount3a'
| | * eb16602140 ravb: Fix "failed to switch device to config mode" message during unbind
| | * 4216995dbd perf probe: Fix to get the DW_AT_decl_file and DW_AT_call_file as unsinged data
| | * d8bbbf2b52 perf probe: Use dwarf_attr_integrate as generic DWARF attr accessor
| | * b131b5f136 media: s5p-mfc: Fix in register read and write for H264
| | * ff27800c0a media: s5p-mfc: Clear workbit to handle error condition
| | * 4653ba32ad media: s5p-mfc: Fix to handle reference queue during finishing
| | * 1bd7283dc0 x86/MCE/AMD: Clear DFR errors found in THR handler
| | * 5ddcd349d9 x86/mce: Get rid of msr_ops
| | * b8e7ed42bc btrfs: fix extent map use-after-free when handling missing device in read_one_chunk
| | * 9c3beebd21 btrfs: move missing device handling in a dedicate function
| | * 7528b21ceb btrfs: replace strncpy() with strscpy()
| | * 4cef44525f phy: qcom-qmp-combo: fix out-of-bounds clock access
| | * 855edc4ec6 ARM: renumber bits related to _TIF_WORK_MASK
| | * 18f28f1330 ext4: fix off-by-one errors in fast-commit block filling
| | * b205332b6b ext4: fix unaligned memory access in ext4_fc_reserve_space()
| | * 9c197dcbac ext4: add missing validation of fast-commit record lengths
| | * 6220ec4055 ext4: don't set up encryption key during jbd2 transaction
| | * 6482d42baf ext4: disable fast-commit of encrypted dir operations
| | * 6969367c15 ext4: fix potential out of bound read in ext4_fc_replay_scan()
| | * 818175ae3b ext4: factor out ext4_fc_get_tl()
| | * ffd84d0bc5 ext4: introduce EXT4_FC_TAG_BASE_LEN helper
| | * 37914e029b ext4: use ext4_debug() instead of jbd_debug()
| | * b0ed9a032e ext4: remove unused enum EXT4_FC_COMMIT_FAILED
| | * 394514ddf9 tracing: Fix issue of missing one synthetic field
| | * 5234dd5d20 block: mq-deadline: Fix dd_finish_request() for zoned devices
| | * 78623b10fc drm/amdgpu: make display pinning more flexible (v2)
| | * 6363da2c85 drm/amdgpu: handle polaris10/11 overlap asics (v2)
| | * 2771c7a0ee ext4: allocate extended attribute value in vmalloc area
| | * e995ff918e ext4: avoid unaccounted block allocation when expanding inode
| | * 877247222a ext4: initialize quota before expanding inode in setproject ioctl
| | * 322cf639b0 ext4: fix inode leak in ext4_xattr_inode_create() on an error path
| | * 6380a93b57 ext4: fix kernel BUG in 'ext4_write_inline_data_end()'
| | * dc3bbc9753 ext4: avoid BUG_ON when creating xattrs
| | * 844c405552 ext4: fix error code return to user-space in ext4_get_branch()
| | * b870b28e29 ext4: fix corruption when online resizing a 1K bigalloc fs
| | * d440d6427a ext4: fix delayed allocation bug in ext4_clu_mapped for bigalloc + inline
| | * def7a39091 ext4: init quota for 'old.inode' in 'ext4_rename'
| | * 3c31d8d3ad ext4: fix uninititialized value in 'ext4_evict_inode'
| | * 871800770d ext4: fix leaking uninitialized memory in fast-commit journal
| | * d480a49c15 ext4: fix bug_on in __es_tree_search caused by bad boot loader inode
| | * 91009e361e ext4: check and assert if marking an no_delete evicting inode dirty
| | * 820eacbc4e ext4: fix reserved cluster accounting in __es_remove_extent()
| | * 0dcbf4dc3d ext4: fix bug_on in __es_tree_search caused by bad quota inode
| | * 06a20a68bb ext4: add helper to check quota inums
| | * f7e6b5548f ext4: add EXT4_IGET_BAD flag to prevent unexpected bad inode
| | * 205ac16628 ext4: fix undefined behavior in bit shift for ext4_check_flag_values
| | * cf0e0817b0 ext4: fix use-after-free in ext4_orphan_cleanup
| | * 970bfd7a41 fs: ext4: initialize fsdata in pagecache_write()
| | * 744bbde378 ext4: remove trailing newline from ext4_msg() message
| | * 7192afa5e4 ext4: add inode table check in __ext4_get_inode_loc to aovid possible infinite loop
| | * 0d041b7251 ext4: silence the warning when evicting inode with dioread_nolock
| | * af4ceb00eb drm/ingenic: Fix missing platform_driver_unregister() call in ingenic_drm_init()
| | * c919e1154b drm/i915/dsi: fix VBT send packet port selection for dual link DSI
| | * 6948e570f5 drm/vmwgfx: Validate the box size for the snooped cursor
| | * 5594fde1ef drm/connector: send hotplug uevent on connector cleanup
| | * 317ebe61a6 device_cgroup: Roll back to original exceptions after copy failure
| | * ac838c663b parisc: led: Fix potential null-ptr-deref in start_task()
| | * 2c1881f081 remoteproc: core: Do pm_relax when in RPROC_OFFLINE state
| | * 9b615f957c iommu/amd: Fix ivrs_acpihid cmdline parsing code
| | * 35b792179b phy: qcom-qmp-combo: fix sc8180x reset
| | * dfd05a1335 driver core: Fix bus_type.match() error handling in __driver_attach()
| | * 44618a3397 crypto: ccp - Add support for TEE for PCI ID 0x14CA
| | * c55507a94b crypto: n2 - add missing hash statesize
| | * 4830750696 riscv: mm: notify remote harts about mmu cache updates
| | * 16b6d9525d riscv: stacktrace: Fixup ftrace_graph_ret_addr retp argument
| | * 657b440a27 PCI/sysfs: Fix double free in error path
| | * 67fd41bbb0 PCI: Fix pci_device_is_present() for VFs by checking PF
| | * bfce073089 ipmi: fix use after free in _ipmi_destroy_user()
| | * 3b4984035c ima: Fix a potential NULL pointer access in ima_restore_measurement_list
| | * a843699f16 mtd: spi-nor: Check for zero erase size in spi_nor_find_best_erase_type()
| | * 24f4649cd8 ipmi: fix long wait in unload when IPMI disconnect
| | * fa6bbb4894 ipu3-imgu: Fix NULL pointer dereference in imgu_subdev_set_selection()
| | * cdb208b090 ASoC: jz4740-i2s: Handle independent FIFO flush bits
| | * 2d0d083d8a wifi: wilc1000: sdio: fix module autoloading
| | * 2e4a088804 efi: Add iMac Pro 2017 to uefi skip cert quirk
| | * c49fb9b760 md/bitmap: Fix bitmap chunk size overflow issues
| | * 94fe975d54 block: mq-deadline: Do not break sequential write streams to zoned HDDs
| | * 8e91679f7b rtc: ds1347: fix value written to century register
| | * 5eb8296d73 cifs: fix missing display of three mount options
| | * cfa9f66f91 cifs: fix confusing debug message
| | * 8b45a3b19a media: dvb-core: Fix UAF due to refcount races at releasing
| | * acf984a371 media: dvb-core: Fix double free in dvb_register_device()
| | * 5fac317bee ARM: 9256/1: NWFPE: avoid compiler-generated __aeabi_uldivmod
| | * ce50c61245 staging: media: tegra-video: fix device_node use after free
| | * 6b16758215 staging: media: tegra-video: fix chan->mipi value on error
| | * 4f5de49d8c tracing: Fix infinite loop in tracing_read_pipe on overflowed print_trace_line
| | * 17becbc4dd tracing/probes: Handle system names with hyphens
| | * 2442e655a6 tracing/hist: Fix wrong return value in parse_action_params()
| | * 2a81ff5ce8 tracing: Fix complicated dependency of CONFIG_TRACER_MAX_TRACE
| | * fe8c35c6ff tracing: Fix race where eprobes can be called before the event
| | * eb20f6ed37 x86/kprobes: Fix optprobe optimization check with CONFIG_RETHUNK
| | * 3e0fbc06db x86/kprobes: Fix kprobes instruction boudary check with CONFIG_RETHUNK
| | * 6268a0704b ftrace/x86: Add back ftrace_expected for ftrace bug reports
| | * c95cf30dd4 x86/microcode/intel: Do not retry microcode reloading on the APs
| | * f8fe2f4178 KVM: nVMX: Properly expose ENABLE_USR_WAIT_PAUSE control to L1
| | * ca3483d71b KVM: nVMX: Inject #GP, not #UD, if "generic" VMXON CR0/CR4 check fails
| | * 2c73b349fd KVM: VMX: Resume guest immediately when injecting #GP on ECREATE
| | * 4a19f48bee of/kexec: Fix reading 32-bit "linux,initrd-{start,end}" values
| | * 7eddcdb09f perf/core: Call LSM hook after copying perf_event_attr
| | * 15697f6533 tracing/hist: Fix out-of-bound write on 'action_data.var_ref_idx'
| | * fd52b86a72 dm cache: set needs_check flag after aborting metadata
| | * d2a0b298eb dm cache: Fix UAF in destroy()
| | * 856edd0e92 dm clone: Fix UAF in clone_dtr()
| | * 9215b25f2e dm integrity: Fix UAF in dm_integrity_dtr()
| | * 34cd15d83b dm thin: Fix UAF in run_timer_softirq()
| | * ac362c40e3 dm thin: resume even if in FAIL mode
| | * 4b710e8481 dm thin: Use last transaction's pmd->root when commit failed
| | * f8c26c33fe dm thin: Fix ABBA deadlock between shrink_slab and dm_pool_abort_metadata
| | * 28d307f380 dm cache: Fix ABBA deadlock between shrink_slab and dm_cache_metadata_abort
| | * a9e89a567f mptcp: remove MPTCP 'ifdef' in TCP SYN cookies
| | * 13b9fd0dee mptcp: mark ops structures as ro_after_init
| | * b2120ed7fd fs: dlm: retry accept() until -EAGAIN or error returns
| | * 5b4478615f fs: dlm: fix sock release if listen fails
| | * b7ede8a63d ALSA: hda/realtek: Apply dual codec fixup for Dell Latitude laptops
| | * dbd1f30191 ALSA: patch_realtek: Fix Dell Inspiron Plus 16
| | * 8fb4c98f20 cpufreq: Init completion before kobject_init_and_add()
| | * 876c6ab967 PM/devfreq: governor: Add a private governor_data for governor
| | * 0e945ea733 selftests: Use optional USERCFLAGS and USERLDFLAGS
| | * 31697c5953 arm64: dts: qcom: sdm850-lenovo-yoga-c630: correct I2C12 pins drive strength
| | * 1630498660 ARM: ux500: do not directly dereference __iomem
| | * 99590f29b2 btrfs: fix
resolving backrefs for inline extent followed by prealloc | | *1f9cf4daf2mmc: sdhci-sprd: Disable CLK_AUTO when the clock is less than 400K | | *58d53ff30aarm64: dts: qcom: sdm845-db845c: correct SPI2 pins drive strength | | *a777b90a05perf/x86/intel/uncore: Clear attr_update properly | | *ca77ac238cperf/x86/intel/uncore: Disable I/O stacks to PMU mapping on ICX-D | | *df06e7777cjbd2: use the correct print format | | *8e75b1dd4bktest.pl minconfig: Unset configs instead of just removing them | | *55e5e8b445kest.pl: Fix grub2 menu handling for rebooting | | *823fed7c40soc: qcom: Select REMAP_MMIO for LLCC driver | | *8dabeeb1ffmedia: stv0288: use explicitly signed char | | *d167ebea90net/af_packet: make sure to pull mac header | | *9ff46c36dfnet/af_packet: add VLAN support for AF_PACKET SOCK_RAW GSO | | *cd0f597c8arcu-tasks: Simplify trc_read_check_handler() atomic operations | | *593ca69668ASoC/SoundWire: dai: expand 'stream' concept beyond SoundWire | | *a7874dac6bASoC: Intel/SOF: use set_stream() instead of set_tdm_slots() for HDAudio | | *ae4f70b2fekcsan: Instrument memcpy/memset/memmove with newer Clang | | *d01fa993ebSUNRPC: Don't leak netobj memory when gss_read_proxy_verf() fails | | *43135fb098tpm: tpm_tis: Add the missed acpi_put_table() to fix memory leak | | *986cd9a9b9tpm: tpm_crb: Add the missed acpi_put_table() to fix memory leak | | *638cd298dftpm: acpi: Call acpi_put_table() to fix memory leak | | *d58289fc77mmc: vub300: fix warning - do not call blocking ops when !TASK_RUNNING | | *7eb57bc92ff2fs: allow to read node block after shutdown | | *acc13987fdf2fs: should put a page when checking the summary info | | *35d8a89862mm, compaction: fix fast_isolate_around() to stay within boundaries | | *91bd504128md: fix a crash in mempool_free | | *29328fbce5mfd: mt6360: Add bounds checking in Regmap read/write call-backs | | *c24cc476acpnode: terminate at peers of source | | *0c9118e381ALSA: line6: fix stack overflow in line6_midi_transmit | | *ac4b4fdf32ALSA: line6: correct midi status byte when receiving data from podxt | | *83c44f0ebfovl: Use ovl mounter's fsuid and fsgid in ovl_link() | | *fcb94283e0binfmt: Fix error return code in load_elf_fdpic_binary() | | *ed9947277bhfsplus: fix bug causing custom uid and gid being unable to be assigned with mount | | *76d52b5412pstore/zone: Use GFP_ATOMIC to allocate zone buffer | | *74b0a2fcc3pstore: Properly assign mem_type property | | *d25aac3489HID: plantronics: Additional PIDs for double volume key presses quirk | | *9d4294545cHID: multitouch: fix Asus ExpertBook P2 P2451FA trackpoint | | *7280fdb80bpowerpc/rtas: avoid scheduling in rtas_os_term() | | *d8939315b7powerpc/rtas: avoid device tree lookups in rtas_os_term() | | *23a249b118objtool: Fix SEGFAULT | | *ed686e7a26fs/ntfs3: Fix slab-out-of-bounds in r_page | | *dd34665cb0fs/ntfs3: Delete duplicate condition in ntfs_read_mft() | | *a9847a11b6fs/ntfs3: Use __GFP_NOWARN allocation at ntfs_fill_super() | | *abd2ee2cf4fs/ntfs3: Use __GFP_NOWARN allocation at wnd_init() | | *d7ce7bb688fs/ntfs3: Validate index root when initialize NTFS security | | *f29676cc3asoundwire: dmi-quirks: add quirk variant for LAPBC710 NUC15 | | *9c8471a17ffs/ntfs3: Fix slab-out-of-bounds read in run_unpack | | *3a52f17867fs/ntfs3: Validate resident attribute name | | *3cd9e5b41bfs/ntfs3: Validate buffer length while parsing index | | *c878a915bcfs/ntfs3: Validate attribute name offset | | *f62506f5e4fs/ntfs3: Add null pointer check for inode operations | | *2dd9ccfb06fs/ntfs3: Fix memory leak on 
ntfs_fill_super() error path | | *ea6b359840fs/ntfs3: Add null pointer check to attr_load_runs_vcn | | *de5e095524fs/ntfs3: Validate data run offset | | *d4489ba8fbfs/ntfs3: Add overflow check for attribute size | | *af7a195deafs/ntfs3: Validate BOOT record_size | | *8e228ac90cnvmet: don't defer passthrough commands with trivial effects to the workqueue | | *f068a7315anvme: fix the NVME_CMD_EFFECTS_CSE_MASK definition | | *576502f25fata: ahci: Fix PCS quirk application for suspend | | *7949b0df3dblock, bfq: fix uaf for bfqq in bfq_exit_icq_bfqq | | *ff3d9ab51cACPI: resource: do IRQ override on Lenovo 14ALC7 | | *698a0813ceACPI: resource: do IRQ override on XMG Core 15 | | *a9ac7633bbACPI: resource: do IRQ override on LENOVO IdeaPad | | *5fe31f2950ACPI: resource: Skip IRQ override on Asus Vivobook K3402ZA/K3502ZA | | *4c5fee0d88nvme-pci: fix page size checks | | *9141144b37nvme-pci: fix mempool alloc size | | *f17cf8fa2cnvme-pci: fix doorbell buffer value endianness | | *ead99ec669Revert "selftests/bpf: Add test for unstable CT lookup API" | | *bf0543b937cifs: fix oops during encryption | | *56f6de394fusb: dwc3: qcom: Fix memory leak in dwc3_qcom_interconnect_init * | |2ce8e6e296ANDROID: add __dev_kfree_skb_irq to virtual_device abi list |/ / * |24bc28221fRevert "net: add atomic_long_t to net_device_stats fields" * |34d878c5b3Revert "ipv6/sit: use DEV_STATS_INC() to avoid data-races" * |956e2924f3Revert "arm64: Treat ESR_ELx as a 64-bit register" * |8a3baaa85eRevert "arm64: mm: kfence: only handle translation faults" * |8b3730f922Revert "gpiolib: protect the GPIO device against being dropped while in use by user-space" * |b0e87c106dRevert "soreuseport: Fix socket selection for SO_INCOMING_CPU." * |8a8a0cb6c6Revert "bpf, sockmap: Fix missing BPF_F_INGRESS flag when using apply_bytes" * |2d4c48bff9Revert "xhci: Prevent infinite loop in transaction errors recovery for streams" * |20ec745823Merge 5.15.86 into android13-5.15-lts |\| | *90ffbb727cLinux 5.15.86 | *3082f8705epwm: tegra: Fix 32 bit build | *caa40d1f85mfd: qcom_rpm: Use devm_of_platform_populate() to simplify code | *408dbaa065extcon: usbc-tusb320: Call the Type-C IRQ handler only if a port is registered | *2471a44769media: dvbdev: fix refcnt bug | *579fb0a332media: dvbdev: fix build warning due to comments | *1115e77c4fnet: stmmac: fix errno when create_singlethread_workqueue() fails | *d3871af13ascsi: qla2xxx: Fix crash when I/O abort times out | *50f993da94btrfs: do not BUG_ON() on ENOMEM when dropping extent items for a range | *1c65d50315ovl: fix use inode directly in rcu-walk mode | *88ec6d1105fbdev: fbcon: release buffer when fbcon_do_set_font() failed | *ca8bcb348agcov: add support for checksum field | *f36d8c8651floppy: Fix memory leak in do_floppy_init() | *4193a6745bregulator: core: fix deadlock on regulator enable | *ce5d0ef1cfiio: adc128s052: add proper .data members in adc128_of_match table | *aec1058f2aiio: adc: ad_sigma_delta: do not use internal iio_dev lock | *dc6afd6070iio: fix memory leak in iio_device_register_eventset() | *38c257ee6areiserfs: Add missing calls to reiserfs_security_free() | *8a4236456asecurity: Restrict CONFIG_ZERO_CALL_USED_REGS to gcc or clang > 15.0.6 | *1cabce56629p: set req refcount to zero to avoid uninitialized usage | *dd2157a98floop: Fix the max_loop commandline argument treatment when it is set to 0 | *fd03bd4c7bHID: mcp2221: don't connect hidraw | *6c886be1ffHID: wacom: Ensure bootloader PID is usable in hidraw mode | *4d640eb112xhci: Prevent infinite loop in transaction errors 
recovery for streams | *936c5f96c8usb: dwc3: core: defer probe on ulpi_read_id timeout | *e6bf6c4022usb: dwc3: Fix race between dwc3_set_mode and __dwc3_set_mode | *0e883f3bc8arm64: dts: qcom: sm8250: fix USB-DP PHY registers | *ffb14aac26usb: xhci-mtk: fix leakage of shared hcd when fail to set wakeup irq | *fcacd970e0usb: cdnsp: fix lack of ZLP for ep0 | *bcac79df08ALSA: hda/hdmi: Add HP Device 0x8711 to force connect list | *50c23a1107ALSA: hda/realtek: Add quirk for Lenovo TianYi510Pro-14IOB | *76574b3465ALSA: usb-audio: add the quirk for KT0206 device | *9e787dab98ima: Simplify ima_lsm_copy_rule | *2cd365029cpstore: Make sure CONFIG_PSTORE_PMSG selects CONFIG_RT_MUTEXES | *2068d41a3dafs: Fix lost servers_outstanding count | *0def8af038perf debug: Set debug_peo_args and redirect_to_stderr variable to correct values in perf_quiet_option() | *41cccae10epstore: Switch pmsg_lock to an rt_mutex to avoid priority inversion | *8877df8135LoadPin: Ignore the "contents" argument of the LSM hooks | *584202b0f1drm/i915/display: Don't disable DDI/Transcoder when setting phy test pattern | *b253e075b1ASoC: rt5670: Remove unbalanced pm_runtime_put() | *59f797a913ASoC: rockchip: spdif: Add missing clk_disable_unprepare() in rk_spdif_runtime_resume() | *132844d92fASoC: wm8994: Fix potential deadlock | *82f7c814edASoC: mediatek: mt8183: fix refcount leak in mt8183_mt6358_ts3a227_max98357_dev_probe() | *e5d6bf3e5aASoC: rockchip: pdm: Add missing clk_disable_unprepare() in rockchip_pdm_runtime_resume() | *85eb5c952bASoC: audio-graph-card: fix refcount leak of cpu_ep in __graph_for_each_link() | *9ff07316caASoC: mediatek: mt8173-rt5650-rt5514: fix refcount leak in mt8173_rt5650_rt5514_dev_probe() | *7643909cf0ASoC: Intel: Skylake: Fix driver hang during shutdown | *33ff0f9f9cALSA: hda: add snd_hdac_stop_streams() helper | *78649a624dALSA/ASoC: hda: move/rename snd_hdac_ext_stop_streams to hdac_stream.c | *98b0f50fechwmon: (jc42) Fix missing unlock on error in jc42_write() | *5e69233508KVM: selftests: Fix build regression by using accessor function | *6215904fe2tools/include: Add _RET_IP_ and math definitions to kernel.h | *c885326728orangefs: Fix kmemleak in orangefs_{kernel,client}_debug_init() | *39529b79b0orangefs: Fix kmemleak in orangefs_prepare_debugfs_help_string() | *a075c21ee0drm/sti: Fix return type of sti_{dvo,hda,hdmi}_connector_mode_valid() | *f3c14b99f3drm/fsl-dcu: Fix return type of fsl_dcu_drm_connector_mode_valid() | *9a8862820chugetlbfs: fix null-ptr-deref in hugetlbfs_parse_param() | *4f6b206998scsi: elx: libefc: Fix second parameter type in state callbacks | *23f0e9f863scsi: ufs: Reduce the START STOP UNIT timeout | *2cf66428a2scsi: lpfc: Fix hard lockup when reading the rx_monitor from debugfs | *2b3e3ecdb4crypto: hisilicon/hpre - fix resource leak in remove process | *adf6a00859clk: st: Fix memory leak in st_of_quadfs_setup() | *6c8aee0c8fmedia: si470x: Fix use-after-free in si470x_int_in_callback() | *58b6496a74mmc: renesas_sdhi: better reset from HS400 mode | *c33c904124mmc: f-sdh30: Add quirks for broken timeout clock capability | *69346de0ebwifi: mt76: do not run mt76u_status_worker if the device is not running | *feb847e659regulator: core: fix use_count leakage when handling boot-on | *474e70bd90libbpf: Avoid enum forward-declarations in public API in C++ mode | *6209542869drm/amd/display: Use the largest vready_offset in pipe group | *eff45bfbc2blk-mq: fix possible memleak when register 'hctx' failed | *d0af6220bbmedia: dvb-usb: fix memory leak in dvb_usb_adapter_init() | 
*88a6f8a72dmedia: dvbdev: adopts refcnt to avoid UAF | *438cd29fecmedia: dvb-frontends: fix leak of memory fw | *a96841f5aaethtool: avoiding integer overflow in ethtool_phys_id() | *b327c68acebpf: Prevent decl_tag from being referenced in func_proto arg | *4b8f3b9392ppp: associate skb with a device at tx | *5d5a481a7fmrp: introduce active flags to prevent UAF when applicant uninit | *222cc04356ipv6/sit: use DEV_STATS_INC() to avoid data-races | *8a3b023710net: add atomic_long_t to net_device_stats fields | *58dd11f624drm/amd/display: fix array index out of bound error in bios parser | *a3cc41e05emd/raid1: stop mdx_raid1 thread when raid1 array run failed | *b621d17fe8drivers/md/md-bitmap: check the return value of md_bitmap_get_counter() | *5afac74f15drm/mediatek: Fix return type of mtk_hdmi_bridge_mode_valid() | *072508e99ddrm/sti: Use drm_mode_copy() | *673a3e0199drm/rockchip: Use drm_mode_copy() | *b9b07900d2drm/msm: Use drm_mode_copy() | *5ad774fb82s390/lcs: Fix return type of lcs_start_xmit() | *dfbf0122eas390/netiucv: Fix return type of netiucv_tx() | *8131d1880cs390/ctcm: Fix return type of ctc{mp,}m_tx() | *f9084e9930drm/amdgpu: Fix type of second parameter in odn_edit_dpm_table() callback | *b74580d618drm/amdgpu: Fix type of second parameter in trans_msg() callback | *314f7092b2igb: Do not free q_vector unless new one was allocated | *0b12d2aa26wifi: brcmfmac: Fix potential shift-out-of-bounds in brcmf_fw_alloc_request() | *19bb9e98e1hamradio: baycom_epp: Fix return type of baycom_send_packet() | *a413ebb604net: ethernet: ti: Fix return type of netcp_ndo_start_xmit() | *5d3f4478d2bpf: make sure skb->len != 0 when redirecting to a tunneling device | *be2803dd29qed (gcc13): use u16 for fid to be big enough | *a8bc0ac438Revert "drm/amd/display: Limit max DSC target bpp for specific monitors" | *cc8deb82ccdrm/amd/display: prevent memory leak | *49dd0e8029ipmi: fix memleak when unload ipmi driver | *68871c005fASoC: codecs: rt298: Add quirk for KBL-R RVP platform | *3eca9697c2wifi: ar5523: Fix use-after-free on ar5523_cmd() timed out | *c319196a0ewifi: ath9k: verify the expected usb_endpoints are present | *10c4b63d09brcmfmac: return error when getting invalid max_flowrings from dongle | *ad31bc146fmedia: imx-jpeg: Disable useless interrupt to avoid kernel panic | *6e1a6880e1drm/etnaviv: add missing quirks for GC300 | *367296925chfs: fix OOB Read in __hfs_brec_find | *ebe16676e1acct: fix potential integer overflow in encode_comp_t() | *8b6ef451b5nilfs2: fix shift-out-of-bounds due to too large exponent of block size | *b47f5c579cnilfs2: fix shift-out-of-bounds/overflow in nilfs_sb2_bad_offset() | *5777432ebaACPICA: Fix error code path in acpi_ds_call_control_method() | *10b87da8fafs: jfs: fix shift-out-of-bounds in dbDiscardAG | *5059ea84a8jfs: Fix fortify moan in symlink | *e7a6a53c87udf: Avoid double brelse() in udf_rename() | *0536f76a2bfs: jfs: fix shift-out-of-bounds in dbAllocAG | *88cea1676abinfmt_misc: fix shift-out-of-bounds in check_special_flags | *cadb938a5ex86/hyperv: Remove unregister syscore call from Hyper-V cleanup | *659747f6f6video: hyperv_fb: Avoid taking busy spinlock on panic path | *9d05c20b0aarm64: make is_ttbrX_addr() noinstr-safe | *98a5b1265arcu: Fix __this_cpu_read() lockdep warning in rcu_force_quiescent_state() | *d238f94b2bHID: amd_sfh: Add missing check for dma_alloc_coherent | *9da204cd67net: stream: purge sk_error_queue in sk_stream_kill_queues() | *f47426250fmyri10ge: Fix an error handling path in myri10ge_probe() | *1ec0a7d5b0rxrpc: Fix missing unlock 
in rxrpc_do_sendmsg() | *5478eb7adcnet_sched: reject TCF_EM_SIMPLE case for complex ematch module | *4f05d8e2fbmailbox: zynq-ipi: fix error handling while device_register() fails | *550f403e46mailbox: arm_mhuv2: Fix return value check in mhuv2_probe() | *28604a960cmailbox: mpfs: read the system controller's status | *8fb773eed4skbuff: Account for tail adjustment during pull operations | *dc0f38957aarm64: dts: mt8183: Fix Mali GPU clock | *790b396f6bsoc: mediatek: pm-domains: Fix the power glitch issue | *0133615a06openvswitch: Fix flow lookup to use unmasked key | *04e454bd97selftests: devlink: fix the fd redirect in dummy_reporter_test | *d52646a46crtc: mxc_v2: Add missing clk_disable_unprepare() | *ac95c4e35figc: Set Qbv start_time and end_time to end_time if not being configured in GCL | *af59985138igc: Lift TAPRIO schedule restriction | *4d50d640edigc: recalculate Qbv end_time by considering cycle time | *1ef9416957igc: allow BaseTime 0 enrollment for Qbv | *c0df8e7ba6igc: Add checking for basetime less than zero | *5b46b53f45igc: Use strict cycles for Qbv scheduling | *fd7d029436igc: Enhance Qbv scheduling by using first flag bit | *9b5b50329er6040: Fix kmemleak in probe and remove | *1b428ba31bunix: Fix race in SOCK_SEQPACKET's unix_dgram_sendmsg() | *aae9c24ebdnfc: pn533: Clear nfc_target before being used | *bcf2c1dc53net: enetc: avoid buffer leaks on xdp_do_redirect() failure | *f463a1295cselftests/bpf: Add test for unstable CT lookup API | *094f3d9314block, bfq: fix possible uaf for 'bfqq->bic' | *cf48cb8debmISDN: hfcmulti: don't call dev_kfree_skb/kfree_skb() under spin_lock_irqsave() | *5607353751mISDN: hfcpci: don't call dev_kfree_skb/kfree_skb() under spin_lock_irqsave() | *ada4022f48mISDN: hfcsusb: don't call dev_kfree_skb/kfree_skb() under spin_lock_irqsave() | *0578f9929fnet: macsec: fix net device access prior to holding a lock | *a472f069cenfsd: under NFSv4.1, fix double svc_xprt_put on rpc_create failure | *f8f1d037d6rtc: pcf85063: fix pcf85063_clkout_control | *35a174552brtc: pic32: Move devm_rtc_allocate_device earlier in pic32_rtc_probe() | *eea105c4e4rtc: st-lpc: Add missing clk_disable_unprepare in st_rtc_probe() | *74248b5560netfilter: flowtable: really fix NAT IPv6 offload | *5c940632camfd: pm8008: Fix return value check in pm8008_probe() | *ec10848e26mfd: pm8008: Remove driver data structure pm8008_data | *38959417d3mfd: qcom_rpm: Fix an error handling path in qcom_rpm_probe() | *b95ae3543emfd: bd957x: Fix Kconfig dependency on REGMAP_IRQ | *615d3c8a46powerpc/pseries/eeh: use correct API for error log size | *68de42e008powerpc/eeh: Drop redundant spinlock initialization | *2b157b4b13remoteproc: qcom: q6v5: Fix missing clk_disable_unprepare() in q6v5_wcss_qcs404_power_on() | *4b191533f5remoteproc: qcom_q6v5_pas: Fix missing of_node_put() in adsp_alloc_memory_region() | *d7628ebca8remoteproc: qcom_q6v5_pas: detach power domains on remove | *fdf47f462aremoteproc: qcom_q6v5_pas: disable wakeup on probe fail or remove | *098ebb9089remoteproc: qcom: q6v5: Fix potential null-ptr-deref in q6v5_wcss_init_mmio() | *131c0a3eadremoteproc: sysmon: fix memory leak in qcom_add_sysmon_subdev() | *4507c6a672pwm: mediatek: always use bus clock for PWM on MT7622 | *4fbbb14f0epwm: mtk-disp: Fix the parameters calculated by the enabled flag of disp_pwm | *eec59807a2pwm: sifive: Call pwm_sifive_update_clock() while mutex is held | *37ea9a6c41iommu/sun50i: Remove IOMMU_DOMAIN_IDENTITY | *8de2c29db6selftests/powerpc: Fix resource leaks | *dd49c5031epowerpc/hv-gpci: Fix hv_gpci event 
list | *65d3469f3bpowerpc/83xx/mpc832x_rdb: call platform_device_put() in error case in of_fsl_spi_probe() | *cf03db2896powerpc/perf: callchain validate kernel stack pointer bounds | *5de1902244powerpc/xive: add missing iounmap() in error path in xive_spapr_populate_irq_data() | *b31e9647f1powerpc/xmon: Fix -Wswitch-unreachable warning in bpt_cmds | *6a310e8db5cxl: Fix refcount leak in cxl_calc_capp_routing | *0accd460dcpowerpc/52xx: Fix a resource leak in an error handling path | *be2b9b1a60macintosh/macio-adb: check the return value of ioremap() | *19ded60b40macintosh: fix possible memory leak in macio_add_one_device() | *e42b543d08iommu/fsl_pamu: Fix resource leak in fsl_pamu_probe() | *6e501b3fd7iommu/amd: Fix pci device refcount leak in ppr_notifier() | *9383921e8brtc: pcf85063: Fix reading alarm | *b66aa7b306rtc: snvs: Allow a time difference on clock register read | *7a6cc22eabrtc: cmos: Disable ACPI RTC event on removal | *689f757f0artc: cmos: Rename ACPI-related functions | *1c74bbecdartc: cmos: Eliminate forward declarations of some functions | *3a439a2cabrtc: cmos: Call rtc_wake_setup() from cmos_do_probe() | *9febdff75crtc: cmos: Call cmos_wake_setup() from cmos_do_probe() | *d9324fb3eertc: cmos: fix build on non-ACPI platforms | *fe46b9303ertc: cmos: Fix wake alarm breakage | *60c6e563a8rtc: cmos: Fix event handler registration ordering issue | *d3aa083469rtc: rtc-cmos: Do not check ACPI_FADT_LOW_POWER_S0 | *6e98a93c75dmaengine: idxd: Fix crc_val field for completion record | *ab53749c32fs/ntfs3: Fix slab-out-of-bounds read in ntfs_trim_fs | *1ba0968b33pwm: tegra: Improve required rate calculation | *c160505c9binclude/uapi/linux/swab: Fix potentially missing __always_inline | *59463193b0phy: usb: s2 WoL wakeup_count not incremented for USB->Eth devices | *ae00848e55iommu/rockchip: fix permission bits in page table entries v2 | *a7f6ad2c42iommu/sun50i: Fix flush size | *38ccb9b469iommu/sun50i: Fix R/W permission check | *ae4ab47a0biommu/sun50i: Consider all fault sources for reset | *84fee3ce82iommu/sun50i: Fix reset release | *6f9fe31a48fs/ntfs3: Harden against integer overflows | *30f20ceb87overflow: Implement size_t saturating arithmetic helpers | *4b51f27d44fs/ntfs3: Avoid UBSAN error on true_sectors_per_clst() | *28f345bec7RDMA/siw: Fix pointer cast warning | *01d925e2a5perf stat: Do not delay the workload with --delay | *a273f1dd5dperf stat: Refactor __run_perf_stat() common code | *d21534ab4fpower: supply: fix null pointer dereferencing in power_supply_get_battery_info | *d4898d8de6power: supply: ab8500: Fix error handling in ab8500_charger_init() | *30b191798fHSI: omap_ssi_core: Fix error handling in ssi_init() | *a72fe8eb55power: supply: z2_battery: Fix possible memleak in z2_batt_probe() | *5ba0e8fa15perf symbol: correction while adjusting symbol | *a34027b63dperf trace: Handle failure when trace point folder is missed | *60aeacce64perf trace: Use macro RAW_SYSCALL_ARGS_NUM to replace number | *e4700f62dcperf trace: Return error if a system call doesn't exist | *870ad0917dpower: supply: fix residue sysfs file in error handle route of __power_supply_register() | *1c2b9c8100HSI: omap_ssi_core: fix possible memory leak in ssi_probe() | *c5f729d3d6HSI: omap_ssi_core: fix unbalanced pm_runtime_disable() | *ea37831f83fbdev: uvesafb: Fixes an error handling path in uvesafb_probe() | *5bcae36b58fbdev: uvesafb: don't build on UML | *07c1a3c2dffbdev: geode: don't build on UML | *ace8312b5dfbdev: ep93xx-fb: Add missing clk_disable_unprepare in ep93xxfb_probe() | 
*04946113fbfbdev: vermilion: decrease reference count in error path | *fc0d5034fafbdev: via: Fix error in via_core_init() | *9827246333fbdev: pm2fb: fix missing pci_disable_device() | *3aa4205134fbdev: ssd1307fb: Drop optional dependency | *4958316a6dthermal/drivers/qcom/lmh: Fix irq handler return value | *ad72205ac6thermal/drivers/qcom/temp-alarm: Fix inaccurate warning for gen2 | *37fb4e13d2thermal/drivers/imx8mm_thermal: Validate temperature range | *95c18f4a3csamples: vfio-mdev: Fix missing pci_disable_device() in mdpy_fb_probe() | *31c1b5d300ksmbd: Fix resource leak in ksmbd_session_rpc_open() | *a44102d36atracing/hist: Fix issue of losting command info in error_log | *8308ccfcbdusb: storage: Add check for kcalloc | *96c12fd0eci2c: ismt: Fix an out-of-bounds bug in ismt_access() | *8212800943i2c: mux: reg: check return value after calling platform_get_resource() | *46d8f63bb8gpiolib: protect the GPIO device against being dropped while in use by user-space | *333a271dfdgpiolib: make struct comments into real kernel docs | *7c755a2d6dgpiolib: cdev: fix NULL-pointer dereferences | *b0a26e1999gpiolib: Get rid of redundant 'else' | *4bc217b25evme: Fix error not catched in fake_init() | *31bfe024a9staging: rtl8192e: Fix potential use-after-free in rtllib_rx_Monitor() | *b0aaec894astaging: rtl8192u: Fix use after free in ieee80211_rx() | *ed4580c3f8i2c: pxa-pci: fix missing pci_disable_device() on error in ce4100_i2c_probe | *28dc61cc49chardev: fix error handling in cdev_device_add() | *43bfc7c240mcb: mcb-parse: fix error handing in chameleon_parse_gdd() | *f3686e5e8ddrivers: mcb: fix resource leak in mcb_probe() | *9d4a0aca8ausb: gadget: f_hid: fix refcount leak on error path | *d3136b7970usb: gadget: f_hid: fix f_hidg lifetime vs cdev | *a41c2bba7fusb: roles: fix of node refcount leak in usb_role_switch_is_parent() | *18b9202188coresight: trbe: remove cpuhp instance node before remove cpuhp state | *e854a4ab38counter: stm32-lptimer-cnt: fix the check on arr and cmp registers update | *39a156715fiio: adis: add '__adis_enable_irq()' implementation | *3a2dde8e5diio:imu:adis: Move exports into IIO_ADISLIB namespace | *3c2e13025biio: adis: stylistic changes | *de3e358912iio: adis: handle devices that cannot unmask the drdy pin | *994243de7aiio: temperature: ltc2983: make bulk write buffer DMA-safe | *22511eefa6cxl: fix possible null-ptr-deref in cxl_pci_init_afu|adapter() | *e5021bbf11cxl: fix possible null-ptr-deref in cxl_guest_init_afu|adapter() | *b308fdedeffirmware: raspberrypi: fix possible memory leak in rpi_firmware_probe() | *d5c8f9003amisc: sgi-gru: fix use-after-free error in gru_set_context_option, gru_fault and gru_handle_user_call_os | *848c45964dmisc: tifm: fix possible memory leak in tifm_7xx1_switch_media() | *37a13b274eocxl: fix pci device refcount leak when calling get_function_0() | *3299983a6bmisc: ocxl: fix possible name leak in ocxl_file_register_afu() | *357379d504test_firmware: fix memory leak in test_firmware_init() | *07d547d742serial: sunsab: Fix error handling in sunsab_init() | *919e745fddserial: altera_uart: fix locking in polling mode | *e1c4f18214tty: serial: altera_uart_{r,t}x_chars() need only uart_port | *b133b45ba6tty: serial: clean up stop-tx part in altera_uart_tx_chars() | *6f7d82380fserial: pch: Fix PCI device refcount leak in pch_request_dma() | *0dfc7dfe5bserial: stm32: move dma_request_chan() before clk_prepare_enable() | *926b0967f7serial: pl011: Do not clear RX FIFO & RX interrupt in unthrottle. 
| *d71a611fcaserial: amba-pl011: avoid SBSA UART accessing DMACR register | *f46f9d2e16extcon: usbc-tusb320: Update state on probe even if no IRQ pending | *ac067e75c4extcon: usbc-tusb320: Add USB TYPE-C support | *9280761167extcon: usbc-tusb320: Factor out extcon into dedicated functions | *05aa8ff22dusb: typec: Factor out non-PD fwnode properties | *31e9c27510extcon: usbc-tusb320: Add support for TUSB320L | *b9c8820d91extcon: usbc-tusb320: Add support for mode setting and reset | *4524599a6ausb: typec: tipd: Fix spurious fwnode_handle_put in error path | *b0d86eacc8usb: typec: tipd: Cleanup resources if devm_tps6598_psy_register fails | *ba75be6f0dusb: typec: tcpci: fix of node refcount leak in tcpci_register_port() | *154d5713a2usb: typec: Check for ops->exit instead of ops->enter in altmode_exit | *1f5661388fstaging: vme_user: Fix possible UAF in tsi148_dma_list_add | *a3c4bc2616usb: fotg210-udc: Fix ages old endianness issues | *5e87d41221uio: uio_dmem_genirq: Fix deadlock between irq config and handling | *79a4bdb6b9uio: uio_dmem_genirq: Fix missing unlock in irq configuration | *3f22a273efvfio: platform: Do not pass return buffer to ACPI _RST method | *417ef049e3class: fix possible memory leak in __class_register() | *f76824ab2bserial: 8250_bcm7271: Fix error handling in brcmuart_init() | *6b4424efcfserial: tegra: Read DMA status before terminating | *a0ead7e8dadrivers: dio: fix possible memory leak in dio_init() | *e8985caf05RISC-V: Align the shadow stack | *ca48174a76IB/IPoIB: Fix queue count inconsistency for PKEY child interfaces | *82bd423ed9hwrng: geode - Fix PCI device refcount leak | *2b79a5e560hwrng: amd - Fix PCI device refcount leak | *42cbff35f4crypto: img-hash - Fix variable dereferenced before check 'hdev->req' | *b9634f99b6RDMA/hns: Fix error code of CMD | *b06bb747ceRDMA/hns: Fix page size cap from firmware | *4c05c7cf25RDMA/hns: Fix PBL page MTR find | *fa267c4192RDMA/hns: Fix AH attr queried by query_qp | *e27fb26e75orangefs: Fix sysfs not cleanup when dev init failed | *3e9c395ef2PCI: mt7621: Add sentinel to quirks table | *bcc65c2e2aPCI: mt7621: Rename mt7621_pci_ to mt7621_pcie_ | *0a7eab1cc4RDMA/srp: Fix error return code in srp_parse_options() | *6301100179RDMA/hfi1: Fix error return code in parse_platform_config() | *339ca035afriscv/mm: add arch hook arch_clear_hugepage_flags | *20d363dcd6crypto: omap-sham - Use pm_runtime_resume_and_get() in omap_sham_probe() | *815b65d714crypto: amlogic - Remove kcalloc without check | *af71199291RDMA/nldev: Fix failure to send large messages | *bb895786a4f2fs: avoid victim selection from previous victim section | *655e955debRDMA/nldev: Add checks for nla_nest_start() in fill_stat_counter_qps() | *1895e908b3scsi: snic: Fix possible UAF in snic_tgt_create() | *09a60f908dscsi: fcoe: Fix transport not deattached when fcoe_if_init() fails | *e59da17205scsi: ipr: Fix WARNING in ipr_init() | *c444f58fdascsi: scsi_debug: Fix possible name leak in sdebug_add_host_helper() | *4e4968dfb5scsi: fcoe: Fix possible name leak when device_register() fails | *0f5006d7d1scsi: scsi_debug: Fix a warning in resp_report_zones() | *2432719b1ascsi: scsi_debug: Fix a warning in resp_verify() | *038359eeccscsi: efct: Fix possible memleak in efct_device_init() | *23053a7926scsi: hpsa: Fix possible memory leak in hpsa_add_sas_device() | *2ab6d5927cscsi: hpsa: Fix error handling in hpsa_add_sas_host() | *6a92129c8fscsi: mpt3sas: Fix possible resource leaks in mpt3sas_transport_port_add() | *26c0f7e1acpadata: Fix list iterator in padata_do_serial() | 
*17afa98bccpadata: Always leave BHs disabled when running ->parallel() | *221afb2a1bcrypto: tcrypt - Fix multibuffer skcipher speed test mem leak | *bfe10a1d9fscsi: hpsa: Fix possible memory leak in hpsa_init_one() | *38ef0c0b09dt-bindings: visconti-pcie: Fix interrupts array max constraints | *83aad8111bdt-bindings: imx6q-pcie: Fix clock names for imx6sx and imx8mq | *f64f08b9e6RDMA/rxe: Fix NULL-ptr-deref in rxe_qp_do_cleanup() when socket create failed | *35f9cd060eRDMA/hns: fix memory leak in hns_roce_alloc_mr() | *6d5220a553crypto: ccree - Make cc_debugfs_global_fini() available for module init function | *2e9cf3e783RDMA/hfi: Decrease PCI device reference count in error path | *7f476d639cPCI: Check for alloc failure in pci_request_irq() | *49bc2be897RDMA/hns: Fix ext_sge num error when post send | *0e6160d79dRDMA/hns: Repacing 'dseg_len' by macros in fill_ext_sge_inl_data() | *e5ea48788ecrypto: hisilicon/qm - add missing pci_dev_put() in q_num_set() | *442caec12fcrypto: cryptd - Use request context instead of stack for sub-request | *ab677729fccrypto: ccree - Remove debugfs when platform_driver_register failed | *0328ca389ascsi: scsi_debug: Fix a warning in resp_write_scat() | *1ba8ecb664RDMA/siw: Set defined status for work completion with undefined status | *6e757005baRDMA/nldev: Return "-EAGAIN" if the cm_id isn't from expected port | *f981c697b2RDMA/core: Make sure "ib_port" is valid when access sysfs node | *13586753aeRDMA/restrack: Release MR restrack when delete | *6e78ca677fPCI: vmd: Disable MSI remapping after suspend | *47e31b86edIB/mad: Don't call to function that might sleep while in atomic context | *f8d8fbd3b6RDMA/siw: Fix immediate work request flush to completion queue | *2a26849d79scsi: qla2xxx: Fix set-but-not-used variable warnings | *799ed37559RDMA/irdma: Report the correct link speed | *d40d1b1c61f2fs: fix to destroy sbi->post_read_wq in error path of f2fs_fill_super() | *847f725006f2fs: fix normal discard process | *865bb7b5a7f2fs: fix to invalidate dcc->f2fs_issue_discard in error path | *5f509fa740apparmor: Fix memleak in alloc_ns() | *46f3cb83e4crypto: rockchip - rework by using crypto_engine | *3ed0548d39crypto: rockchip - remove non-aligned handling | *5562009f5fcrypto: rockchip - better handle cipher key | *26f3971356crypto: rockchip - add fallback for ahash | *34fe54af3ccrypto: rockchip - add fallback for cipher | *314217591ecrypto: rockchip - do not store mode globally | *853cd97d2bcrypto: rockchip - do not do custom power management | *d5100272e4f2fs: Fix the race condition of resize flag between resizefs | *db72c5dffcPCI: pci-epf-test: Register notifier if only core_init_notifier is enabled | *26ffeff67bRDMA/core: Fix order of nldev_exit call | *a00a7ac251PCI: dwc: Fix n_fts[] array overrun | *10ae636115apparmor: Use pointer to struct aa_label for lbs_cred | *8d50ccfbe2scsi: core: Fix a race between scsi_done() and scsi_timeout() | *9bdf3a59b3crypto: nitrox - avoid double free on error path in nitrox_sriov_init() | *7efc0d39eecrypto: sun8i-ss - use dma_addr instead u32 | *aaef0bdd7acrypto: hisilicon/qm - fix missing destroy qp_idr | *d567776ae2apparmor: Fix abi check to include v8 abi | *bc9d2cbbdcapparmor: fix lockdep warning when removing a namespace | *775a37ffa9apparmor: fix a memleak in multi_transaction_new() | *09f30f394enet: dsa: tag_8021q: avoid leaking ctx on dsa_tag_8021q_register() error path | *86664b8652i40e: Fix the inability to attach XDP program on downed interface | *0abd337acdstmmac: fix potential division by 0 | *93a4a04558Bluetooth: 
RFCOMM: don't call kfree_skb() under spin_lock_irqsave() | *8d6bbe5241Bluetooth: hci_core: don't call kfree_skb() under spin_lock_irqsave() | *804de4e24aBluetooth: hci_bcsp: don't call kfree_skb() under spin_lock_irqsave() | *1030c3aeeeBluetooth: hci_h5: don't call kfree_skb() under spin_lock_irqsave() | *9fcb5b367eBluetooth: hci_ll: don't call kfree_skb() under spin_lock_irqsave() | *14cc94a598Bluetooth: hci_qca: don't call kfree_skb() under spin_lock_irqsave() | *06467130d5Bluetooth: btusb: don't call kfree_skb() under spin_lock_irqsave() | *e52b7d460aBluetooth: btintel: Fix missing free skb in btintel_setup_combined() | *f7c9de3bcfBluetooth: MGMT: Fix error report for ADD_EXT_ADV_PARAMS | *2addf3cb63sctp: sysctl: make extra pointers netns aware | *21296a52cantb_netdev: Use dev_kfree_skb_any() in interrupt context | *0fff763f11net: lan9303: Fix read error execution path | *882bad40a0can: tcan4x5x: Fix use of register error status mask | *d50092f662can: m_can: Call the RAM init directly from m_can_chip_config | *55064642aacan: tcan4x5x: Remove invalid write in clear_interrupts | *641eef8766net: amd-xgbe: Check only the minimum speed for active/passive cables | *60b35e28dcnet: amd-xgbe: Fix logic around active and passive cables | *d436bf39f4af_unix: call proto_unregister() in the error path in af_unix_init() | *ee9d03bf89net: amd: lance: don't call dev_kfree_skb() under spin_lock_irqsave() | *6f1c4c01cchamradio: don't call dev_kfree_skb() under spin_lock_irqsave() | *eb2c6a6e8fnet: ethernet: dnet: don't call dev_kfree_skb() under spin_lock_irqsave() | *ef08e1082cnet: emaclite: don't call dev_kfree_skb() under spin_lock_irqsave() | *2786ef4066net: apple: bmac: don't call dev_kfree_skb() under spin_lock_irqsave() | *d81314e2ddnet: apple: mace: don't call dev_kfree_skb() under spin_lock_irqsave() | *9a6544343bnet/tunnel: wait until all sk_user_data reader finish before releasing the sock | *998b4e54f5net: farsync: Fix kmemleak when rmmods farsync | *71605c6906ethernet: s2io: don't call dev_kfree_skb() under spin_lock_irqsave() | *ce1b3a41e7of: overlay: fix null pointer dereferencing in find_dup_cset_node_entry() and find_dup_cset_prop() | *8399b98935drivers: net: qlcnic: Fix potential memory leak in qlcnic_sriov_init() | *96e5089702net: stmmac: fix possible memory leak in stmmac_dvr_probe() | *ecaf934e44net: stmmac: selftests: fix potential memleak in stmmac_test_arpoffload() | *e1359bc90anet: defxx: Fix missing err handling in dfx_init() | *c65603abc3net: vmw_vsock: vmci: Check memcpy_from_msg() | *9de42116fcclk: socfpga: Fix memory leak in socfpga_gate_init() | *e515881adebpf: Do not zero-extend kfunc return values | *ce61a877c7blktrace: Fix output non-blktrace event when blk_classic option enabled | *f2ae56fa0bwifi: brcmfmac: Fix error return code in brcmf_sdio_download_firmware() | *23060daf37wifi: rtl8xxxu: Fix the channel width reporting | *6d0e00334ewifi: rtl8xxxu: Add __packed to struct rtl8723bu_c2h | *e69d380650spi: spi-gpio: Don't set MOSI as an input if not 3WIRE mode | *4e501a31afclk: samsung: Fix memory leak in _samsung_clk_register_pll() | *441c05485cmedia: coda: Add check for kmalloc | *b99872178emedia: coda: Add check for dcoda_iram_alloc | *fbf081ebe2media: c8sectpfe: Add of_node_put() when breaking out of loop | *2a7330d820regulator: qcom-labibb: Fix missing of_node_put() in qcom_labibb_regulator_probe() | *ecf1b317a8mmc: core: Normalize the error handling branch in sd_read_ext_regs() | *7fecca429ememstick/ms_block: Add check for alloc_ordered_workqueue | 
*b77ced3fcememstick: ms_block: Add error handling support for add_disk() | *ae00eb6779mmc: renesas_sdhi: alway populate SCC pointer | *88fa6a4e39mmc: mmci: fix return value check of mmc_add_host() | *29c3690969mmc: wbsd: fix return value check of mmc_add_host() | *0959cc1685mmc: via-sdmmc: fix return value check of mmc_add_host() | *e0cfe7aa41mmc: meson-gx: fix return value check of mmc_add_host() | *62005dfcc3mmc: omap_hsmmc: fix return value check of mmc_add_host() | *1925472decmmc: atmel-mci: fix return value check of mmc_add_host() | *58c3a8d0f1mmc: wmt-sdmmc: fix return value check of mmc_add_host() | *afc898019emmc: vub300: fix return value check of mmc_add_host() | *6444079767mmc: toshsd: fix return value check of mmc_add_host() | *df683201c7mmc: rtsx_usb_sdmmc: fix return value check of mmc_add_host() | *30dc645461mmc: rtsx_pci: fix return value check of mmc_add_host() | *bc7e8744f5mmc: pxamci: fix return value check of mmc_add_host() | *2d496050demmc: mxcmmc: fix return value check of mmc_add_host() | *f0502fe86ammc: moxart: fix return value check of mmc_add_host() | *29c5b4da41mmc: alcor: fix return value check of mmc_add_host() | *52e0d8a8ddriscv, bpf: Emit fixed-length instructions for BPF_PSEUDO_FUNC | *0de70ed675NFSv4.x: Fail client initialisation if state manager thread can't run | *7055c878a0SUNRPC: Fix missing release socket in rpc_sockname() | *79d4cd40daxprtrdma: Fix regbuf data not freed in rpcrdma_req_create() | *cba633b24aALSA: mts64: fix possible null-ptr-defer in snd_mts64_interrupt | *9018550d96media: saa7164: fix missing pci_disable_device() | *2df1e2a6ecALSA: pcm: Set missing stop_operating flag at undoing trigger start | *a443c55d96bpf, sockmap: fix race in sock_map_free() | *5229b90337hwmon: (jc42) Restore the min/max/critical temperatures on resume | *785f5c732ahwmon: (jc42) Convert register access and caching to regmap/regcache | *c4c64d8abdregulator: core: fix resource leak in regulator_register() | *07f82dca11configfs: fix possible memory leak in configfs_create_dir() | *21a061772bhsr: Synchronize sequence number updates. | *a82f5b2e08hsr: Synchronize sending frames to have always incremented outgoing seq nr. | *bb3b40cd6ahsr: Disable netpoll. | *8e148d981bhsr: Avoid double remove of a node. | *9387cbf7f7hsr: Add a rcu-read lock to hsr_forward_skb(). 
| *a051e10bfcclk: qcom: clk-krait: fix wrong div2 functions | *8275c7465dclk: qcom: lpass-sc7180: Fix pm_runtime usage | *91657ec4d0regulator: core: fix module refcount leak in set_supply() | *66976a3be9wifi: mt76: fix coverity overrun-call in mt76_get_txpower() | *a21e3f6f41wifi: mt76: mt7921: fix reporting of TX AGGR histogram | *c8659018b6mt76: stop the radar detector after leaving dfs channel | *ae19622e7fwifi: cfg80211: Fix not unregister reg_pdev when load_builtin_regdb_keys() fails | *2e32f12998wifi: mac80211: fix memory leak in ieee80211_if_add() | *f58888434dspi: spidev: mask SPI_CS_HIGH in SPI_IOC_RD_MODE | *b6d27d9250bonding: uninitialized variable in bond_miimon_inspect() | *7201e4f4f5bpf, sockmap: Fix data loss caused by using apply_bytes on ingress redirect | *6105ed3598bpf, sockmap: Fix missing BPF_F_INGRESS flag when using apply_bytes | *8786bde11abpf, sockmap: Fix repeated calls to sock_put() when msg has more_data | *a222f992ceInput: wistron_btns - disable on UML | *d78649c21bnetfilter: conntrack: set icmpv6 redirects as RELATED | *09fe3b1392ASoC: pcm512x: Fix PM disable depth imbalance in pcm512x_probe | *8876793e56drm/amdkfd: Fix memory leakage | *8f2d2badf8drm/amdgpu: Fix PCI device refcount leak in amdgpu_atrm_get_bios() | *88c6e0995cdrm/radeon: Fix PCI device refcount leak in radeon_atrm_get_bios() | *0af0ff9fc0drm/amd/pm/smu11: BACO is supported when it's in BACO state | *27e7cf595dASoC: mediatek: mt8173: Enable IRQ when pdata is ready | *905e565375ASoC: mediatek: mt8173: Fix debugfs registration for components | *d8e32f1bf1wifi: iwlwifi: mvm: fix double free on tx path. | *d0bb44775cALSA: asihpi: fix missing pci_disable_device() | *f12377abacNFS: Fix an Oops in nfs_d_automount() | *9a96aff53cNFSv4: Fix a deadlock between nfs4_open_recover_helper() and delegreturn | *c6aca4c7baNFSv4: Fix a credential leak in _nfs4_discover_trunking() | *7f6607c884NFSv4.2: Fix initialisation of struct nfs4_label | *51899eefd1NFSv4.2: Fix a memory stomp in decode_attr_security_label | *34dffc77ddNFSv4.2: Clear FATTR4_WORD2_SECURITY_LABEL when done decoding | *d926611c89ASoC: mediatek: mtk-btcvsd: Add checks for write and read of mtk_btcvsd_snd | *f243ff92d6ASoC: dt-bindings: wcd9335: fix reset line polarity in example | *41d7b8291cdrm/tegra: Add missing clk_disable_unprepare() in tegra_dc_probe() | *2376d7fa08media: s5p-mfc: Add variant data for MFC v7 hardware for Exynos 3250 SoC | *210fcf64bemedia: dvb-usb: az6027: fix null-ptr-deref in az6027_i2c_xfer() | *b223cc15f9media: dvb-core: Fix ignored return value in dvb_register_frontend() | *825a8af31dpinctrl: pinconf-generic: add missing of_node_put() | *eedc698d66clk: imx8mn: fix imx8mn_enet_phy_sels clocks list | *f86a432604clk: imx8mn: fix imx8mn_sai2_sels clocks list | *5e98c3a345clk: imx: replace osc_hdmi with dummy | *9453e097b8clk: imx8mn: rename vpu_pll to m7_alt_pll | *bffc80bac8media: imon: fix a race condition in send_packet() | *9c9ff35d68media: vimc: Fix wrong function called when vimc_init() fails | *f849c116d3ASoC: qcom: Add checks for devm_kcalloc | *16437645dddrbd: destroy workqueue when drbd device was freed | *cdaf45415cdrbd: remove call to memset before free device/resource/connection | *f35981083cmtd: maps: pxa2xx-flash: fix memory leak in probe | *87c750affdbonding: fix link recovery in mode 2 when updelay is nonzero | *02105f0b30drm/amdgpu: fix pci device refcount leak | *5b0a1f1247clk: rockchip: Fix memory leak in rockchip_clk_register_pll() | *27aac5c012regulator: core: use kfree_const() to free space 
conditionally | *a69b1faa9bALSA: seq: fix undefined behavior in bit shift for SNDRV_SEQ_FILTER_USE_EVENT | *9c0f3617baALSA: pcm: fix undefined behavior in bit shift for SNDRV_PCM_RATE_KNOT | *6159424e2dpinctrl: k210: call of_node_put() | *18a973fcb1HID: hid-sensor-custom: set fixed size for custom attributes | *0fc4280dbebpf: Move skb->len == 0 checks into __bpf_redirect | *8dbcb4c284mtd: spi-nor: Fix the number of bytes for the dummy cycles | *58e1a0ef52mtd: spi-nor: hide jedec_id sysfs attribute if not present | *348d95e39finet: add READ_ONCE(sk->sk_bound_dev_if) in inet_csk_bind_conflict() | *4451bef1a3media: videobuf-dma-contig: use dma_mmap_coherent | *b2781a8626media: platform: exynos4-is: Fix error handling in fimc_md_init() | *7cf71bbe5dmedia: solo6x10: fix possible memory leak in solo_sysfs_init() | *c290aa527fmedia: vidtv: Fix use-after-free in vidtv_bridge_dvb_init() | *648f303102Input: elants_i2c - properly handle the reset GPIO when power is off | *e0d3e46ac6mtd: lpddr2_nvm: Fix possible null-ptr-deref | *ab4e42f519drm/msm/a6xx: Fix speed-bin detection vs probe-defer | *fea795f7c7wifi: ath10k: Fix return value in ath10k_pci_init() | *77482c4dd4block: clear ->slave_dir when dropping the main slave_dir reference | *62251948e2ima: Fix misuse of dereference of pointer in template_desc_init_fields() | *29d6c69ba4integrity: Fix memory leakage in keyring allocation error path | *8e6df95717drm/fourcc: Fix vsub/hsub for Q410 and Q401 | *ec1727f89edrm/fourcc: Add packed 10bit YUV 4:2:0 format | *f72608b8ddregulator: qcom-rpmh: Fix PMR735a S3 regulator spec | *63d011ad05nvme: return err on nvme_init_non_mdts_limits fail | *f289a38df0amdgpu/pm: prevent array underflow in vega20_odn_edit_dpm_table() | *cda1895f3bregulator: core: fix unbalanced of node refcount in regulator_dev_lookup() | *1a5aaa5736nvmet: only allocate a single slab for bvecs | *cb3033a432libbpf: Fix uninitialized warning in btf_dump_dump_type_data | *83baa50939ASoC: pxa: fix null-pointer dereference in filter() | *a06ba0f7f8drm/mediatek: Modify dpi power on/off sequence. 
| *6d25bc6370drm/radeon: Add the missed acpi_put_table() to fix memory leak | *4cf11e9d31bfq: fix waker_bfqq inconsistency crash | *55e822212erxrpc: Fix ack.bufferSize to be 0 when generating an ack | *5ef8bf0df1net, proc: Provide PROC_FS=n fallback for proc_create_net_single_write() | *d1c44928bbmedia: camss: Clean up received buffers on failed start of streaming | *3b4b4df3f8wifi: rsi: Fix handling of 802.3 EAPOL frames sent via control port | *9e1440c858Input: joystick - fix Kconfig warning for JOYSTICK_ADC | *71212d7318mtd: Fix device name leak when register device failed in add_mtd_device() | *106311677bclk: qcom: gcc-sm8250: Use retention mode for USB GDSCs | *322c7415e7bpf: propagate precision across all frames, not just the last one | *07c286c10abpf: Check the other end of slot_type for STACK_SPILL | *fdbc363bc1bpf: propagate precision in ALU/ALU64 operations | *b29e46610cmedia: platform: exynos4-is: fix return value check in fimc_md_probe() | *ab54081a28media: vivid: fix compose size exceed boundary | *3c58c83c6fbpf: Fix slot type check in check_stack_write_var_off | *cffa75198cdrm/msm/hdmi: use devres helper for runtime PM management | *58d002b72edrm/msm/hdmi: drop unused GPIO support | *2d4bc60693ima: Handle -ESTALE returned by ima_filter_rule_match() | *13fc167e16drm/panel/panel-sitronix-st7701: Remove panel on DSI attach failure | *c20672cfa0spi: Update reference to struct spi_controller | *2858d038c5clk: renesas: r9a06g032: Repair grave increment error | *f6ed73db39drm/rockchip: lvds: fix PM usage counter unbalance in poweron | *13fab6322bcan: kvaser_usb: Compare requested bittiming parameters with actual parameters in do_set_{,data}_bittiming | *4e55d61e87can: kvaser_usb: Add struct kvaser_usb_busparams | *fcfd4df200can: kvaser_usb_leaf: Fix bogus restart events | *51f07da38bcan: kvaser_usb_leaf: Fix wrong CAN state after stopping | *647c26887bcan: kvaser_usb_leaf: Fix improved state not being reported | *9676d65a4acan: kvaser_usb: make use of units.h in assignment of frequency | *c761108562can: kvaser_usb_leaf: Set Warning state even without bus errors | *a60bf9d814can: kvaser_usb: kvaser_usb_leaf: Handle CMD_ERROR_EVENT | *8aae6bddc1can: kvaser_usb: kvaser_usb_leaf: Rename {leaf,usbcan}_cmd_error_event to {leaf,usbcan}_cmd_can_error_event | *972270be24can: kvaser_usb: kvaser_usb_leaf: Get capabilities from device | *e9e0d9945fcan: kvaser_usb: do not increase tx statistics when sending error message frames | *e39bce64e5libbpf: Btf dedup identical struct test needs check for nested structs/arrays | *d4419f93e2media: exynos4-is: don't rely on the v4l2_async_subdev internals | *8741792d82soreuseport: Fix socket selection for SO_INCOMING_CPU. 
| *094f56192cvenus: pm_helpers: Fix error check in vcodec_domains_get() | *3c793a9ad9media: i2c: ad5820: Fix error path | *07611f9e44media: adv748x: afe: Select input port when initializing AFE | *aa81257dbfmedia: coda: jpeg: Add check for kmalloc | *9a402adc9fmedia: v4l2-ctrls: Fix off-by-one error in integer menu control check | *1caed03305drm/amdgpu/powerplay/psm: Fix memory leak in power state init | *f66a877083ipmi: kcs: Poll OBF briefly to reduce OBE latency | *983320199eata: libata: fix NCQ autosense logic | *a9caf71aebata: add/use ata_taskfile::{error|status} fields | *3483c3fb48ata: libata: move ata_{port,link,dev}_dbg to standard pr_XXX() macros | *6706135577libbpf: Fix null-pointer dereference in find_prog_by_sec_insn() | *a733bf1019libbpf: Fix use-after-free in btf_dump_name_dups | *b5ec2a04fedrm/bridge: adv7533: remove dynamic lane switching from adv7533 bridge | *6d40a49d05wifi: rtl8xxxu: Fix reading the vendor of combo chips | *355f16f756wifi: ath9k: hif_usb: Fix use-after-free in ath9k_hif_usb_reg_in_cb() | *d856f7574bwifi: ath9k: hif_usb: fix memory leak of urbs in ath9k_hif_usb_dealloc_tx_urbs() | *12229a2523platform/mellanox: mlxbf-pmc: Fix event typo | *a0d93aac54rapidio: devices: fix missing put_device in mport_cdev_open | *7af9cb8cbbhfs: Fix OOB Write in hfs_asc2mac | *90962b3b1crelay: fix type mismatch when allocating memory in relay_create_buf() | *0d60b11f8feventfd: change int to __u64 in eventfd_signal() ifndef CONFIG_EVENTFD | *2f5cc7fd73rapidio: fix possible UAF when kfifo_alloc() fails | *337b68da68fs: sysv: Fix sysv_nblocks() returns wrong value | *95d42a8d3dlockd: set other missing fields when unlocking files | *318229b4d3MIPS: OCTEON: warn only once if deprecated link status is being used | *5e6d37a93aMIPS: BCM63xx: Add check for NULL for clk in clk_enable | *50af0ba3e1platform/x86: intel_scu_ipc: fix possible name leak in __intel_scu_ipc_register() | *3cf8150135platform/x86: mxm-wmi: fix memleak in mxm_wmi_call_mx[ds|mx]() | *0ceadb5a3eplatform/chrome: cros_ec_typec: zero out stale pointers | *49c98b5688platform/chrome: cros_ec_typec: Cleanup switch handle return paths | *b55ef8508aPM: runtime: Do not call __rpm_callback() from rpm_idle() | *0bf874183bxen/privcmd: Fix a possible warning in privcmd_ioctl_mmap_resource() | *70966d6b0fx86/xen: Fix memory leak in xen_init_lock_cpu() | *23aef94eeax86/xen: Fix memory leak in xen_smp_intr_init{_pv}() | *03ab1c5c2fuprobes/x86: Allow to probe a NOP instruction with 0x66 prefix | *6fde666278ACPICA: Fix use-after-free in acpi_ut_copy_ipackage_to_ipackage() | *9cabd5f4f1clocksource/drivers/timer-ti-dm: Fix missing clk_disable_unprepare in dmtimer_systimer_init_clock() | *b73c76c3c4cpu/hotplug: Do not bail-out in DYING/STARTING sections | *6eb1802184cpu/hotplug: Make target_store() a nop when target == state | *cd130e2676futex: Resend potentially swallowed owner death notification | *fd8a10d44cfutex: Move to kernel/futex/ | *156144bd18mips: ralink: mt7621: do not use kzalloc too early | *186d59bb6amips: ralink: mt7621: soc queries and tests as functions | *8348da01e5mips: ralink: mt7621: define MT7621_SYSC_BASE with __iomem | *0f8e6fe09cclocksource/drivers/sh_cmt: Access registers according to spec | *a47de2fd3frapidio: rio: fix possible name leak in rio_register_mport() | *ec3f04f74frapidio: fix possible name leaks when rio_add_device() fails | *4662d8e6abdebugfs: fix error when writing negative value to atomic_t debugfs file | *7e8e8cc136lib/notifier-error-inject: fix error when writing -errno to debugfs file | 
*39b5e6130blibfs: add DEFINE_SIMPLE_ATTRIBUTE_SIGNED for signed value | *19c202e6e5cpufreq: amd_freq_sensitivity: Add missing pci_dev_put() | *93e3c80338genirq/irqdesc: Don't try to remove non-existing sysfs files | *435cc7d18cnfsd: don't call nfsd_file_put from client states seqfile display | *5030d4d2bfNFSD: Finish converting the NFSv2 GETACL result encoder | *e498675e06SUNRPC: Return true/false (not 1/0) from bool functions | *3e255dc210EDAC/i10nm: fix refcount leak in pci_get_dev_wrapper() | *740efb64cairqchip/wpcm450: Fix memory leak in wpcm450_aic_of_init() | *77b99b483firqchip: gic-pm: Use pm_runtime_resume_and_get() in gic_probe() | *5139cbc0c6thermal: core: fix some possible name leaks in error paths | *cab345f9d5platform/chrome: cros_usbpd_notify: Fix error handling in cros_usbpd_notify_init() | *0358bc7cc2perf/x86/intel/uncore: Fix reference count leak in __uncore_imc_init_box() | *433bd587dcperf/x86/intel/uncore: Fix reference count leak in snr_uncore_mmio_map() | *3485f19751perf/x86/intel/uncore: Fix reference count leak in hswep_has_limit_sbox() | *0021ef7dc6perf/x86/intel/uncore: Fix reference count leak in sad_cfg_iio_topology() | *c12b314bb2PNP: fix name memory leak in pnp_alloc_dev() | *f1c7a6af71selftests/efivarfs: Add checking of the test return value | *46be3ee1caMIPS: vpe-cmp: fix possible memory leak while module exiting | *e820a8192fMIPS: vpe-mt: fix possible memory leak while module exiting | *61d68cf2baocfs2: fix memory leak in ocfs2_stack_glue_init() | *e83b47580alib/fonts: fix undefined behavior in bit shift for get_default_font | *0df7d9ab6bproc: fixup uptime selftest | *07b8659b8etimerqueue: Use rb_entry_safe() in timerqueue_getnext() | *413b18866bplatform/x86: huawei-wmi: fix return value calculation | *4b46932283lib/debugobjects: fix stat count and optimize debug_objects_mem_init | *f790dfe816perf: Fix possible memleak in pmu_dev_alloc() | *418d21c0dfselftests/ftrace: event_triggers: wait longer for test_event_enable | *4ea765b106cpufreq: qcom-hw: Fix memory leak in qcom_cpufreq_hw_read_lut() | *c52d9c25d9fs: don't audit the capability check in simple_xattr_list() | *e4d0d13b46PM: hibernate: Fix mistake in kerneldoc comment | *1f62b8e50dx86/sgx: Reduce delay and interference of enclave release | *f5b88170f0alpha: fix syscall entry in !AUDUT_SYSCALL case | *a819ba80b9alpha: fix TIF_NOTIFY_SIGNAL handling | *eb2a732ef4cpuidle: dt: Return the correct numbers of parsed idle states | *3af4f5cb8asched/uclamp: Make asym_fits_capacity() use util_fits_cpu() | *23cb580e0csched/core: Introduce sched_asym_cpucap_active() | *41c2dba388sched/fair: Removed useless update of p->recent_used_cpu | *55ffeab089sched/uclamp: Make select_idle_capacity() use util_fits_cpu() | *4639bfbb83sched/uclamp: Make task_fits_capacity() use util_fits_cpu() | *309e50cbfesched/uclamp: Fix relationship between uclamp and migration margin | *54a766e196sched/fair: Cleanup task_util and capacity type | *26bffaf678ovl: remove privs in ovl_fallocate() | *5dc34f9aaaovl: remove privs in ovl_copyfile() | *9636e70ee2ovl: use ovl_copy_{real,upper}attr() wrappers | *a54843833covl: store lower path in ovl_inode | *163c5bbe7dtpm/tpm_crb: Fix error message in __crb_relinquish_locality() | *fe880e9df9tpm/tpm_ftpm_tee: Fix error handling in ftpm_mod_init() | *ebc73c4f26pstore: Avoid kcore oops by vmap()ing with VM_IOREMAP | *d4dcde11bfARM: mmp: fix timer_read delay | *95916147dcpstore/ram: Fix error return code in ramoops_probe() | *a31a647a3dseccomp: Move copy_seccomp() to no failure path. 
| *b8b76b8da6arm64: dts: armada-3720-turris-mox: Add missing interrupt for RTC | *820a5ccca7ARM: dts: turris-omnia: Add switch port 6 node | *b311f8e9f5ARM: dts: turris-omnia: Add ethernet aliases | *48ebdd06c9ARM: dts: armada-39x: Fix assigned-addresses for every PCIe Root Port | *f27dd04e44ARM: dts: armada-38x: Fix assigned-addresses for every PCIe Root Port | *1e53c63da8ARM: dts: armada-375: Fix assigned-addresses for every PCIe Root Port | *3af1a73e9eARM: dts: armada-xp: Fix assigned-addresses for every PCIe Root Port | *e4ed8133c4ARM: dts: armada-370: Fix assigned-addresses for every PCIe Root Port | *b335b6344eARM: dts: dove: Fix assigned-addresses for every PCIe Root Port | *5b3415e683arm64: dts: mediatek: mt6797: Fix 26M oscillator unit name | *93f5e66496arm64: dts: mediatek: pumpkin-common: Fix devicetree warnings | *debd938e21arm64: dts: mt2712-evb: Fix usb vbus regulators unit names | *b2c6397754arm64: dts: mt2712-evb: Fix vproc fixed regulators unit names | *96c972f835arm64: dts: mt2712e: Fix unit address for pinctrl node | *2cd1391c28arm64: dts: mt2712e: Fix unit_address_vs_reg warning for oscillators | *39877a3636arm64: dts: mt6779: Fix devicetree build warnings | *af431ce47eARM: dts: nuvoton: Remove bogus unit addresses from fixed-partition nodes | *0a616049ecarm64: dts: ti: k3-j721e-main: Drop dma-coherent in crypto node | *22a740824aarm64: dts: ti: k3-am65-main: Drop dma-coherent in crypto node | *b131304fe7perf/smmuv3: Fix hotplug callback leak in arm_smmu_pmu_init() | *b99fbe8d94perf/arm_dmc620: Fix hotplug callback leak in dmc620_pmu_init() | *9285b623bbperf: arm_dsu: Fix hotplug callback leak in dsu_pmu_init() | *e6318a7e19arm64: mm: kfence: only handle translation faults | *46ddfb9d1earm64: Treat ESR_ELx as a 64-bit register | *681e340128soc: ti: smartreflex: Fix PM disable depth imbalance in omap_sr_probe | *6eca7a2535soc: ti: knav_qmss_queue: Fix PM disable depth imbalance in knav_queue_probe | *972f8fc065soc: ti: knav_qmss_queue: Use pm_runtime_resume_and_get instead of pm_runtime_get_sync | *fe53048f2aarm: dts: spear600: Fix clcd interrupt | *75baeec464arm64: dts: qcom: sm6125: fix SDHCI CQE reg names | *0f9ac04191soc: qcom: apr: Add check for idr_alloc and of_property_read_string_index | *6855dd02c5soc: qcom: apr: make code more reuseable | *c9fb81a835arm64: dts: qcom: sm8250: drop bogus DP PHY clock | *53ffa57464arm64: dts: qcom: sm8350: fix UFS PHY registers | *d5a6bbd7a2arm64: dts: qcom: sm8250: fix UFS PHY registers | *3a52ff845farm64: dts: qcom: sm8150: fix UFS PHY registers | *800f8165e0arm64: dts: qcom: Correct QMP PHY child node name | *ee136f275bsoc: qcom: llcc: make irq truly optional | *aa7ffd4174arm64: dts: qcom: sm8250: correct LPASS pin pull down | *f94bacc616arm64: dts: qcom: pm660: Use unique ADC5_VCOIN address in node name | *d5bf119781drivers: soc: ti: knav_qmss_queue: Mark knav_acc_firmwares as static | *4707d5daf8ARM: dts: stm32: Fix AV96 WLAN regulator gpio property | *33647d7a46ARM: dts: stm32: Drop stm32mp15xc.dtsi from Avenger96 | *9f271a8660objtool, kcsan: Add volatile read/write instrumentation to whitelist | *51fe2dcba8arm64: dts: qcom: msm8916: Drop MSS fallback compatible | *a9fff3524farm64: dts: qcom: sdm845-cheza: fix AP suspend pin bias | *6487f48ea3arm64: dts: qcom: sdm630: fix UART1 pin bias | *6c0c9c5458ARM: dts: qcom: apq8064: fix coresight compatible | *0f9b088d68arm64: dts: qcom: msm8996: fix GPU OPP table | *270683fc7barm64: dts: qcom: msm8996: fix supported-hw in cpufreq OPP tables | *5c5a628914arm64: dts: qcom: msm8996: 
Add MSM8996 Pro support | *3f14048ee4arm64: dts: qcom: sm8250-sony-xperia-edo: fix touchscreen bias-disable | *89f79f8d7farm64: dts: qcom: ipq6018-cp01-c1: use BLSPI1 pins | *9db5992e72usb: musb: remove extra check in musb_gadget_vbus_draw | *adc063a491drm/amd/display: Manually adjust strobe for DCN303 * |50e12445abMerge 5.15.85 into android13-5.15-lts |\| | *5827ddaf45Linux 5.15.85 | *e22dbadac8net: loopback: use NET_NAME_PREDICTABLE for name_assign_type | *314e7a7836selftests: net: Use "grep -E" instead of "egrep" | *19a7814396Bluetooth: L2CAP: Fix u8 overflow | *f692abf139HID: uclogic: Add HID_QUIRK_HIDINPUT_FORCE quirk | *5325a884e2usb: dwc3: pci: Update PCIe device ID for USB3 controller on CPU sub-system for Raptor Lake | *367e1e3399igb: Initialize mailbox message for VF reset | *a301742b35xhci: Apply XHCI_RESET_TO_DEFAULT quirk to ADL-N | *5e959f0c4cUSB: serial: f81534: fix division by zero on line-speed change | *68fbe268d2USB: serial: f81232: fix division by zero on line-speed change | *3ec7f24b8bUSB: serial: cp210x: add Kamstrup RF sniffer PIDs | *2b092fab23USB: serial: option: add Quectel EM05-G modem | *6b41a35b41usb: gadget: uvc: Prevent buffer overflow in setup handler | *828112571cudf: Fix extending file within last block | *df1a2596c7udf: Do not bother looking for prealloc extents if i_lenExtents matches i_size | *63dbbd8f14udf: Fix preallocation discarding at indirect extent boundary | *79a97f08aeudf: Discard preallocation before extending file with a hole * |fb8d543b61Merge 5.15.84 into android13-5.15-lts |\| | *d68f50bfb0Linux 5.15.84 | *972707bae3net: fec: properly guard irq coalesce setup | *289721fe09ASoC: ops: Correct bounds check for second channel on SX controls | *de0866b94anvme-pci: clear the prp2 field when not used | *8bffa95ac1perf: Fix perf_pending_task() UaF | *825bd2af42ASoC: cs42l51: Correct PGA Volume minimum value | *91582b3a1anet: fec: don't reset irq coalesce settings to defaults on "ip link up" | *c772dab247can: mcba_usb: Fix termination command argument | *aa822de7decan: sja1000: fix size of OCR_MODE_MASK define | *09e08740d7pinctrl: meditatek: Startup with the IRQs disabled | *172a95026flibbpf: Use page size as max_entries when probing ring buffer map | *cf611d7867ASoC: ops: Check bounds for second channel in snd_soc_put_volsw_sx() | *a74b88e170ASoC: fsl_micfil: explicitly clear CHnF flags | *afac1e7d78ASoC: fsl_micfil: explicitly clear software reset bit | *9d933af8fenfp: fix use-after-free in area_cache_get() | *e1a4f5880dvfs: fix copy_file_range() averts filesystem freeze protection | *86e28ed25bx86/vdso: Conditionally export __vdso_sgx_enter_enclave() * |bfbd2237c1Merge 5.15.83 into android13-5.15-lts |\| | *fd6d66840bLinux 5.15.83 | *f895511de9io_uring: Fix a null-ptr-deref in io_tctx_exit_cb() | *f435c66d23io_uring: move to separate directory | *d9e1e5d8a7block: move CONFIG_BLOCK guard to top Makefile | *e5c0bc4ff5can: esd_usb: Allow REC and TEC to return to zero | *db6343a5b0s390/qeth: fix use-after-free in hsci | *a56c1cebe4s390/qeth: fix various format strings | *a6dba316c9macsec: add missing attribute validation for offload | *40500f1f47net: mvneta: Fix an out of bounds check | *b9274dbe39net: thunderbolt: fix memory leak in tbnet_open() | *7390c70bd4ipv6: avoid use-after-free in ip6_fragment() | *1beb475892net: plip: don't call kfree_skb/dev_kfree_skb() under spin_lock_irq() | *b08412a9cfnet: phy: mxl-gpy: fix version reporting | *dec5abd91axen/netback: fix build warning | *54d830e242dpaa2-switch: Fix memory leak in 
dpaa2_switch_acl_entry_add() and dpaa2_switch_acl_entry_remove() | *c7adcbd0fdethernet: aeroflex: fix potential skb leak in greth_init_rings() | *d962d42d63tipc: call tipc_lxc_xmit without holding node_read_lock | *f3b5dda26cnet: dsa: sja1105: fix memory leak in sja1105_setup_devlink_regions() | *5dab6fa068ipv4: Fix incorrect route flushing when table ID 0 is used | *ac566bd577ipv4: Fix incorrect route flushing when source address is deleted | *af4ccae4b7tipc: Fix potential OOB in tipc_link_proto_rcv() | *b8ce0e6f9fnet: hisilicon: Fix potential use-after-free in hix5hd2_rx() | *1685417774net: mdio: fix unbalanced fwnode reference count in mdio_device_release() | *6f4798ac9cnet: hisilicon: Fix potential use-after-free in hisi_femac_rx() | *114e65a221net: thunderx: Fix missing destroy_workqueue of nicvf_rx_mode_wq | *51c0494575net: microchip: sparx5: Fix missing destroy_workqueue of mact_queue | *99eec0a766ip_gre: do not report erspan version on GRE interface | *2891957853net: stmmac: fix "snps,axi-config" node property parsing | *5cb8f1a784gpio/rockchip: fix refcount leak in rockchip_gpiolib_register() | *b8c2f0392dnvme initialize core quirks before calling nvme_init_subsystem | *908b2da426NFC: nci: Bounds check struct nfc_target arrays | *d841cc1563i40e: Disallow ip4 and ip6 l4_4_bytes | *625a13850bi40e: Fix for VF MAC address 0 | *5538794dbdi40e: Fix not setting default xps_cpus after reset | *a6b30598fenet: mvneta: Prevent out of bounds read in mvneta_config_rss() | *e6e897d4fexen-netfront: Fix NULL sring after live migration | *eefd8953a7octeontx2-pf: Fix potential memory leak in otx2_init_tc() | *f88acaed07net: mdiobus: fix double put fwnode in the error path | *cc62d76928net: mdiobus: fwnode_mdiobus_register_phy() rework error handling | *ea113b570enet: encx24j600: Fix invalid logic in reading of MISTAT register | *8aae746d06net: encx24j600: Add parentheses to fix precedence | *a110287ef4mac802154: fix missing INIT_LIST_HEAD in ieee802154_if_add() | *e046421bedselftests: rtnetlink: correct xfrm policy rule in kci_test_ipsec_offload | *4fa8988a36net: dsa: sja1105: Check return value | *b35be171dfnet: dsa: hellcreek: Check return value | *a4c342e645net: dsa: ksz: Check return value | *edf7284a98Bluetooth: Fix not cleanup led when bt_init fails | *3322193949Bluetooth: 6LoWPAN: add missing hci_dev_put() in get_l2cap_conn() | *6c88c764e0vmxnet3: use correct intrConf reference when using extended queues | *5ad0d85757vmxnet3: correctly report encapsulated LRO packet | *5c014eb0edaf_unix: Get user_ns from in_skb in unix_diag_get_exact(). 
| *807a01a329drm: bridge: dw_hdmi: fix preference of RGB modes over YUV420 | *eb96fd3983net: broadcom: Add PTP_1588_CLOCK_OPTIONAL dependency for BCMGENET under ARCH_BCM2835 | *16eb678bcaigb: Allocate MSI-X vector when testing | *34c6367c94e1000e: Fix TX dispatch condition | *4271515f18gpio: amd8111: Fix PCI device reference count leak | *d57b60e9b3drm/bridge: ti-sn65dsi86: Fix output polarity setting bug | *f8b2965601netfilter: ctnetlink: fix compilation warning after data race fixes in ct mark | *246bcd05baca8210: Fix crash by zero initializing data | *80dad8df5fieee802154: cc2520: Fix error return code in cc2520_hw_init() | *dd9dcfb85cdrm/vmwgfx: Fix race issue calling pin_user_pages | *7b09ba9036netfilter: nft_set_pipapo: Actually validate intervals in fields after the first one | *6daaa84b62gpiolib: fix memory leak in gpiochip_setup_dev() | *1a1075d371gpiolib: check the 'ngpios' property in core gpiolib code | *70c5515c1cgpiolib: improve coding style for local variables | *3b714f25fcclk: Fix pointer casting to prevent oops in devm_clk_release() | *c142cba37dcan: af_can: fix NULL pointer dereference in can_rcv_filter | *104bb1f67eHID: ite: Enable QUIRK_TOUCHPAD_ON_OFF_REPORT on Acer Aspire Switch V 10 | *f755d11c55HID: core: fix shift-out-of-bounds in hid_report_raw_event | *2d4b310c32HID: hid-lg4ff: Add check for empty lbuf | *5e8021ae08HID: usbhid: Add ALWAYS_POLL quirk for some mice | *5e88c6f4aanet: dsa: sja1105: avoid out of bounds access in sja1105_init_l2_policing() | *1074fefce9drm/shmem-helper: Avoid vm_open error paths | *83e3da8bb9drm/shmem-helper: Remove errant put in error path | *249011f4c3drm/amdgpu/sdma_v4_0: turn off SDMA ring buffer in the s2idle suspend | *1e4fe9a154drm/vmwgfx: Don't use screen objects when SEV is active | *f6550976feKVM: s390: vsie: Fix the initialization of the epoch extension (epdx) field | *fe50a9bbebnet: mana: Fix race on per-CQ variable napi work_done | *a49894a5acBluetooth: Fix crash when replugging CSR fake controllers | *1dee2b5047Bluetooth: btusb: Add debug message for CSR controllers | *3ac29732a2mm/gup: fix gup_pud_range() for dax | *aad8bbd17amemcg: fix possible use-after-free in memcg_write_event_control() | *6fb8bc29bfmedia: v4l2-dv-timings.c: fix too strict blanking sanity checks | *a4c575541eRevert "ARM: dts: imx7: Fix NAND controller size-cells" | *28abc11459soundwire: intel: Initialize clock stop timeout | *22d800b378media: videobuf2-core: take mmap_lock in vb2_get_unmapped_area() | *5d0fa6fc88xen/netback: don't call kfree_skb() with interrupts disabled | *4422241cefxen/netback: do some code cleanup | *0fe29bd925xen/netback: Ensure protocol headers don't fall in the non-linear area | *f01677be31drm/bridge: anx7625: Fix edid_read break case in sp_tx_edid_read() | *ee2536830bcifs: fix use-after-free caused by invalid pointer `hostname` | *dc62f05f66rtc: cmos: avoid UIP when reading alarm time | *48ea4199afrtc: cmos: avoid UIP when writing alarm time | *3f52afc6edrtc: mc146818-lib: extract mc146818_avoid_UIP | *1a3f8c6cd2mm/khugepaged: invoke MMU notifiers in shmem/file collapse paths | *79ad784c9dmm/khugepaged: fix GUP-fast interaction by sending IPI | *d15cd6de01mm/khugepaged: take the right locks for page table retraction | *26f084e554net: usb: qmi_wwan: add u-blox 0x1342 composition | *029a7f1c5d9p/xen: check logical size for buffer size | *b398832893usb: dwc3: gadget: Disable GUSB2PHYCFG.SUSPHY for End Transfer | *e70a572440fbcon: Use kzalloc() in fbcon_prepare_logo() | *fd3768597dregulator: twl6030: fix get status of twl6032 
regulators | *9f74b9aa8dASoC: soc-pcm: Add NULL check in BE reparenting | *dae93f4168btrfs: send: avoid unaligned encoded writes when attempting to clone range | *f54e1edf57selftests/net: Find nettest in current directory | *fccd454129ALSA: seq: Fix function prototype mismatch in snd_seq_expand_var_event | *542a563bb7regulator: slg51000: Wait after asserting CS pin | *3d1b5fde369p/fd: Use P9_HDRSZ for header size | *fe2d44e86eASoC: rt711-sdca: fix the latency time of clock stop prepare state machine transitions | *e945f3d809ARM: dts: rockchip: disable arm_global_timer on rk3066 and rk3188 | *c3b818c91aspi: mediatek: Fix DEVAPC Violation at KO Remove | *d9f0107be1ASoC: wm8962: Wait for updated value of WM8962_CLOCKING1 register | *7ae0262748ARM: 9266/1: mm: fix no-MMU ZERO_PAGE() implementation | *d81c62e312ARM: 9251/1: perf: Fix stacktraces for tracepoint events in THUMB2 kernels | *66717ad03bfs: use acquire ordering in __fget_light() | *1222e2364aARM: dts: rockchip: rk3188: fix lcdc1-rgb24 node name | *996fb29b06arm64: dts: rockchip: fix ir-receiver node names | *752138ef89ARM: dts: rockchip: fix ir-receiver node names | *8045971e40arm: dts: rockchip: remove clock-frequency from rtc | *5e9fb8013aarm: dts: rockchip: fix node name for hym8563 rtc | *2ed7137e91arm64: dts: rockchip: keep I2S1 disabled for GPIO function on ROCK Pi 4 series | *5a1122e1a8mmc: mtk-sd: Fix missing clk_disable_unprepare in msdc_of_clock_parse() | *282f52c954clk: Provide new devm_clk helpers for prepared and enabled clocks | *eb94a7a20fclk: generalize devm_clk_get() a bit * |20de784185ANDROID: fix up abi change in struct sdhci_host * |ebd1f8013dANDROID: gki_defconfig: add CONFIG_FUNCTION_ERROR_INJECTION * |112ff45bb5Merge 5.15.82 into android13-5.15-lts |\| | *d979030136Linux 5.15.82 | *48642f9431proc: proc_skip_spaces() shouldn't think it is working on C strings | *3eb9213f66proc: avoid integer type confusion in get_proc_long | *4a4073a2e2ipc/sem: Fix dangling sem_array access in semtimedop race | *53b9b1201eInput: raydium_ts_i2c - fix memory leak in raydium_i2c_send() | *571b6bbbf5char: tpm: Protect tpm_pm_suspend with locks | *f39891cfe7Revert "clocksource/drivers/riscv: Events are stopped during CPU suspend" | *a759057af7ACPI: HMAT: Fix initiator registration for single-initiator systems | *da8a794d71ACPI: HMAT: remove unnecessary variable initialization | *2d16161a2ci2c: imx: Only DMA messages with I2C_M_DMA_SAFE flag set | *950a05cb15i2c: npcm7xx: Fix error handling in npcm_i2c_init() | *db3f8da033serial: stm32: Deassert Transmit Enable on ->rs485_config() | *45f628f4fdserial: stm32: Use TC interrupt to deassert GPIO RTS in RS485 mode | *c60eae5b1dserial: stm32: Factor out GPIO RTS toggling into separate function | *041f8dc882ipv4: Fix route deletion when nexthop info is not specified | *25174d91e4ipv4: Handle attempt to delete multipath route when fib_info contains an nh reference | *a0ad247e55selftests: net: fix nexthop warning cleanup double ip typo | *532847b69cselftests: net: add delete nexthop route warning test | *e078355881Kconfig.debug: provide a little extra FRAME_WARN leeway when KASAN is enabled | *723fa02e0eparisc: Increase FRAME_WARN to 2048 bytes on parisc | *b951ab4b35mm: migrate: fix THP's mapcount on isolation | *c5eda6029cmm: __isolate_lru_page_prepare() in isolate_migratepages_block() | *bdb613ef17iommu/vt-d: Fix PCI device refcount leak in dmar_dev_scope_init() | *b6eea8b2e8iommu/vt-d: Fix PCI device refcount leak in has_external_pci() | *787d81d4ebnvme: fix SRCU protection of nvme_ns_head 
list | *12f237200criscv: kexec: Fixup irq controller broken in kexec crash path | *ac00301adbriscv: fix race when vmap stack overflow | *fa7a7d185eriscv: Sync efi page table's kernel mappings before switching | *d86d698925pinctrl: single: Fix potential division by zero | *98b15c7066ASoC: ops: Fix bounds check for _sx controls | *f88a6977f8KVM: x86/mmu: Fix race condition in direct_page_fault | *df4b177b48io_uring/poll: fix poll_refs race with cancelation | *4b702b7d11io_uring: make poll refs more robust | *1d58849ac2io_uring: cmpxchg for poll arm refs release | *cd1981a8c3io_uring: fix tw losing poll events | *62321dc7b0io_uring: update res mask in io_poll_check_events | *417d5ea6e7tracing: Free buffers when a used dynamic event is removed | *52fc245d15tracing: Fix race where histograms can be called before the event | *cb2b0612cdtracing/osnoise: Fix duration type | *615a996ff3drm/i915: Never return 0 if not all requests retired | *01a2b25ef2drm/i915: Fix negative value passed as remaining time | *ff1591ba33drm/amdgpu: enable Vangogh VCN indirect sram mode | *ac2d7fa908drm/amdgpu: temporarily disable broken Clang builds due to blown stack-frame | *57ee7bc4c6mmc: sdhci: Fix voltage switch delay | *bb8f809514mmc: sdhci-sprd: Fix no reset data and command after voltage switch | *4c7681c1a5mmc: sdhci-esdhc-imx: correct CQHCI exit halt state check | *01dbe4db59mmc: core: Fix ambiguous TRIM and DISCARD arg | *738946e355mmc: mmc_test: Fix removal of debugfs file | *635d051734net: stmmac: Set MAC's flow control register to reflect current settings | *9132dcdf3bv4l2: don't fall back to follow_pfn() if pin_user_pages_fast() fails | *76ad884be0pinctrl: intel: Save and restore pins in "direct IRQ" mode | *41296b85fax86/bugs: Make sure MSR_SPEC_CTRL is updated properly upon resume from S3 | *33021419fdnilfs2: fix NULL pointer dereference in nilfs_palloc_commit_free_entry() | *2e44dd9a8dtools/vm/slabinfo-gnuplot: use "grep -E" instead of "egrep" | *b60a8ad771error-injection: Add prompt for function error injection | *757eb00c4cALSA: dice: fix regression for Lexicon I-ONIX FW810S | *a1a96a6f30riscv: mm: Proper page permissions after initmem free | *823df3607driscv: vdso: fix section overlapping under some conditions | *6e035d5a2ahwmon: (coretemp) fix pci device refcount leak in nv1a_ram_new() | *7692700ac8hwmon: (coretemp) Check for null before removing sysfs attrs | *9b5836b9c4net: ethernet: renesas: ravb: Fix promiscuous mode after system resumed | *0dfb9a5663sctp: fix memory leak in sctp_stream_outq_migrate() | *fcb3e02161packet: do not set TP_STATUS_CSUM_VALID on CHECKSUM_COMPLETE | *04b995e963net: tun: Fix use-after-free in tun_detach() | *43ca0adf79afs: Fix fileserver probe RTT handling | *543d917f69net: mdiobus: fix unbalanced node reference count | *dca370e575net: hsr: Fix potential use-after-free | *1daec08156tipc: re-fetch skb cb after tipc_msg_validate | *16a64dc265dsa: lan9303: Correct stat name | *766086ea8cnet: wwan: iosm: fix dma_alloc_coherent incompatible pointer type | *c667751a42net: wwan: iosm: fix kernel test robot reported error | *9c584d6d9cnet: ethernet: nixge: fix NULL dereference | *8782b32ef8net/9p: Fix a potential socket leak in p9_socket_open | *6fc9425bffnet: net_netdev: Fix error handling in ntb_netdev_init_module() | *3bc893ef36net: ethernet: ti: am65-cpsw: fix error handling in am65_cpsw_nuss_probe() | *7730904f50net: phy: fix null-ptr-deref while probe() failed | *59b54f0563wifi: mac8021: fix possible oob access in ieee80211_get_rate_duration | *dc0853f8b5wifi: cfg80211: 
don't allow multi-BSSID in S1G | *88a6fe3707wifi: cfg80211: fix buffer overflow in elem comparison | *08fff7aaebaquantia: Do not purge addresses when setting the number of rings | *2a7aa52573qlcnic: fix sleep-in-atomic-context bugs caused by msleep | *7b734d26f0can: m_can: Add check for devm_clk_get | *ea8dc27bb0can: m_can: pci: add missing m_can_class_free_dev() in probe/remove methods | *b1d2a8e02acan: etas_es58x: es58x_init_netdev(): free netdev when register_candev() | *e53da04e37can: cc770: cc770_isa_probe(): add missing free_cc770dev() | *d452a71995can: sja1000_isa: sja1000_isa_probe(): add missing free_sja1000dev() | *372eb550fanet/mlx5e: Fix use-after-free when reverting termination table | *839eeab03cnet/mlx5: Fix uninitialized variable bug in outlen_write() | *34feea3bfbnet/mlx5: DR, Fix uninitialized var warning | *3485ef2aabnet/mlx5: DR, Rename list field in matcher struct to list_node | *9fc27d22cde100: Fix possible use after free in e100_xmit_prepare | *0d9f5bd54biavf: Fix error handling in iavf_init_module() | *b0b2b9050ciavf: remove redundant ret variable | *69501d8205fm10k: Fix error handling in fm10k_init_module() | *5e3657dedei40e: Fix error handling in i40e_init_module() | *7109e94109ixgbevf: Fix resource leak in ixgbevf_init_module() | *196ea810e2of: property: decrement node refcount in of_fwnode_get_reference_args() | *36164db278nvmem: rmem: Fix return value check in rmem_read() | *e376183167bpf: Do not copy spin lock field from user in bpf_selem_alloc | *45f6e81863hwmon: (ibmpex) Fix possible UAF when ibmpex_register_bmc() fails | *a90251376chwmon: (i5500_temp) fix missing pci_disable_device() | *eeb31b828dhwmon: (ina3221) Fix shunt sum critical calculation | *9514b95cachwmon: (ltc2947) fix temperature scaling | *0140e079a4libbpf: Handle size overflow for ringbuf mmap | *06d5790e7dARM: at91: rm9200: fix usb device clock id | *d074f173fbscripts/faddr2line: Fix regression in name resolution on ppc64le | *ee3d37d796bpf, perf: Use subprog name when reporting subprog ksymbol | *ec02fc0a41iio: light: rpr0521: add missing Kconfig dependencies | *f7419fc42aiio: health:afe4404: Fix oob read in afe4404_[read|write]_raw | *e7e76a77aaiio: health: afe4403: Fix oob read in afe4403_read_raw | *ebdca90efbdrm/amdgpu: Partially revert "drm/amdgpu: update drm_display_info correctly when the edid is read" | *c365d3c3e5drm/amdgpu: update drm_display_info correctly when the edid is read | *df5346466edrm/display/dp_mst: Fix drm_dp_mst_add_affected_dsc_crtcs() return code | *044da1a371btrfs: qgroup: fix sleep from invalid context bug in btrfs_qgroup_inherit() | *da86809ab8btrfs: move QUOTA_ENABLED check to rescan_should_stop from btrfs_qgroup_rescan_worker | *5d66eadc1cspi: spi-imx: Fix spi_bus_clk if requested clock is higher than input clock | *6b4544a131btrfs: free btrfs_path before copying inodes to userspace | *c7ae3beceebtrfs: sink iterator parameter to btrfs_ioctl_logical_to_ino | *acc2f40b98erofs: fix order >= MAX_ORDER warning due to crafted negative i_size | *ca9f27448adrm/i915/gt: Use i915_vm_put on ppgtt_create error paths | *c2f2972889drm/i915: Create a dummy object for gen6 ppgtt | *918002bdbearm64: mte: Avoid setting PG_mte_tagged if no tags cleared or restored * |d753150bdcRevert "serial: Add rs485_supported to uart_port" * |8ccd9528beRevert "serial: fsl_lpuart: Fill in rs485_supported" * |a924bb92c6Merge 5.15.81 into android13-5.15-lts |\| | *e4a7232c91Linux 5.15.81 | *5c5c563a08cifs: fix missed refcounting of ipc tcon | *ee2d04f23bdrm/i915: fix TLB invalidation for Gen12 
video and compute engines | *bef834845ddrm/amdgpu: always register an MMU notifier for userptr | *7901de7aa8drm/amdgpu: Enable Aldebaran devices to report CU Occupancy | *e7bf1fe538drm/amd/display: No display after resume from WB/CB | *5033cba00cdrm/amd/dc/dce120: Fix audio register mapping, stop triggering KASAN | *b8dc245909btrfs: sysfs: normalize the error handling branch in btrfs_init_sysfs() | *914baca57abtrfs: use kvcalloc in btrfs_get_dev_zone_info | *c1e6d4bfdebtrfs: zoned: fix missing endianness conversion in sb_write_pointer | *d88bf6be02btrfs: free btrfs_path before copying subvol info to userspace | *f218b404fcbtrfs: free btrfs_path before copying fspath to userspace | *fea9397101btrfs: free btrfs_path before copying root refs to userspace | *7d0c25b5fegenirq: Take the proposed affinity at face value if force==true | *f17657cce0irqchip/gic-v3: Always trust the managed affinity provided by the core code | *52a93f2dcfgenirq: Always limit the affinity to online CPUs | *599cf4b845genirq/msi: Shutdown managed interrupts with unsatifiable affinities | *7aed1dd5d2wifi: wilc1000: validate number of channels | *e9de501cf7wifi: wilc1000: validate length of IEEE80211_P2P_ATTR_CHANNEL_LIST attribute | *143232cb5awifi: wilc1000: validate length of IEEE80211_P2P_ATTR_OPER_CHANNEL attribute | *cd9c486971wifi: wilc1000: validate pairwise and authentication suite offsets | *f2fb18d429fuse: lock inode unconditionally in fuse_fallocate() | *bb1c33bdf4dm integrity: clear the journal on suspend | *20ad31b09edm integrity: flush the journal on suspend | *5ca2110ba5gpu: host1x: Avoid trying to use GART on Tegra20 | *97f47617e8scsi: iscsi: Fix possible memory leak when device_register() failed | *56ab7f237enet: usb: qmi_wwan: add Telit 0x103a composition | *e2e33f213dtcp: configurable source port perturb table size | *269928e5c7platform/x86: ideapad-laptop: Fix interrupt storm on fn-lock toggle on some Yoga laptops | *17d995dc69platform/x86: hp-wmi: Ignore Smart Experience App event | *e85bdc7872zonefs: fix zone report size in __zonefs_io_error() | *982fcd83fbdrm/amdgpu: disable BACO support on more cards | *ea11f8197dplatform/x86: acer-wmi: Enable SW_TABLET_MODE on Switch V 10 (SW5-017) | *09af15e691platform/x86: asus-wmi: add missing pci_dev_put() in asus_wmi_set_xusb2pr() | *ba040bea9dxen/platform-pci: add missing free_irq() in error path | *6815b2087dxen-pciback: Allow setting PCI_MSIX_FLAGS_MASKALL too | *4c13ddb74fASoC: stm32: dfsdm: manage cb buffers cleanup | *dd82295a23Input: i8042 - apply probe defer to more ASUS ZenBook models | *e12e121febInput: soc_button_array - add Acer Switch V 10 to dmi_use_low_level_irq[] | *9f5c167074Input: soc_button_array - add use_low_level_irq module parameter | *aaef86eac9Input: goodix - try resetting the controller when no config is set | *e2223f5fbbserial: 8250: 8250_omap: Avoid RS485 RTS glitch on ->set_termios() | *4e208294detools: iio: iio_generic_buffer: Fix read size | *0d0e2545faASoC: Intel: bytcht_es8316: Add quirk for the Nanote UMPC-01 | *e394cf9d7aInput: synaptics - switch touchpad on HP Laptop 15-da3001TU to RMI mode | *96b5d11777x86/ioremap: Fix page aligned size calculation in __ioremap_caller() | *d048f74815x86/pm: Add enumeration check before spec MSRs save/restore setup | *070e3560bfx86/tsx: Add a feature bit for TSX control MSR support | *1430c98ebbKVM: x86: remove exit_int_info warning in svm_handle_exit | *27550a5930KVM: x86: add kvm_leave_nested | *3e87cb0caaKVM: x86: nSVM: harden svm_free_nested against freeing vmcb02 while still in use | 
*6425c590d0KVM: x86: forcibly leave nested mode on vCPU reset | *f42ebf972aKVM: x86: nSVM: leave nested mode on vCPU free | *7b3c9405b2mm: vmscan: fix extreme overreclaim and swap floods | *feb2eda5e1gcov: clang: fix the buffer overflow issue | *ea6aa25c9anilfs2: fix nilfs_sufile_mark_dirty() not set segment usage as dirty | *9d97a9fbfcusb: dwc3: gadget: Clear ep descriptor last | *02632ea4dfusb: dwc3: gadget: Return -ESHUTDOWN on ep disable | *765ca3e63fusb: dwc3: gadget: conditionally remove requests | *7945cbf866bus: ixp4xx: Don't touch bit 7 on IXP42x | *39c039018aiio: core: Fix entry not deleted when iio_register_sw_trigger_type() fails | *0791a5ddbaiio: light: apds9960: fix wrong register for gesture gain | *f0158b9bfcarm64: dts: rockchip: lower rk3399-puma-haikou SD controller clock frequency | *277d19ec28ext4: fix use-after-free in ext4_ext_shift_extents | *c9d133100busb: cdnsp: fix issue with ZLP - added TD_SIZE = 1 | *c2ad434cd4usb: cdnsp: Fix issue with Clear Feature Halt Endpoint | *1d91c64887usb: dwc3: exynos: Fix remove() function | *0a216625c3KVM: arm64: pkvm: Fixup boot mode to reflect that the kernel resumes from EL1 | *f0044a4a31mmc: sdhci-brcmstb: Fix SDHCI_RESET_ALL for CQHCI | *8e6940979bmmc: sdhci-brcmstb: Enable Clock Gating to save power | *24b46bfa96mmc: sdhci-brcmstb: Re-organize flags | *227543ccacnios2: add FORCE for vmlinuz.gz | *6a4ea16a67init/Kconfig: fix CC_HAS_ASM_GOTO_TIED_OUTPUT test with dash | *c4a9046c27lib/vdso: use "grep -E" instead of "egrep" | *5fefdceafbs390/crashdump: fix TOD programmable field size | *592b6fd74anet: thunderx: Fix the ACPI memory leak | *697eb30a35octeontx2-af: Fix reference count issue in rvu_sdp_init() | *6ba1687ea1octeontx2-pf: Add check for devm_kcalloc | *26c31e7c73net: enetc: preserve TX ring priority across reconfiguration | *0e16bbf616net: enetc: cache accesses to &priv->si->hw | *68de40f66anet: enetc: manage ENETC_F_QBV in priv->active_offloads only when enabled | *5c0858e142nfc: st-nci: fix incorrect sizing calculations in EVT_TRANSACTION | *e09243fb16nfc: st-nci: fix memory leaks in EVT_TRANSACTION | *dca20b7a19nfc: st-nci: fix incorrect validating logic in EVT_TRANSACTION | *67d638f8efs390/dasd: fix no record found for raw_track_access | *88277853cfarcnet: fix potential memory leak in com20020_probe() | *1d44ec8507ipv4: Fix error return code in fib_table_insert() | *918e83c6bfdccp/tcp: Reset saddr on failure after inet6?_hash_connect(). 
| *8ce9b1c97ffs: do not update freeing inode i_io_list | *8db9e60cdfnetfilter: flowtable_offload: add missing locking | *c1da3bfca1netfilter: ipset: restore allowing 64 clashing elements in hash:net,iface | *606091b2f6dma-buf: fix racing conflict of dma_heap_add() | *8af9450befbnx2x: fix pci device refcount leak in bnx2x_vf_is_pcie_pending() | *251bcf6cfbregulator: twl6030: re-add TWL6032_SUBCLASS | *6258a8f913NFC: nci: fix memory leak in nci_rx_data_packet() | *ffe6021154net: sched: allow act_ct to be built without NF_NAT | *a05c0f9511net: sparx5: fix error handling in sparx5_port_open() | *182ef20f0fsfc: fix potential memleak in __ef100_hard_start_xmit() | *2da022fac9net: wwan: iosm: use ACPI_FREE() but not kfree() in ipc_pcie_read_bios_cfg() | *a48b345b87xfrm: Fix ignored return value in xfrm6_init() | *19989e1635xfrm: Fix oops in __xfrm_state_delete() | *46d450067ftipc: check skb_linearize() return value in tipc_disc_rcv() | *33fb115a76tipc: add an extra conn_get in tipc_conn_alloc | *4ae907c45ftipc: set con sock in tipc_conn_alloc | *ef866d9ea9net/mlx5: Fix handling of entry refcount when command is not issued to FW | *3101318939net/mlx5: Fix FW tracer timestamp calculation | *1eaabb5bbbnet/mlx5: Do not query pci info while pci disabled | *8180099b2anetfilter: ipset: regression in ip_set_hash_ip.c | *448b627370Drivers: hv: vmbus: fix possible memory leak in vmbus_device_register() | *082c31cb99Drivers: hv: vmbus: fix double free in the error path of vmbus_add_channel_work() | *7fdd9daa5bmacsec: Fix invalid error code set | *e8fb93a079nfp: add port from netdev validation for EEPROM access | *e44e424ed9nfp: fill splittable of devlink_port_attrs correctly | *527046c138net: pch_gbe: fix pci device refcount leak while module exiting | *f77c84dd5bocteontx2-af: debugsfs: fix pci device refcount leak | *cd581ffd8dnet/qla3xxx: fix potential memleak in ql3xxx_send() | *a8976074e2net: mvpp2: fix possible invalid pointer dereference | *3a4cc56cd1net/mlx4: Check retval of mlx4_bitmap_init | *c368220e17net: ethernet: mtk_eth_soc: fix error handling in mtk_open() | *d9729437b2ARM: dts: imx6q-prti6q: Fix ref/tcxo-clock-frequency properties | *1c0b6a97c4ARM: mxs: fix memory leak in mxs_machine_init() | *ecff08f3c4iavf: Fix race condition between iavf_shutdown and iavf_remove | *31147d4e90iavf: Do not restart Tx queues after reset task failure | *232942b26ciavf: Fix a crash during reset task | *0600615d01netfilter: nf_tables: do not set up extensions for end interval | *60387731e6netfilter: conntrack: Fix data-races around ct mark | *ee3ccd1abb9p/fd: fix issue of list_del corruption in p9_fd_cancel() | *131c2eeabcnet: pch_gbe: fix potential memleak in pch_gbe_tx_queue() | *f58df483ffnfc/nci: fix race with opening and closing | *da22d7410anet: dsa: sja1105: disallow C45 transactions on the BASE-TX MDIO bus | *38fe0988bdrxrpc: Fix race between conn bundle lookup and bundle removal [ZDI-CAN-15975] | *d92151b465rxrpc: Use refcount_t rather than atomic_t | *3c33e41fa5rxrpc: Allow list of in-use local UDP endpoints to be viewed in /proc | *46cefa2689net: liquidio: simplify if expression | *95500ee0b3selftests: mptcp: fix mibit vs mbit mix up | *f8c4da198eselftests: mptcp: more stable simult_flows tests | *1c0efab08cARM: dts: at91: sam9g20ek: enable udc vbus gpio pinctrl | *ade662f3f2tee: optee: fix possible memory leak in optee_register_device() | *d1dd119134bus: sunxi-rsb: Support atomic transfers | *b1ed61e706bus: sunxi-rsb: Remove the shutdown callback | *61a41d1abcregulator: core: fix UAF in 
destroy_regulator() | *a85c0db3f5spi: dw-dma: decrease reference count in dw_spi_dma_init_mfld() | *d9f9b3255bregulator: core: fix kobject release warning and memory leak in regulator_register() | *bd419c7c68ASoC: max98373: Add checks for devm_kcalloc | *f9bc4a18e7scsi: storvsc: Fix handling of srb_status and capacity change events | *c2153fe2d0x86/hyperv: Restore VP assist page after cpu offlining/onlining | *b2ddd76237ASoC: soc-pcm: Don't zero TDM masks in __soc_pcm_open() | *dd62cb7e6fASoC: sgtl5000: Reset the CHIP_CLK_CTRL reg on remove | *d80ffd4823ASoC: hdac_hda: fix hda pcm buffer overflow issue | *10bee7eb2aARM: dts: am335x-pcm-953: Define fixed regulators in root node | *8fe533c0f9af_key: Fix send_acquire race with pfkey_register | *0c69a4658exfrm: replay: Fix ESN wrap around for GSO | *ecc6ce4fdfxfrm: fix "disable_policy" on ipv4 early demux | *5a792c1d4dMIPS: pic32: treat port as signed integer | *144452b421RISC-V: vdso: Do not add missing symbols to version section in linker script | *799970b8ccALSA: usb-audio: add quirk to fix Hamedal C20 disconnect issue | *38b09dc14fRevert "drm/amdgpu: Revert "drm/amdgpu: getting fan speed pwm for vega10 properly"" | *44d50fccf8nvmet: fix memory leak in nvmet_subsys_attr_model_store_locked | *5adc12d9e2arm64/syscall: Include asm/ptrace.h in syscall_wrapper header. | *1340f02773block, bfq: fix null pointer dereference in bfq_bio_bfqg() | *86d4dca4a6drm: panel-orientation-quirks: Add quirk for Acer Switch V 10 (SW5-017) | *b90e6234f5scsi: scsi_debug: Make the READ CAPACITY response compliant with ZBC | *cdbba6a4descsi: ibmvfc: Avoid path failures during live migration | *6e8124a151platform/x86/intel/hid: Add some ACPI device IDs | *32735e24f4platform/x86/intel/pmt: Sapphire Rapids PMT errata fix | *83a6823016platform/x86: touchscreen_dmi: Add info for the RCA Cambio W101 v2 2-in-1 | *f707986a14platform/x86: ideapad-laptop: Disable touchpad_switch | *5e38740ae5Revert "net: macsec: report real_dev features when HW offloading is enabled" | *26b72202eeselftests/bpf: Add verifier test for release_reference() | *8395e3f98cspi: stm32: fix stm32_spi_prepare_mbr() that halves spi clk for every run | *d04722f280wifi: ath11k: Fix QCN9074 firmware boot on x86 | *9cc96a20a9wifi: mac80211: Fix ack frame idr leak when mesh has no route | *86f90014e7wifi: airo: do not assign -1 to unsigned char | *f5558fbda0audit: fix undefined behavior in bit shift for AUDIT_BIT | *af5de982ffriscv: dts: sifive unleashed: Add PWM controlled LEDs | *ee34a19dbewifi: mac80211_hwsim: fix debugfs attribute ps with rc table support | *3513785dc1wifi: mac80211: fix memory free error when registering wiphy fail | *855485d31eceph: fix NULL pointer dereference for req->r_session | *729c9ad294ceph: Use kcalloc for allocating multiple elements | *d276fb4a7ebinder: validate alloc->mm in ->mmap() handler | *5277e3d633x86/sgx: Add overflow check in sgx_validate_offset_length() | *b5a838ba47x86/sgx: Create utility to validate user provided offset and length | *2f6e2de3a5ceph: avoid putting the realm twice when decoding snaps fails | *8bef55d793ceph: do not update snapshot context when there is no new snapshot | *cdee3136c9iio: pressure: ms5611: fixed value compensation bug | *5d6696e79diio: ms5611: Simplify IO callback parameters | *f0ee88e83cnvme-pci: add NVME_QUIRK_BOGUS_NID for Netac NV7000 | *a61716cd24nvme-pci: disable write zeroes on various Kingston SSD | *19b60f3363nvme-pci: disable namespace identifiers for the MAXIO MAP1001 | *d537e19306nvme-pci: add NVME_QUIRK_BOGUS_NID for Micron 
Nitro | *af03ce894cnvme: add a bogus subsystem NQN quirk for Micron MTFDKBA2T0TFH | *c6803faa6adrm/display: Don't assume dual mode adaptors support i2c sub-addressing | *d2284fe43cata: libata-core: do not issue non-internal commands once EH is pending | *e09583e83eata: libata-scsi: simplify __ata_scsi_queuecmd() | *a9059e338fcifs: Fix connections leak when tlink setup failed | *81d583baa5cifs: support nested dfs links over reconnect | *dbc0ea91becifs: split out dfs code from cifs_reconnect() | *b3ce844d23cifs: introduce new helper for cifs_reconnect() | *2ea600b598sctp: clear out_curr if all frag chunks of current msg are pruned | *1f9f346fbbsctp: remove the unnecessary sinfo_stream check in sctp_prsctp_prune_unsent | *e8915faa9ftty: serial: fsl_lpuart: don't break the on-going transfer when global reset | *bd19013935serial: fsl_lpuart: Fill in rs485_supported | *87c81c19cdserial: Add rs485_supported to uart_port | *c08f4ea79fASoC: fsl_asrc fsl_esai fsl_sai: allow CONFIG_PM=N | *d1e4288d2aASoC: fsl_sai: use local device pointer * |e66b45d527Merge branch 'android13-5.15' into android13-5.15-lts * |72d681a01dRevert "net: use struct_group to copy ip/ipv6 header addresses" * |c46ed1b2d7Merge 5.15.80 into android13-5.15-lts |\| | *71e496bd33Linux 5.15.80 | *b63ddb3ba6ntfs: check overflow when iterating ATTR_RECORDs | *ab6a1bb17entfs: fix out-of-bounds read in ntfs_attr_find() | *5330c423b8ntfs: fix use-after-free in ntfs_attr_find() | *43bbadb7e4net/9p: use a dedicated spinlock for trans_fd | *9357fca9damm: fs: initialize fsdata passed to write_begin/write_end interface | *b334ab4c33wifi: wext: use flex array destination for memcpy() | *0e07032b4b9p/trans_fd: always use O_NONBLOCK read/write | *7c7b7476b5gfs2: Switch from strlcpy to strscpy | *28275a7c84gfs2: Check sb_bsize_shift after reading superblock | *a4f1a01b2e9p: trans_fd/p9_conn_cancel: drop client lock earlier | *f7b0e95071kcm: close race conditions on sk_receive_queue | *27d706b0d3kcm: avoid potential race in kcm_tx_work | *b49026d9c8tcp: cdg: allow tcp_cdg_release() to be called multiple times | *e41cbf98dfmacvlan: enforce a consistent minimal mtu | *d5f7f6e63fInput: i8042 - fix leaking of platform device on module removal | *c49cc2c059kprobes: Skip clearing aggrprobe's post_handler in kprobe-on-ftrace case | *71beab7119scsi: scsi_debug: Fix possible UAF in sdebug_add_host_helper() | *a636772988scsi: target: tcm_loop: Fix possible name leak in tcm_loop_setup_hba_bus() | *cb7893c85enet: use struct_group to copy ip/ipv6 header addresses | *9b8c0c88f4tracing: Fix warning on variable 'struct trace_array' | *73cf0ff9a3ring-buffer: Include dropped pages in counting dirty patches | *35c60b4e8cperf: Improve missing SIGTRAP checking | *2ac6276864serial: 8250_lpss: Use 16B DMA burst with Elkhart Lake | *b1a27b2aadnvme: ensure subsystem reset is single threaded | *bccece3c33nvme: restrict management ioctls to admin | *8cddb0d96bperf/x86/intel/pt: Fix sampling using single range output | *8e2f33c598misc/vmw_vmci: fix an infoleak in vmci_host_do_receive_datagram() | *9a72a46cb0docs: update mediator contact information in CoC doc | *a99a547658mmc: sdhci-pci: Fix possible memory leak caused by missing pci_dev_put() | *4a1b6f7839mmc: sdhci-pci-o2micro: fix card detect fail issue caused by CD# debounce timeout | *fd285d4215mmc: core: properly select voltage range without power cycle | *8a9bae5f1bfirmware: coreboot: Register bus in module init | *052d0e79efiommu/vt-d: Set SRE bit only when hardware has SRS cap | *c31a792a82iommu/vt-d: Preset Access 
bit for IOVA in FL non-leaf paging entries | *11edbdee43scsi: zfcp: Fix double free of FSF request when qdio send fails | *fdf87b5b30net: phy: marvell: add sleep time after enabling the loopback bit | *9648d760edmaccess: Fix writing offset in case of fault in strncpy_from_kernel_nofault() | *fdd57c20d4Input: iforce - invert valid length check when fetching device IDs | *0cafb719beserial: 8250_lpss: Configure DMA also w/o DMA filter | *59f6596697serial: 8250: Flush DMA Rx on RLSI | *118b52c2aeserial: 8250: Fall back to non-DMA Rx if IIR_RDI occurs | *6ffce7a92edm ioctl: fix misbehavior if list_versions races with module loading | *2b104973f7iio: pressure: ms5611: changed hardcoded SPI speed to value limited | *1678d4abb2iio: adc: mp2629: fix potential array out of bound access | *bd22c232eaiio: adc: mp2629: fix wrong comparison of channel | *656f670613iio: trigger: sysfs: fix possible memory leak in iio_sysfs_trig_init() | *1bf8c0aff8iio: adc: at91_adc: fix possible memory leak in at91_adc_allocate_trigger() | *afc0aea702usb: typec: mux: Enter safe mode only when pins need to be reconfigured | *8236628a54usb: cdns3: host: fix endless superspeed hub port reset | *ead83b0db8usb: chipidea: fix deadlock in ci_otg_del_timer | *cc9e6d8c55usb: add NO_LPM quirk for Realforce 87U Keyboard | *70eca1d261USB: serial: option: add Fibocom FM160 0x0111 composition | *1b6a54885cUSB: serial: option: add u-blox LARA-L6 modem | *b0467d0059USB: serial: option: add u-blox LARA-R6 00B modem | *95688a8a57USB: serial: option: remove old LARA-R6 PID | *53dee78ea3USB: serial: option: add Sierra Wireless EM9191 | *e7764e88e6USB: bcma: Make GPIO explicitly optional | *a190a83db2speakup: fix a segfault caused by switching consoles | *b3c6edbee4slimbus: stream: correct presence rate frequencies | *6b35ac8315slimbus: qcom-ngd: Fix build error when CONFIG_SLIM_QCOM_NGD_CTRL=y && CONFIG_QCOM_RPROC_COMMON=m | *0f847462feRevert "usb: dwc3: disable USB core PHY management" | *23ad214a86ALSA: hda/realtek: Fix the speaker output on Samsung Galaxy Book Pro 360 | *a36b505749ALSA: hda/realtek: fix speakers for Samsung Galaxy Book Pro | *02b94885b2ALSA: usb-audio: Drop snd_BUG_ON() from snd_usbmidi_output_open() | *7176d6f3addrm/amd/display: Add HUBP surface flip interrupt handler | *e57daa7503tracing: kprobe: Fix potential null-ptr-deref on trace_array in kprobe_event_gen_test_exit() | *3a41c0f2a5tracing: kprobe: Fix potential null-ptr-deref on trace_event_file in kprobe_event_gen_test_exit() | *7291dec4f2tracing: Fix race where eprobes can be called before the event | *6517b97134tracing: Fix wild-memory-access in register_synth_event() | *07ba4f0603tracing: Fix memory leak in test_gen_synth_cmd() and test_empty_synth_event() | *8b318f3032tracing/ring-buffer: Have polling block on watermark | *2c21ee020ctracing: Fix memory leak in tracing_read_pipe() | *00f74b1a98ring_buffer: Do not deactivate non-existant pages | *1bea037a1aftrace: Fix null pointer dereference in ftrace_add_mod() | *fadfcf39fbftrace: Optimize the allocation for mcount entries | *5c5f264289ftrace: Fix the possible incorrect kernel message | *2ab2494162cifs: add check for returning value of SMB2_set_info_init | *5783abda58net: thunderbolt: Fix error handling in tbnet_init() | *80e590aeb1net: microchip: sparx5: Fix potential null-ptr-deref in sparx_stats_init() and sparx5_start() | *4a55aec142cifs: Fix wrong return value checking when GETFLAGS | *c8baf1fc24net/x25: Fix skb leak in x25_lapb_receive_frame() | *af4b57fa6bnet: ag71xx: call phylink_disconnect_phy if 
ag71xx_hw_enable() fail in ag71xx_open() | *61404a182ecifs: add check for returning value of SMB2_close_init | *d3233f4bf3platform/surface: aggregator: Do not check for repeated unsequenced packets | *6969171403platform/x86/intel: pmc: Don't unconditionally attach Intel PMC when virtualized | *7d93417d59drbd: use after free in drbd_create_device() | *fc16a2c81abridge: switchdev: Fix memory leaks when changing VLAN protocol | *3d90a668c4net: hns3: fix setting incorrect phy link ksettings for firmware in resetting process | *3f7b2ef8fenet: ena: Fix error handling in ena_init() | *2540eea1bdnet: ionic: Fix error handling in ionic_init_module() | *c08c13cb13xen/pcpu: fix possible memory leak in register_pcpu() | *97009f07f2net: dsa: make dsa_master_ioctl() see through port_hwtstamp_get() shims | *88da008e5enet: mhi: Fix memory leak in mhi_net_dellink() | *8f839715d0bnxt_en: Remove debugfs when pci_register_driver failed | *b88713d92bnet: caif: fix double disconnect client in chnl_net_open() | *6d24034160net: macvlan: Use built-in RCU list checking | *596230471dmISDN: fix misuse of put_device() in mISDN_register_device() | *07a6a8cf17net: liquidio: release resources when liquidio driver open failed | *19feb6cf41soc: imx8m: Enable OCOTP clock before reading the register | *8c54d706d8net: stmmac: ensure tx function is not running in stmmac_xdp_release() | *6219f46c2bnet: hinic: Fix error handling in hinic_module_init() | *7a05e39296mISDN: fix possible memory leak in mISDN_dsp_element_register() | *0ee6455c9cnet: bgmac: Drop free_netdev() from bgmac_enet_remove() | *7ff4fa179ebpf: Initialize same number of free nodes for each pcpu_freelist | *12f178cf05MIPS: Loongson64: Add WARN_ON on kexec related kmalloc failed | *a4d6e024beMIPS: fix duplicate definitions for exported symbols | *44142b652anfp: change eeprom length to max length enumerators | *f23058dc23ata: libata-transport: fix error handling in ata_tdev_add() | *67b2193146ata: libata-transport: fix error handling in ata_tlink_add() | *e7bb1b7a7bata: libata-transport: fix error handling in ata_tport_add() | *377ff82c33ata: libata-transport: fix double ata_host_put() in ata_tport_add() | *494df0b0efarm64: dts: imx8mn: Fix NAND controller size-cells | *7178d568f7arm64: dts: imx8mm: Fix NAND controller size-cells | *8ccf18c82aARM: dts: imx7: Fix NAND controller size-cells | *e884a6c2d4drm: Fix potential null-ptr-deref in drm_vblank_destroy_worker() | *07e56de876drm/drv: Fix potential memory leak in drm_dev_init() | *45c300613bdrm/panel: simple: set bpc field for logic technologies displays | *779f3f9e0cdrm/vc4: kms: Fix IS_ERR() vs NULL check for vc4_kms | *97e5b508e9pinctrl: devicetree: fix null pointer dereferencing in pinctrl_dt_to_map | *9a77b8557fparport_pc: Avoid FIFO port location truncation | *5d03c2911csiox: fix possible memory leak in siox_device_add() | *530e987a02arm64: Fix bit-shifting UB in the MIDR_CPU_MODEL() macro | *d494449782bpf: Fix memory leaks in __check_func_call | *25521fd2e2block: sed-opal: kmalloc the cmd/resp buffers | *2f21d653c6scsi: scsi_transport_sas: Fix error handling in sas_phy_add() | *7cd28bc410pinctrl: rockchip: list all pins in a possible mux route for PX30 | *ab79b8dbe2ASoC: soc-utils: Remove __exit for snd_soc_util_exit() | *eaa8edd865bpf, test_run: Fix alignment problem in bpf_prog_test_run_skb() | *33cabe04d2tty: n_gsm: fix sleep-in-atomic-context bug in gsm_control_send | *ae22294e21serial: imx: Add missing .thaw_noirq hook | *26db1cd519serial: 8250: omap: Flush PM QOS work on remove | *e0db709a58serial: 
8250: omap: Fix unpaired pm_runtime_put_sync() in omap8250_remove() | *83b6d4d6daserial: 8250_omap: remove wait loop from Errata i202 workaround | *76db05ab70serial: 8250: omap: Fix missing PM runtime calls for omap8250_set_mctrl() | *2aee616a6bARM: at91: pm: avoid soft resetting AC DLL | *188546c780ASoC: tas2764: Fix set_tdm_slot in case of single slot | *5782896dafASoC: tas2770: Fix set_tdm_slot in case of single slot | *34eee4189bASoC: core: Fix use-after-free in snd_soc_exit() | *aa6f8aecbbARM: dts: at91: sama7g5: fix signal name of pin PB2 | *487fff700fspi: stm32: Print summary 'callbacks suppressed' message | *2cec2f65c1arm64: dts: qcom: sm8350-hdk: Specify which LDO modes are allowed | *44dbe66bb3arm64: dts: qcom: sm8250-xperia-edo: Specify which LDO modes are allowed | *8b2eae7defarm64: dts: qcom: sm8150-xperia-kumano: Specify which LDO modes are allowed | *c8e76eeea7arm64: dts: qcom: sa8155p-adp: Specify which LDO modes are allowed | *30571f28bbhugetlbfs: don't delete error page from pagecache | *14ddbb83c3KVM: x86/pmu: Do not speculatively query Intel GP PMCs that don't exist yet | *a9b964ed7cspi: intel: Use correct mask for flash and protected regions | *f4eb68642emtd: spi-nor: intel-spi: Disable write protection only if asked | *156d0c823cASoC: codecs: jz4725b: Fix spelling mistake "Sourc" -> "Source", "Routee" -> "Route" | *5907ff9f2cx86/cpu: Add several Intel server CPU model numbers | *41e37d04e3Bluetooth: L2CAP: Fix l2cap_global_chan_by_psm | *b02a025dd1btrfs: remove pointless and double ulist frees in error paths of qgroup tests | *1c366c206fdrm/imx: imx-tve: Fix return type of imx_tve_connector_mode_valid | *1c8ded1b38i2c: i801: add lis3lv02d's I2C address for Vostro 5568 | *b432581f19i2c: tegra: Allocate DMA memory for DMA engine | *7b0ae4c7b9firmware: arm_scmi: Cleanup the core driver removal callback | *1a8a2fef27ACPI: x86: Add another system to quirk list for forcing StorageD3Enable | *8a03a4a5cfNFSv4: Retry LOCK on OLD_STATEID during delegation return | *49ca2227c4btrfs: raid56: properly handle the error when unable to find the missing stripe | *0f7bd3a2dfRDMA/efa: Add EFA 0xefa2 PCI ID | *a42d4363e7ACPI: scan: Add LATT2021 to acpi_ignore_dep_ids[] | *004decd41bdrm/amd/display: Remove wrong pipe control lock | *7779efbb99ASoC: rt1308-sdw: add the default value of some registers | *ef1e4ed858selftests/intel_pstate: fix build for ARCH=x86_64 | *dfd3cc1ef3selftests/futex: fix build for clang | *648467236cASoC: Intel: sof_sdw: add quirk variant for LAPBC710 NUC15 | *64ee750c29ASoC: codecs: jz4725b: fix capture selector naming | *150b74cd06ASoC: codecs: jz4725b: use right control for Capture Volume | *5352d8b315ASoC: codecs: jz4725b: fix reported volume for Master ctl | *85134577a7ASoC: codecs: jz4725b: add missed Line In power control bit | *5e61dffb16spi: intel: Fix the offset to get the 64K erase opcode | *c697cb2e66ASoC: wm8962: Add an event handler for TEMP_HP and TEMP_SPK | *569085124dASoC: rt1019: Fix the TDM settings | *4160a515c7ASoC: mt6660: Keep the pm_runtime enables before component stuff in mt6660_i2c_probe | *2963ec4535ASoC: wm8997: Revert "ASoC: wm8997: Fix PM disable depth imbalance in wm8997_probe" | *30a2f9479cASoC: wm5110: Revert "ASoC: wm5110: Fix PM disable depth imbalance in wm5110_probe" | *3bf6da38a2ASoC: wm5102: Revert "ASoC: wm5102: Fix PM disable depth imbalance in wm5102_probe" | *94fa250ea5mm: shmem: don't truncate page if memory failure happens | *003fa19591mm: hwpoison: handle non-anonymous THP correctly | *a62b1bc603mm: hwpoison: refactor 
refcount check handling * |49ca4a5978Revert "bpf, sockmap: Fix sk->sk_forward_alloc warn_on in sk_stream_kill_queues" * |6fa2a43acdRevert "ALSA: usb-audio: Yet more regression for for the delayed card registration" * |ac2a7a141fMerge 5.15.79 into android13-5.15-lts |/ *3df0eeae4dLinux 5.15.79 *599b24eedfx86/cpu: Restore AMD's DE_CFG MSR after resume *9132fa043fnet: tun: call napi_schedule_prep() to ensure we own a napi *1dea25e25adrm/amdkfd: Migrate in CPU page fault use current mm *a1c303fbd4marvell: octeontx2: build error: unknown type name 'u64' *d948b22834dmaengine: at_hdmac: Check return code of dma_async_device_register *c556ecf32admaengine: at_hdmac: Fix impossible condition *8a941ff34edmaengine: at_hdmac: Don't allow CPU to reorder channel enable *53831f7a13dmaengine: at_hdmac: Fix completion of unissued descriptor in case of errors *14f5462e4admaengine: at_hdmac: Fix descriptor handling when issuing it to hardware *5482403228dmaengine: at_hdmac: Fix concurrency over the active list *82ca19414fdmaengine: at_hdmac: Free the memset buf without holding the chan lock *8fd36e069ddmaengine: at_hdmac: Fix concurrency over descriptor *1ee012d452dmaengine: at_hdmac: Fix concurrency problems by removing atc_complete_all() *90c1b07406dmaengine: at_hdmac: Protect atchan->status with the channel lock *b5ee1fe06admaengine: at_hdmac: Do not call the complete callback on device_terminate_all *9bbf5df0fcdmaengine: at_hdmac: Fix premature completion of desc in issue_pending *f7d1aaa903dmaengine: at_hdmac: Start transfer for cyclic channels in issue_pending *e9777b4efcdmaengine: at_hdmac: Don't start transactions at tx_submit level *4e28674a0edmaengine: at_hdmac: Fix at_lli struct definition *49eba53137cert host tools: Stop complaining about deprecated OpenSSL functions *69e86c6268can: j1939: j1939_send_one(): fix missing CAN header initialization *81fc8f90b8mm/shmem: use page_mapping() to detect page cache for uffd continue *e91451af11mm/memremap.c: map FS_DAX device memory as decrypted *48998c1773mm/damon/dbgfs: check if rm_contexts input is for a real context *c736ed8541udf: Fix a slab-out-of-bounds write bug in udf_find_entry() *2e87eddf57mms: sdhci-esdhc-imx: Fix SDHCI_RESET_ALL for CQHCI *91c38504e5btrfs: zoned: initialize device's zone info for seeding *432c30ba3fbtrfs: selftests: fix wrong error check in btrfs_free_dummy_root() *c9fe4719c6btrfs: fix match incorrectly in dev_args_match_device *f96fd36936wifi: ath11k: avoid deadlock during regulatory update in ath11k_regd_update() *8e2b576cafplatform/x86: hp_wmi: Fix rfkill causing soft blocked wifi *cb3ab0e1e0drm/amdgpu: disable BACO on special BEIGE_GOBY card *dc066a7850drm/i915/dmabuf: fix sg_table handling in map_dma_buf *afbd118838nilfs2: fix use-after-free bug of ns_writer on remount *abc082aac0nilfs2: fix deadlock in nilfs_count_free_blocks() *589da22881ata: libata-scsi: fix SYNCHRONIZE CACHE (16) command failure *51ae4579a5vmlinux.lds.h: Fix placement of '.data..decrypted' section *1f8e08ab32ALSA: usb-audio: Add DSD support for Accuphase DAC-60 *c2451f62b2ALSA: usb-audio: Add quirk entry for M-Audio Micro *031d1480a0ALSA: usb-audio: Yet more regression for for the delayed card registration *574f51e4aaALSA: hda/realtek: Add Positivo C6300 model quirk *7140d7aaf9ALSA: hda: fix potential memleak in 'add_widget_node' *f6d7a487aaALSA: hda/ca0132: add quirk for EVGA Z390 DARK *1ccd55b390ALSA: hda/hdmi - enable runtime pm for more AMD display audio *29100c6742mmc: sdhci-esdhc-imx: use the correct host caps for MMC_CAP_8_BIT_DATA 
*3dce99e2ebmmc: sdhci-tegra: Fix SDHCI_RESET_ALL for CQHCI *9d6bd33e6ammc: sdhci_am654: Fix SDHCI_RESET_ALL for CQHCI *ad01f16ca9mmc: sdhci-of-arasan: Fix SDHCI_RESET_ALL for CQHCI *1aa78c1d01mmc: cqhci: Provide helper for resetting both SDHCI and CQHCI *c198524a99MIPS: jump_label: Fix compat branch range check *9713ceffa4arm64: efi: Fix handling of misaligned runtime regions and drop warning *518e49f059riscv: fix reserved memory setup *d07c3d7491riscv: vdso: fix build with llvm *cc36c7fa5driscv: process: fix kernel info leakage *a8d67367abnet: macvlan: fix memory leaks of macvlan_common_newlink *7b194dd32bethernet: tundra: free irq when alloc ring failed in tsi108_open() *7de10342fenet: mv643xx_eth: disable napi when init rxq or txq failed in mv643xx_eth_open() *88e1dd2d92ethernet: s2io: disable napi when start nic failed in s2io_card_up() *3652f1f8d3net: atlantic: macsec: clear encryption keys from the stack *fca3b0a1fdnet: phy: mscc: macsec: clear encryption keys when freeing a flow *60a0af8813stmmac: dwmac-loongson: fix missing of_node_put() while module exiting *ee4a9bd2c7stmmac: dwmac-loongson: fix missing pci_disable_device() in loongson_dwmac_probe() *4a8770eebcstmmac: dwmac-loongson: fix missing pci_disable_msi() while module exiting *83196d8dc5cxgb4vf: shut down the adapter when t4vf_update_port_info() failed in cxgb4vf_open() *49d8a6e24amctp: Fix an error handling path in mctp_init() *29961d2332stmmac: intel: Update PCH PTP clock rate from 200MHz to 204.8MHz *8604bebc5cstmmac: intel: Enable 2.5Gbps for Intel AlderLake-S *7dec6dae2bnet: cxgb3_main: disable napi when bind qsets failed in cxgb_up() *960f9d30denet: cpsw: disable napi in cpsw_ndo_open() *1360778fdbnet/mlx5e: E-Switch, Fix comparing termination table instance *f13e9ebd29net/mlx5: Allow async trigger completion execution on single CPU systems *48b73b46a5net/mlx5: Bridge, verify LAG state when adding bond to bridge *13b1ea861enet: wwan: iosm: fix memory leak in ipc_pcie_read_bios_cfg *7e4dcacb4dnet: nixge: disable napi when enable interrupts failed in nixge_open() *409731df63net: marvell: prestera: fix memory leak in prestera_rxtx_switch_init() *77ff31cba9netfilter: Cleanup nft_net->module_list from nf_tables_exit_net() *e62cb1c093netfilter: nfnetlink: fix potential dead lock in nfnetlink_rcv_msg() *0bd20318daperf tools: Add the include/perf/ directory to .gitignore *a733671e38perf stat: Fix printing os->prefix in CSV metrics output *c36e9e2c4adrivers: net: xgene: disable napi when register irq failed in xgene_enet_open() *4689bd3a1bnet: lapbether: fix issue of invalid opcode in lapbeth_open() *1dd27541aadmaengine: ti: k3-udma-glue: fix memory leak when register device fail *992e966cafdmaengine: mv_xor_v2: Fix a resource leak in mv_xor_v2_remove() *9766af75badmaengine: pxa_dma: use platform_get_irq_optional *301caa0609tipc: fix the msg->req tlv len check in tipc_nl_compat_name_table_dump_header *6a264203dbnet: broadcom: Fix BCMGENET Kconfig *e7871b9a21net: stmmac: dwmac-meson8b: fix meson8b_devm_clk_prepare_enable() *261178a1c2can: af_can: fix NULL pointer dereference in can_rx_register() *2acb2779b1ipv6: addrlabel: fix infoleak when sending struct ifaddrlblmsg to network *13ecaa6832tcp: prohibit TCP_REPAIR_OPTIONS if data was already sent *bc79cb9fb0drm/vc4: Fix missing platform_unregister_drivers() call in vc4_drm_register() *2845bc9070net: wwan: mhi: fix memory leak in mhi_mbim_dellink *2ce2348c28net: wwan: iosm: fix memory leak in ipc_wwan_dellink *7b6bc50f65hamradio: fix issue of dev reference count leakage in 
bpq_device_event() *f59adebb8cnet: lapbether: fix issue of dev reference count leakage in lapbeth_device_event() *119407dc32KVM: s390: pv: don't allow userspace to set the clock under PV *500bcd3a99phy: ralink: mt7621-pci: add sentinel to quirks table *151dc8087bcapabilities: fix undefined behavior in bit shift for CAP_TO_MASK *435c7ddfd5net: fman: Unregister ethernet device on removal *3a504d6d96bnxt_en: fix potentially incorrect return value for ndo_rx_flow_steer *ac257c43fabnxt_en: Fix possible crash in bnxt_hwrm_set_coal() *d7569302a7net: tun: Fix memory leaks of napi_get_frags *430d1f4964octeontx2-pf: NIX TX overwrites SQ_CTX_HW_S[SQ_INT] *ec0db81883octeontx2-pf: Use hardware register for CQE count *b89a0d8859macsec: clear encryption keys from the stack after setting up offload *eeba7f07a0macsec: fix detection of RXSCs when toggling offloading *3070a880ebmacsec: fix secy->n_rx_sc accounting *e957555a36macsec: delete new rxsc when offload fails *ad25a115f5net: gso: fix panic on frag_list with mixed head alloc types *466ce46f25bpf: Fix wrong reg type conversion in release_reference() *35d8130f2abpf: Add helper macro bpf_for_each_reg_in_vstate *61274498fbbpf, sock_map: Move cancel_work_sync() out of sock lock *32b5dd03bebpf: Fix sockmap calling sleepable function in teardown path *e991558189bpf, sockmap: Fix sk->sk_forward_alloc warn_on in sk_stream_kill_queues *5ad95d7134HID: hyperv: fix possible memory leak in mousevsc_probe() *6dcdd1b68bbpftool: Fix NULL pointer dereference when pin {PROG, MAP, LINK} without FILE *2fc902245cwifi: mac80211: Set TWT Information Frame Disabled bit as 1 *95adbd2ac8bpf, sockmap: Fix the sk->sk_forward_alloc warning of sk_stream_kill_queues *06615967d4bpf, verifier: Fix memory leak in array reallocation for stack state *4335a82c4fsoundwire: qcom: check for outanding writes before doing a read *ae4dad2e53soundwire: qcom: reinit broadcast completion *38c9fa2cc6wifi: cfg80211: fix memory leak in query_regdb_file() *2c6ba0a787wifi: cfg80211: silence a sparse RCU warning *921738c280phy: stm32: fix an error code in probe *fa722006f7hwspinlock: qcom: correct MMIO max register for newer SoCs *3c1bb6187edrm/amdkfd: Fix NULL pointer dereference in svm_migrate_to_ram() *b1f8522771drm/amdkfd: handle CPU fault on COW mapping *36770c045adrm/amdkfd: avoid recursive lock in migrations back to RAM *93a5de7e88fuse: fix readdir cache race *1920cf9454thunderbolt: Add DP OUT resource when DP tunnel is discovered *47dbf24969thunderbolt: Tear down existing tunnels when resuming from hibernate And update the .xml file with the new symbol that we are tracking and the abi preservation fix: 1 function symbol(s) added 'void __dev_kfree_skb_irq(struct sk_buff *, enum skb_free_reason)' type 'struct sdhci_host' changed member 'union { struct { u8 reinit_uhs; u8 reserve01; u8 drv_type; u16 reserve02; u32 reserve03; }; struct { u64 android_kabi_reserved1; }; union { }; }' was added member 'u64 android_kabi_reserved1' was removed Change-Id: If4a059230a137dee54298fff61ec87306bf96b0f Signed-off-by: Greg Kroah-Hartman <gregkh@google.com>
// SPDX-License-Identifier: GPL-2.0-only
/*
 *  kernel/sched/core.c
 *
 *  Core kernel scheduler code and related syscalls
 *
 *  Copyright (C) 1991-2002  Linus Torvalds
 */
#define CREATE_TRACE_POINTS
#include <trace/events/sched.h>
#undef CREATE_TRACE_POINTS

#include "sched.h"

#include <linux/nospec.h>

#include <linux/kcov.h>
#include <linux/scs.h>

#include <asm/switch_to.h>
#include <asm/tlb.h>

#include "../workqueue_internal.h"
#include "../../io_uring/io-wq.h"
#include "../smpboot.h"

#include "pelt.h"
#include "smp.h"

#include <trace/hooks/sched.h>
#include <trace/hooks/dtask.h>
#include <trace/hooks/cgroup.h>

/*
 * Export tracepoints that act as a bare tracehook (ie: have no trace event
 * associated with them) to allow external modules to probe them.
 */
EXPORT_TRACEPOINT_SYMBOL_GPL(pelt_cfs_tp);
EXPORT_TRACEPOINT_SYMBOL_GPL(pelt_rt_tp);
EXPORT_TRACEPOINT_SYMBOL_GPL(pelt_dl_tp);
EXPORT_TRACEPOINT_SYMBOL_GPL(pelt_irq_tp);
EXPORT_TRACEPOINT_SYMBOL_GPL(pelt_se_tp);
EXPORT_TRACEPOINT_SYMBOL_GPL(pelt_thermal_tp);
EXPORT_TRACEPOINT_SYMBOL_GPL(sched_cpu_capacity_tp);
EXPORT_TRACEPOINT_SYMBOL_GPL(sched_overutilized_tp);
EXPORT_TRACEPOINT_SYMBOL_GPL(sched_util_est_cfs_tp);
EXPORT_TRACEPOINT_SYMBOL_GPL(sched_util_est_se_tp);
EXPORT_TRACEPOINT_SYMBOL_GPL(sched_update_nr_running_tp);
EXPORT_TRACEPOINT_SYMBOL_GPL(sched_switch);
EXPORT_TRACEPOINT_SYMBOL_GPL(sched_waking);
#ifdef CONFIG_SCHEDSTATS
EXPORT_TRACEPOINT_SYMBOL_GPL(sched_stat_sleep);
EXPORT_TRACEPOINT_SYMBOL_GPL(sched_stat_wait);
EXPORT_TRACEPOINT_SYMBOL_GPL(sched_stat_iowait);
EXPORT_TRACEPOINT_SYMBOL_GPL(sched_stat_blocked);
#endif

DEFINE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues);
EXPORT_SYMBOL_GPL(runqueues);

#ifdef CONFIG_SCHED_DEBUG
/*
 * Debugging: various feature bits
 *
 * If SCHED_DEBUG is disabled, each compilation unit has its own copy of
 * sysctl_sched_features, defined in sched.h, to allow constants propagation
 * at compile time and compiler optimization based on features default.
 */
#define SCHED_FEAT(name, enabled)	\
	(1UL << __SCHED_FEAT_##name) * enabled |
const_debug unsigned int sysctl_sched_features =
#include "features.h"
	0;
EXPORT_SYMBOL_GPL(sysctl_sched_features);
#undef SCHED_FEAT

/*
 * Print a warning if need_resched is set for the given duration (if
 * LATENCY_WARN is enabled).
 *
 * If sysctl_resched_latency_warn_once is set, only one warning will be shown
 * per boot.
 */
__read_mostly int sysctl_resched_latency_warn_ms = 100;
__read_mostly int sysctl_resched_latency_warn_once = 1;
#endif /* CONFIG_SCHED_DEBUG */

/*
 * Number of tasks to iterate in a single balance run.
 * Limited because this is done with IRQs disabled.
 */
const_debug unsigned int sysctl_sched_nr_migrate = 32;

/*
 * period over which we measure -rt task CPU usage in us.
 * default: 1s
 */
unsigned int sysctl_sched_rt_period = 1000000;

__read_mostly int scheduler_running;

#ifdef CONFIG_SCHED_CORE

DEFINE_STATIC_KEY_FALSE(__sched_core_enabled);

/* kernel prio, less is more */
static inline int __task_prio(struct task_struct *p)
{
	if (p->sched_class == &stop_sched_class) /* trumps deadline */
		return -2;

	if (rt_prio(p->prio)) /* includes deadline */
		return p->prio; /* [-1, 99] */

	if (p->sched_class == &idle_sched_class)
		return MAX_RT_PRIO + NICE_WIDTH; /* 140 */

	return MAX_RT_PRIO + MAX_NICE; /* 120, squash fair */
}

/*
 * l(a,b)
 * le(a,b) := !l(b,a)
 * g(a,b)  := l(b,a)
 * ge(a,b) := !l(a,b)
 */
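/*
 * The identities above mean a single strict less() predicate is enough to
 * derive all four comparisons over a total order. An illustrative sketch
 * with plain integers (not part of this file):
 *
 *	static bool l(int a, int b)  { return a < b;    }
 *	static bool le(int a, int b) { return !l(b, a); }	// a <= b
 *	static bool g(int a, int b)  { return l(b, a);  }	// a >  b
 *	static bool ge(int a, int b) { return !l(a, b); }	// a >= b
 *
 * This is exactly the contract the rb-tree helpers below rely on: only
 * prio_less() has to be implemented.
 */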

/* real prio, less is less */
static inline bool prio_less(struct task_struct *a, struct task_struct *b, bool in_fi)
{

	int pa = __task_prio(a), pb = __task_prio(b);

	if (-pa < -pb)
		return true;

	if (-pb < -pa)
		return false;

	if (pa == -1) /* dl_prio() doesn't work because of stop_class above */
		return !dl_time_before(a->dl.deadline, b->dl.deadline);

	if (pa == MAX_RT_PRIO + MAX_NICE)	/* fair */
		return cfs_prio_less(a, b, in_fi);

	return false;
}

static inline bool __sched_core_less(struct task_struct *a, struct task_struct *b)
{
	if (a->core_cookie < b->core_cookie)
		return true;

	if (a->core_cookie > b->core_cookie)
		return false;

	/* flip prio, so high prio is leftmost */
	if (prio_less(b, a, task_rq(a)->core->core_forceidle))
		return true;

	return false;
}

#define __node_2_sc(node) rb_entry((node), struct task_struct, core_node)

static inline bool rb_sched_core_less(struct rb_node *a, const struct rb_node *b)
{
	return __sched_core_less(__node_2_sc(a), __node_2_sc(b));
}

static inline int rb_sched_core_cmp(const void *key, const struct rb_node *node)
{
	const struct task_struct *p = __node_2_sc(node);
	unsigned long cookie = (unsigned long)key;

	if (cookie < p->core_cookie)
		return -1;

	if (cookie > p->core_cookie)
		return 1;

	return 0;
}

void sched_core_enqueue(struct rq *rq, struct task_struct *p)
{
	rq->core->core_task_seq++;

	if (!p->core_cookie)
		return;

	rb_add(&p->core_node, &rq->core_tree, rb_sched_core_less);
}

void sched_core_dequeue(struct rq *rq, struct task_struct *p)
{
	rq->core->core_task_seq++;

	if (!sched_core_enqueued(p))
		return;

	rb_erase(&p->core_node, &rq->core_tree);
	RB_CLEAR_NODE(&p->core_node);
}

/*
 * Find left-most (aka, highest priority) task matching @cookie.
 */
static struct task_struct *sched_core_find(struct rq *rq, unsigned long cookie)
{
	struct rb_node *node;

	node = rb_find_first((void *)cookie, &rq->core_tree, rb_sched_core_cmp);
	/*
	 * The idle task always matches any cookie!
	 */
	if (!node)
		return idle_sched_class.pick_task(rq);

	return __node_2_sc(node);
}

static struct task_struct *sched_core_next(struct task_struct *p, unsigned long cookie)
{
	struct rb_node *node = &p->core_node;

	node = rb_next(node);
	if (!node)
		return NULL;

	p = container_of(node, struct task_struct, core_node);
	if (p->core_cookie != cookie)
		return NULL;

	return p;
}

/*
 * Magic required such that:
 *
 *	raw_spin_rq_lock(rq);
 *	...
 *	raw_spin_rq_unlock(rq);
 *
 * ends up locking and unlocking the _same_ lock, and all CPUs
 * always agree on what rq has what lock.
 *
 * XXX entirely possible to selectively enable cores, don't bother for now.
 */

static DEFINE_MUTEX(sched_core_mutex);
static atomic_t sched_core_count;
static struct cpumask sched_core_mask;

static void sched_core_lock(int cpu, unsigned long *flags)
{
	const struct cpumask *smt_mask = cpu_smt_mask(cpu);
	int t, i = 0;

	local_irq_save(*flags);
	for_each_cpu(t, smt_mask)
		raw_spin_lock_nested(&cpu_rq(t)->__lock, i++);
}

static void sched_core_unlock(int cpu, unsigned long *flags)
{
	const struct cpumask *smt_mask = cpu_smt_mask(cpu);
	int t;

	for_each_cpu(t, smt_mask)
		raw_spin_unlock(&cpu_rq(t)->__lock);
	local_irq_restore(*flags);
}

static void __sched_core_flip(bool enabled)
{
	unsigned long flags;
	int cpu, t;

	cpus_read_lock();

	/*
	 * Toggle the online cores, one by one.
	 */
	cpumask_copy(&sched_core_mask, cpu_online_mask);
	for_each_cpu(cpu, &sched_core_mask) {
		const struct cpumask *smt_mask = cpu_smt_mask(cpu);

		sched_core_lock(cpu, &flags);

		for_each_cpu(t, smt_mask)
			cpu_rq(t)->core_enabled = enabled;

		sched_core_unlock(cpu, &flags);

		cpumask_andnot(&sched_core_mask, &sched_core_mask, smt_mask);
	}

	/*
	 * Toggle the offline CPUs.
	 */
	cpumask_copy(&sched_core_mask, cpu_possible_mask);
	cpumask_andnot(&sched_core_mask, &sched_core_mask, cpu_online_mask);

	for_each_cpu(cpu, &sched_core_mask)
		cpu_rq(cpu)->core_enabled = enabled;

	cpus_read_unlock();
}

static void sched_core_assert_empty(void)
{
	int cpu;

	for_each_possible_cpu(cpu)
		WARN_ON_ONCE(!RB_EMPTY_ROOT(&cpu_rq(cpu)->core_tree));
}

static void __sched_core_enable(void)
{
	static_branch_enable(&__sched_core_enabled);
	/*
	 * Ensure all previous instances of raw_spin_rq_*lock() have finished
	 * and future ones will observe !sched_core_disabled().
	 */
	synchronize_rcu();
	__sched_core_flip(true);
	sched_core_assert_empty();
}

static void __sched_core_disable(void)
{
	sched_core_assert_empty();
	__sched_core_flip(false);
	static_branch_disable(&__sched_core_enabled);
}

void sched_core_get(void)
{
	if (atomic_inc_not_zero(&sched_core_count))
		return;

	mutex_lock(&sched_core_mutex);
	if (!atomic_read(&sched_core_count))
		__sched_core_enable();

	smp_mb__before_atomic();
	atomic_inc(&sched_core_count);
	mutex_unlock(&sched_core_mutex);
}

static void __sched_core_put(struct work_struct *work)
{
	if (atomic_dec_and_mutex_lock(&sched_core_count, &sched_core_mutex)) {
		__sched_core_disable();
		mutex_unlock(&sched_core_mutex);
	}
}

void sched_core_put(void)
{
	static DECLARE_WORK(_work, __sched_core_put);

	/*
	 * "There can be only one"
	 *
	 * Either this is the last one, or we don't actually need to do any
	 * 'work'. If it is the last *again*, we rely on
	 * WORK_STRUCT_PENDING_BIT.
	 */
	if (!atomic_add_unless(&sched_core_count, -1, 1))
		schedule_work(&_work);
}

#else /* !CONFIG_SCHED_CORE */

static inline void sched_core_enqueue(struct rq *rq, struct task_struct *p) { }
static inline void sched_core_dequeue(struct rq *rq, struct task_struct *p) { }

#endif /* CONFIG_SCHED_CORE */

/*
 * part of the period that we allow rt tasks to run in us.
 * default: 0.95s
 */
int sysctl_sched_rt_runtime = 950000;


/*
 * Serialization rules:
 *
 * Lock order:
 *
 *   p->pi_lock
 *     rq->lock
 *       hrtimer_cpu_base->lock (hrtimer_start() for bandwidth controls)
 *
 *  rq1->lock
 *    rq2->lock  where: rq1 < rq2
 *
 * Regular state:
 *
 * Normal scheduling state is serialized by rq->lock. __schedule() takes the
 * local CPU's rq->lock, it optionally removes the task from the runqueue and
 * always looks at the local rq data structures to find the most eligible task
 * to run next.
 *
 * Task enqueue is also under rq->lock, possibly taken from another CPU.
 * Wakeups from another LLC domain might use an IPI to transfer the enqueue to
 * the local CPU to avoid bouncing the runqueue state around [ see
 * ttwu_queue_wakelist() ]
 *
 * Task wakeup, specifically wakeups that involve migration, are horribly
 * complicated to avoid having to take two rq->locks.
 *
 * Special state:
 *
 * System-calls and anything external will use task_rq_lock() which acquires
 * both p->pi_lock and rq->lock. As a consequence the state they change is
 * stable while holding either lock:
 *
 *  - sched_setaffinity()/
 *    set_cpus_allowed_ptr():	p->cpus_ptr, p->nr_cpus_allowed
 *  - set_user_nice():		p->se.load, p->*prio
 *  - __sched_setscheduler():	p->sched_class, p->policy, p->*prio,
 *				p->se.load, p->rt_priority,
 *				p->dl.dl_{runtime, deadline, period, flags, bw, density}
 *  - sched_setnuma():		p->numa_preferred_nid
 *  - sched_move_task()/
 *    cpu_cgroup_fork():	p->sched_task_group
 *  - uclamp_update_active()	p->uclamp*
 *
 * p->state <- TASK_*:
 *
 *   is changed locklessly using set_current_state(), __set_current_state() or
 *   set_special_state(), see their respective comments, or by
 *   try_to_wake_up(). This latter uses p->pi_lock to serialize against
 *   concurrent self.
 *
 * p->on_rq <- { 0, 1 = TASK_ON_RQ_QUEUED, 2 = TASK_ON_RQ_MIGRATING }:
 *
 *   is set by activate_task() and cleared by deactivate_task(), under
 *   rq->lock. Non-zero indicates the task is runnable, the special
 *   ON_RQ_MIGRATING state is used for migration without holding both
 *   rq->locks. It indicates task_cpu() is not stable, see task_rq_lock().
 *
 * p->on_cpu <- { 0, 1 }:
 *
 *   is set by prepare_task() and cleared by finish_task() such that it will be
 *   set before p is scheduled-in and cleared after p is scheduled-out, both
 *   under rq->lock. Non-zero indicates the task is running on its CPU.
 *
 *   [ The astute reader will observe that it is possible for two tasks on one
 *     CPU to have ->on_cpu = 1 at the same time. ]
 *
 * task_cpu(p): is changed by set_task_cpu(), the rules are:
 *
 *  - Don't call set_task_cpu() on a blocked task:
 *
 *    We don't care what CPU we're not running on, this simplifies hotplug,
 *    the CPU assignment of blocked tasks isn't required to be valid.
 *
 *  - for try_to_wake_up(), called under p->pi_lock:
 *
 *    This allows try_to_wake_up() to only take one rq->lock, see its comment.
 *
 *  - for migration called under rq->lock:
 *    [ see task_on_rq_migrating() in task_rq_lock() ]
 *
 *    o move_queued_task()
 *    o detach_task()
 *
 *  - for migration called under double_rq_lock():
 *
 *    o __migrate_swap_task()
 *    o push_rt_task() / pull_rt_task()
 *    o push_dl_task() / pull_dl_task()
 *    o dl_task_offline_migration()
 *
 */
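/*
 * A userspace analogue of the lock order documented above (a hedged,
 * illustrative sketch using pthreads, not kernel code): deadlock freedom
 * follows from every path taking the outer "pi_lock" before the inner
 * "rq lock", and from nested runqueue locks always being taken in a
 * single global order (here: by address, mirroring "rq1 < rq2"):
 *
 *	#include <pthread.h>
 *
 *	static pthread_mutex_t pi_lock = PTHREAD_MUTEX_INITIALIZER;
 *	static pthread_mutex_t rq_lock = PTHREAD_MUTEX_INITIALIZER;
 *
 *	static void update_special_state(void)
 *	{
 *		pthread_mutex_lock(&pi_lock);	// outer lock first
 *		pthread_mutex_lock(&rq_lock);	// then the inner lock
 *		// state changed here is stable under either lock
 *		pthread_mutex_unlock(&rq_lock);
 *		pthread_mutex_unlock(&pi_lock);
 *	}
 *
 *	static void lock_two(pthread_mutex_t *a, pthread_mutex_t *b)
 *	{
 *		if (a > b) {			// impose one global order
 *			pthread_mutex_t *t = a;
 *			a = b;
 *			b = t;
 *		}
 *		pthread_mutex_lock(a);
 *		pthread_mutex_lock(b);
 *	}
 */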

void raw_spin_rq_lock_nested(struct rq *rq, int subclass)
{
	raw_spinlock_t *lock;

	/* Matches synchronize_rcu() in __sched_core_enable() */
	preempt_disable();
	if (sched_core_disabled()) {
		raw_spin_lock_nested(&rq->__lock, subclass);
		/* preempt_count *MUST* be > 1 */
		preempt_enable_no_resched();
		return;
	}

	for (;;) {
		lock = __rq_lockp(rq);
		raw_spin_lock_nested(lock, subclass);
		if (likely(lock == __rq_lockp(rq))) {
			/* preempt_count *MUST* be > 1 */
			preempt_enable_no_resched();
			return;
		}
		raw_spin_unlock(lock);
	}
}
EXPORT_SYMBOL_GPL(raw_spin_rq_lock_nested);

bool raw_spin_rq_trylock(struct rq *rq)
{
	raw_spinlock_t *lock;
	bool ret;

	/* Matches synchronize_rcu() in __sched_core_enable() */
	preempt_disable();
	if (sched_core_disabled()) {
		ret = raw_spin_trylock(&rq->__lock);
		preempt_enable();
		return ret;
	}

	for (;;) {
		lock = __rq_lockp(rq);
		ret = raw_spin_trylock(lock);
		if (!ret || (likely(lock == __rq_lockp(rq)))) {
			preempt_enable();
			return ret;
		}
		raw_spin_unlock(lock);
	}
}

void raw_spin_rq_unlock(struct rq *rq)
{
	raw_spin_unlock(rq_lockp(rq));
}
EXPORT_SYMBOL_GPL(raw_spin_rq_unlock);

#ifdef CONFIG_SMP
/*
 * double_rq_lock - safely lock two runqueues
 */
void double_rq_lock(struct rq *rq1, struct rq *rq2)
{
	lockdep_assert_irqs_disabled();

	if (rq_order_less(rq2, rq1))
		swap(rq1, rq2);

	raw_spin_rq_lock(rq1);
	if (__rq_lockp(rq1) != __rq_lockp(rq2))
		raw_spin_rq_lock_nested(rq2, SINGLE_DEPTH_NESTING);

	double_rq_clock_clear_update(rq1, rq2);
}
EXPORT_SYMBOL_GPL(double_rq_lock);
#endif

/*
 * __task_rq_lock - lock the rq @p resides on.
 */
struct rq *__task_rq_lock(struct task_struct *p, struct rq_flags *rf)
	__acquires(rq->lock)
{
	struct rq *rq;

	lockdep_assert_held(&p->pi_lock);

	for (;;) {
		rq = task_rq(p);
		raw_spin_rq_lock(rq);
		if (likely(rq == task_rq(p) && !task_on_rq_migrating(p))) {
			rq_pin_lock(rq, rf);
			return rq;
		}
		raw_spin_rq_unlock(rq);

		while (unlikely(task_on_rq_migrating(p)))
			cpu_relax();
	}
}
EXPORT_SYMBOL_GPL(__task_rq_lock);

/*
 * task_rq_lock - lock p->pi_lock and lock the rq @p resides on.
 */
struct rq *task_rq_lock(struct task_struct *p, struct rq_flags *rf)
	__acquires(p->pi_lock)
	__acquires(rq->lock)
{
	struct rq *rq;

	for (;;) {
		raw_spin_lock_irqsave(&p->pi_lock, rf->flags);
		rq = task_rq(p);
		raw_spin_rq_lock(rq);
		/*
		 *	move_queued_task()		task_rq_lock()
		 *
		 *	ACQUIRE (rq->lock)
		 *	[S] ->on_rq = MIGRATING		[L] rq = task_rq()
		 *	WMB (__set_task_cpu())		ACQUIRE (rq->lock);
		 *	[S] ->cpu = new_cpu		[L] task_rq()
		 *					[L] ->on_rq
		 *	RELEASE (rq->lock)
		 *
		 * If we observe the old CPU in task_rq_lock(), the acquire of
		 * the old rq->lock will fully serialize against the stores.
		 *
		 * If we observe the new CPU in task_rq_lock(), the address
		 * dependency headed by '[L] rq = task_rq()' and the acquire
		 * will pair with the WMB to ensure we then also see migrating.
		 */
		if (likely(rq == task_rq(p) && !task_on_rq_migrating(p))) {
			rq_pin_lock(rq, rf);
			return rq;
		}
		raw_spin_rq_unlock(rq);
		raw_spin_unlock_irqrestore(&p->pi_lock, rf->flags);

		while (unlikely(task_on_rq_migrating(p)))
			cpu_relax();
	}
}
EXPORT_SYMBOL_GPL(task_rq_lock);

/*
 * RQ-clock updating methods:
 */

static void update_rq_clock_task(struct rq *rq, s64 delta)
{
	/*
	 * In theory, the compile should just see 0 here, and optimize out the call
	 * to sched_rt_avg_update. But I don't trust it...
	 */
	s64 __maybe_unused steal = 0, irq_delta = 0;

#ifdef CONFIG_IRQ_TIME_ACCOUNTING
	irq_delta = irq_time_read(cpu_of(rq)) - rq->prev_irq_time;

	/*
	 * Since irq_time is only updated on {soft,}irq_exit, we might run into
	 * this case when a previous update_rq_clock() happened inside a
	 * {soft,}irq region.
	 *
	 * When this happens, we stop ->clock_task and only update the
	 * prev_irq_time stamp to account for the part that fit, so that a next
	 * update will consume the rest. This ensures ->clock_task is
	 * monotonic.
	 *
	 * It does however cause some slight miss-attribution of {soft,}irq
	 * time, a more accurate solution would be to update the irq_time using
	 * the current rq->clock timestamp, except that would require using
	 * atomic ops.
	 */
	if (irq_delta > delta)
		irq_delta = delta;

	rq->prev_irq_time += irq_delta;
	delta -= irq_delta;
#endif
#ifdef CONFIG_PARAVIRT_TIME_ACCOUNTING
	if (static_key_false((&paravirt_steal_rq_enabled))) {
		steal = paravirt_steal_clock(cpu_of(rq));
		steal -= rq->prev_steal_time_rq;

		if (unlikely(steal > delta))
			steal = delta;

		rq->prev_steal_time_rq += steal;
		delta -= steal;
	}
#endif

	rq->clock_task += delta;

#ifdef CONFIG_HAVE_SCHED_AVG_IRQ
	if ((irq_delta + steal) && sched_feat(NONTASK_CAPACITY))
		update_irq_load_avg(rq, irq_delta + steal);
#endif
	update_rq_clock_pelt(rq, delta);
}

void update_rq_clock(struct rq *rq)
{
	s64 delta;

	lockdep_assert_rq_held(rq);

	if (rq->clock_update_flags & RQCF_ACT_SKIP)
		return;

#ifdef CONFIG_SCHED_DEBUG
	if (sched_feat(WARN_DOUBLE_CLOCK))
		SCHED_WARN_ON(rq->clock_update_flags & RQCF_UPDATED);
	rq->clock_update_flags |= RQCF_UPDATED;
#endif

	delta = sched_clock_cpu(cpu_of(rq)) - rq->clock;
	if (delta < 0)
		return;
	rq->clock += delta;
	update_rq_clock_task(rq, delta);
}
EXPORT_SYMBOL_GPL(update_rq_clock);

#ifdef CONFIG_SCHED_HRTICK
/*
 * Use HR-timers to deliver accurate preemption points.
 */

static void hrtick_clear(struct rq *rq)
{
	if (hrtimer_active(&rq->hrtick_timer))
		hrtimer_cancel(&rq->hrtick_timer);
}

/*
 * High-resolution timer tick.
 * Runs from hardirq context with interrupts disabled.
 */
static enum hrtimer_restart hrtick(struct hrtimer *timer)
{
	struct rq *rq = container_of(timer, struct rq, hrtick_timer);
	struct rq_flags rf;

	WARN_ON_ONCE(cpu_of(rq) != smp_processor_id());

	rq_lock(rq, &rf);
	update_rq_clock(rq);
	rq->curr->sched_class->task_tick(rq, rq->curr, 1);
	rq_unlock(rq, &rf);

	return HRTIMER_NORESTART;
}

#ifdef CONFIG_SMP

static void __hrtick_restart(struct rq *rq)
{
	struct hrtimer *timer = &rq->hrtick_timer;
	ktime_t time = rq->hrtick_time;

	hrtimer_start(timer, time, HRTIMER_MODE_ABS_PINNED_HARD);
}

/*
 * called from hardirq (IPI) context
 */
static void __hrtick_start(void *arg)
{
	struct rq *rq = arg;
	struct rq_flags rf;

	rq_lock(rq, &rf);
	__hrtick_restart(rq);
	rq_unlock(rq, &rf);
}

/*
 * Called to set the hrtick timer state.
 *
 * called with rq->lock held and irqs disabled
 */
void hrtick_start(struct rq *rq, u64 delay)
{
	struct hrtimer *timer = &rq->hrtick_timer;
	s64 delta;

	/*
	 * Don't schedule slices shorter than 10000ns, that just
	 * doesn't make sense and can cause timer DoS.
	 */
	delta = max_t(s64, delay, 10000LL);
	rq->hrtick_time = ktime_add_ns(timer->base->get_time(), delta);

	if (rq == this_rq())
		__hrtick_restart(rq);
	else
		smp_call_function_single_async(cpu_of(rq), &rq->hrtick_csd);
}

#else
/*
 * Called to set the hrtick timer state.
 *
 * called with rq->lock held and irqs disabled
 */
void hrtick_start(struct rq *rq, u64 delay)
{
	/*
	 * Don't schedule slices shorter than 10000ns, that just
	 * doesn't make sense. Rely on vruntime for fairness.
	 */
	delay = max_t(u64, delay, 10000LL);
	hrtimer_start(&rq->hrtick_timer, ns_to_ktime(delay),
		      HRTIMER_MODE_REL_PINNED_HARD);
}

#endif /* CONFIG_SMP */

static void hrtick_rq_init(struct rq *rq)
{
#ifdef CONFIG_SMP
	INIT_CSD(&rq->hrtick_csd, __hrtick_start, rq);
#endif
	hrtimer_init(&rq->hrtick_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_HARD);
	rq->hrtick_timer.function = hrtick;
}
#else /* CONFIG_SCHED_HRTICK */
static inline void hrtick_clear(struct rq *rq)
{
}

static inline void hrtick_rq_init(struct rq *rq)
{
}
#endif /* CONFIG_SCHED_HRTICK */

/*
 * cmpxchg based fetch_or, macro so it works for different integer types
 */
#define fetch_or(ptr, mask)						\
	({								\
		typeof(ptr) _ptr = (ptr);				\
		typeof(mask) _mask = (mask);				\
		typeof(*_ptr) _old, _val = *_ptr;			\
									\
		for (;;) {						\
			_old = cmpxchg(_ptr, _val, _val | _mask);	\
			if (_old == _val)				\
				break;					\
			_val = _old;					\
		}							\
		_old;							\
	})
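/*
 * The macro above open-codes an atomic fetch-or from cmpxchg() so that it
 * works for any integer type. In C11 userspace the same operation is a
 * single library call; an illustrative, self-contained sketch (not part
 * of this file):
 *
 *	#include <stdatomic.h>
 *	#include <stdio.h>
 *
 *	int main(void)
 *	{
 *		atomic_uint flags = 0x1;
 *		unsigned int old = atomic_fetch_or(&flags, 0x4);
 *
 *		// prints "old=0x1 new=0x5"
 *		printf("old=0x%x new=0x%x\n", old, atomic_load(&flags));
 *		return 0;
 *	}
 *
 * Like the cmpxchg() loop, atomic_fetch_or() returns the value the word
 * held *before* the OR; that prior value is exactly what
 * set_nr_and_not_polling() below tests for _TIF_POLLING_NRFLAG.
 */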

#if defined(CONFIG_SMP) && defined(TIF_POLLING_NRFLAG)
/*
 * Atomically set TIF_NEED_RESCHED and test for TIF_POLLING_NRFLAG,
 * this avoids any races wrt polling state changes and thereby avoids
 * spurious IPIs.
 */
static bool set_nr_and_not_polling(struct task_struct *p)
{
	struct thread_info *ti = task_thread_info(p);
	return !(fetch_or(&ti->flags, _TIF_NEED_RESCHED) & _TIF_POLLING_NRFLAG);
}

/*
 * Atomically set TIF_NEED_RESCHED if TIF_POLLING_NRFLAG is set.
 *
 * If this returns true, then the idle task promises to call
 * sched_ttwu_pending() and reschedule soon.
 */
static bool set_nr_if_polling(struct task_struct *p)
{
	struct thread_info *ti = task_thread_info(p);
	typeof(ti->flags) old, val = READ_ONCE(ti->flags);

	for (;;) {
		if (!(val & _TIF_POLLING_NRFLAG))
			return false;
		if (val & _TIF_NEED_RESCHED)
			return true;
		old = cmpxchg(&ti->flags, val, val | _TIF_NEED_RESCHED);
		if (old == val)
			break;
		val = old;
	}
	return true;
}

#else
static bool set_nr_and_not_polling(struct task_struct *p)
{
	set_tsk_need_resched(p);
	return true;
}

#ifdef CONFIG_SMP
static bool set_nr_if_polling(struct task_struct *p)
{
	return false;
}
#endif
#endif

static bool __wake_q_add(struct wake_q_head *head, struct task_struct *task)
{
	struct wake_q_node *node = &task->wake_q;

	/*
	 * Atomically grab the task, if ->wake_q is !nil already it means
	 * it's already queued (either by us or someone else) and will get the
	 * wakeup due to that.
	 *
	 * In order to ensure that a pending wakeup will observe our pending
	 * state, even in the failed case, an explicit smp_mb() must be used.
	 */
	smp_mb__before_atomic();
	if (unlikely(cmpxchg_relaxed(&node->next, NULL, WAKE_Q_TAIL)))
		return false;

	/*
	 * The head is context local, there can be no concurrency.
	 */
	*head->lastp = node;
	head->lastp = &node->next;
	head->count++;
	return true;
}

/**
 * wake_q_add() - queue a wakeup for 'later' waking.
 * @head: the wake_q_head to add @task to
 * @task: the task to queue for 'later' wakeup
 *
 * Queue a task for later wakeup, most likely by the wake_up_q() call in the
 * same context, _HOWEVER_ this is not guaranteed, the wakeup can come
 * instantly.
 *
 * This function must be used as-if it were wake_up_process(); IOW the task
 * must be ready to be woken at this location.
 */
void wake_q_add(struct wake_q_head *head, struct task_struct *task)
{
	if (__wake_q_add(head, task))
		get_task_struct(task);
}

/**
 * wake_q_add_safe() - safely queue a wakeup for 'later' waking.
 * @head: the wake_q_head to add @task to
 * @task: the task to queue for 'later' wakeup
 *
 * Queue a task for later wakeup, most likely by the wake_up_q() call in the
 * same context, _HOWEVER_ this is not guaranteed, the wakeup can come
 * instantly.
 *
 * This function must be used as-if it were wake_up_process(); IOW the task
 * must be ready to be woken at this location.
 *
 * This function is essentially a task-safe equivalent to wake_q_add(). Callers
 * that already hold reference to @task can call the 'safe' version and trust
 * wake_q to do the right thing depending whether or not the @task is already
 * queued for wakeup.
 */
void wake_q_add_safe(struct wake_q_head *head, struct task_struct *task)
{
	if (!__wake_q_add(head, task))
		put_task_struct(task);
}

void wake_up_q(struct wake_q_head *head)
{
	struct wake_q_node *node = head->first;

	while (node != WAKE_Q_TAIL) {
		struct task_struct *task;

		task = container_of(node, struct task_struct, wake_q);
		/* Task can safely be re-inserted now: */
		node = node->next;
		task->wake_q.next = NULL;
		task->wake_q_count = head->count;

		/*
		 * wake_up_process() executes a full barrier, which pairs with
		 * the queueing in wake_q_add() so as not to miss wakeups.
		 */
		wake_up_process(task);
		task->wake_q_count = 0;
		put_task_struct(task);
	}
}
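/*
 * Typical wake_q usage pattern (a hedged sketch; DEFINE_WAKE_Q comes from
 * <linux/sched/wake_q.h>, and pick_next_waiter() below is a hypothetical
 * helper standing in for a caller's own waiter list): wakeups are queued
 * while a lock is held and only issued after it is dropped, so a woken
 * task never immediately blocks on the lock its waker still holds:
 *
 *	DEFINE_WAKE_Q(wake_q);
 *	struct task_struct *waiter;
 *
 *	spin_lock(&some_lock);
 *	while ((waiter = pick_next_waiter()) != NULL)
 *		wake_q_add(&wake_q, waiter);	// just queues + takes a ref
 *	spin_unlock(&some_lock);
 *
 *	wake_up_q(&wake_q);	// the actual wakeups, lock no longer held
 */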

/*
 * resched_curr - mark rq's current task 'to be rescheduled now'.
 *
 * On UP this means the setting of the need_resched flag, on SMP it
 * might also involve a cross-CPU call to trigger the scheduler on
 * the target CPU.
 */
void resched_curr(struct rq *rq)
{
	struct task_struct *curr = rq->curr;
	int cpu;

	lockdep_assert_rq_held(rq);

	if (test_tsk_need_resched(curr))
		return;

	cpu = cpu_of(rq);

	if (cpu == smp_processor_id()) {
		set_tsk_need_resched(curr);
		set_preempt_need_resched();
		return;
	}

	if (set_nr_and_not_polling(curr))
		smp_send_reschedule(cpu);
	else
		trace_sched_wake_idle_without_ipi(cpu);
}
EXPORT_SYMBOL_GPL(resched_curr);

void resched_cpu(int cpu)
{
	struct rq *rq = cpu_rq(cpu);
	unsigned long flags;

	raw_spin_rq_lock_irqsave(rq, flags);
	if (cpu_online(cpu) || cpu == smp_processor_id())
		resched_curr(rq);
	raw_spin_rq_unlock_irqrestore(rq, flags);
}

#ifdef CONFIG_SMP
#ifdef CONFIG_NO_HZ_COMMON
/*
 * In the semi idle case, use the nearest busy CPU for migrating timers
 * from an idle CPU. This is good for power-savings.
 *
 * We don't do similar optimization for completely idle system, as
 * selecting an idle CPU will add more delays to the timers than intended
 * (as that CPU's timer base may not be uptodate wrt jiffies etc).
 */
int get_nohz_timer_target(void)
{
	int i, cpu = smp_processor_id(), default_cpu = -1;
	struct sched_domain *sd;
	const struct cpumask *hk_mask;
	bool done = false;

	trace_android_rvh_get_nohz_timer_target(&cpu, &done);
	if (done)
		return cpu;

	if (housekeeping_cpu(cpu, HK_FLAG_TIMER)) {
		if (!idle_cpu(cpu))
			return cpu;
		default_cpu = cpu;
	}

	hk_mask = housekeeping_cpumask(HK_FLAG_TIMER);

	rcu_read_lock();
	for_each_domain(cpu, sd) {
		for_each_cpu_and(i, sched_domain_span(sd), hk_mask) {
			if (cpu == i)
				continue;

			if (!idle_cpu(i)) {
				cpu = i;
				goto unlock;
			}
		}
	}

	if (default_cpu == -1)
		default_cpu = housekeeping_any_cpu(HK_FLAG_TIMER);
	cpu = default_cpu;
unlock:
	rcu_read_unlock();
	return cpu;
}

/*
 * When add_timer_on() enqueues a timer into the timer wheel of an
 * idle CPU then this timer might expire before the next timer event
 * which is scheduled to wake up that CPU. In case of a completely
 * idle system the next event might even be infinite time into the
 * future. wake_up_idle_cpu() ensures that the CPU is woken up and
 * leaves the inner idle loop so the newly added timer is taken into
 * account when the CPU goes back to idle and evaluates the timer
 * wheel for the next timer event.
 */
static void wake_up_idle_cpu(int cpu)
{
	struct rq *rq = cpu_rq(cpu);

	if (cpu == smp_processor_id())
		return;

	if (set_nr_and_not_polling(rq->idle))
		smp_send_reschedule(cpu);
	else
		trace_sched_wake_idle_without_ipi(cpu);
}

static bool wake_up_full_nohz_cpu(int cpu)
{
	/*
	 * We just need the target to call irq_exit() and re-evaluate
	 * the next tick. The nohz full kick at least implies that.
	 * If needed we can still optimize that later with an
	 * empty IRQ.
	 */
	if (cpu_is_offline(cpu))
		return true;  /* Don't try to wake offline CPUs. */
	if (tick_nohz_full_cpu(cpu)) {
		if (cpu != smp_processor_id() ||
		    tick_nohz_tick_stopped())
			tick_nohz_full_kick_cpu(cpu);
		return true;
	}

	return false;
}

/*
 * Wake up the specified CPU. If the CPU is going offline, it is the
 * caller's responsibility to deal with the lost wakeup, for example,
 * by hooking into the CPU_DEAD notifier like timers and hrtimers do.
 */
void wake_up_nohz_cpu(int cpu)
{
	if (!wake_up_full_nohz_cpu(cpu))
		wake_up_idle_cpu(cpu);
}

static void nohz_csd_func(void *info)
{
	struct rq *rq = info;
	int cpu = cpu_of(rq);
	unsigned int flags;

	/*
	 * Release the rq::nohz_csd.
	 */
	flags = atomic_fetch_andnot(NOHZ_KICK_MASK | NOHZ_NEWILB_KICK, nohz_flags(cpu));
	WARN_ON(!(flags & NOHZ_KICK_MASK));

	rq->idle_balance = idle_cpu(cpu);
	if (rq->idle_balance && !need_resched()) {
		rq->nohz_idle_balance = flags;
		raise_softirq_irqoff(SCHED_SOFTIRQ);
	}
}

#endif /* CONFIG_NO_HZ_COMMON */

#ifdef CONFIG_NO_HZ_FULL
bool sched_can_stop_tick(struct rq *rq)
{
	int fifo_nr_running;

	/* Deadline tasks, even if single, need the tick */
	if (rq->dl.dl_nr_running)
		return false;

	/*
	 * If there are more than one RR tasks, we need the tick to affect the
	 * actual RR behaviour.
	 */
	if (rq->rt.rr_nr_running) {
		if (rq->rt.rr_nr_running == 1)
			return true;
		else
			return false;
	}

	/*
	 * If there's no RR tasks, but FIFO tasks, we can skip the tick, no
	 * forced preemption between FIFO tasks.
	 */
	fifo_nr_running = rq->rt.rt_nr_running - rq->rt.rr_nr_running;
	if (fifo_nr_running)
		return true;

	/*
	 * If there are no DL,RR/FIFO tasks, there must only be CFS tasks left;
	 * if there's more than one we need the tick for involuntary
	 * preemption.
	 */
	if (rq->nr_running > 1)
		return false;

	return true;
}
#endif /* CONFIG_NO_HZ_FULL */
#endif /* CONFIG_SMP */

#if defined(CONFIG_RT_GROUP_SCHED) || (defined(CONFIG_FAIR_GROUP_SCHED) && \
			(defined(CONFIG_SMP) || defined(CONFIG_CFS_BANDWIDTH)))
/*
 * Iterate task_group tree rooted at *from, calling @down when first entering a
 * node and @up when leaving it for the final time.
 *
 * Caller must hold rcu_lock or sufficient equivalent.
 */
int walk_tg_tree_from(struct task_group *from,
			     tg_visitor down, tg_visitor up, void *data)
{
	struct task_group *parent, *child;
	int ret;

	parent = from;

down:
	ret = (*down)(parent, data);
	if (ret)
		goto out;
	list_for_each_entry_rcu(child, &parent->children, siblings) {
		parent = child;
		goto down;

up:
		continue;
	}
	ret = (*up)(parent, data);
	if (ret || parent == from)
		goto out;

	child = parent;
	parent = parent->parent;
	if (parent)
		goto up;
out:
	return ret;
}
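/*
 * The goto-based loop above is an iterative pre/post-order tree walk.
 * A recursive equivalent (an illustrative sketch only; the kernel keeps
 * the iterative form so stack usage stays bounded on deep hierarchies):
 *
 *	static int walk_tg_tree_rec(struct task_group *tg,
 *				    tg_visitor down, tg_visitor up, void *data)
 *	{
 *		struct task_group *child;
 *		int ret = (*down)(tg, data);	// called on first entry
 *
 *		if (ret)
 *			return ret;
 *		list_for_each_entry_rcu(child, &tg->children, siblings) {
 *			ret = walk_tg_tree_rec(child, down, up, data);
 *			if (ret)
 *				return ret;
 *		}
 *		return (*up)(tg, data);		// called on final exit
 *	}
 */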

int tg_nop(struct task_group *tg, void *data)
{
	return 0;
}
#endif

static void set_load_weight(struct task_struct *p, bool update_load)
{
	int prio = p->static_prio - MAX_RT_PRIO;
	struct load_weight *load = &p->se.load;

	/*
	 * SCHED_IDLE tasks get minimal weight:
	 */
	if (task_has_idle_policy(p)) {
		load->weight = scale_load(WEIGHT_IDLEPRIO);
		load->inv_weight = WMULT_IDLEPRIO;
		return;
	}

	/*
	 * SCHED_OTHER tasks have to update their load when changing their
	 * weight
	 */
	if (update_load && p->sched_class == &fair_sched_class) {
		reweight_task(p, prio);
	} else {
		load->weight = scale_load(sched_prio_to_weight[prio]);
		load->inv_weight = sched_prio_to_wmult[prio];
	}
}

#ifdef CONFIG_UCLAMP_TASK
/*
 * Serializes updates of utilization clamp values
 *
 * The (slow-path) user-space triggers utilization clamp value updates which
 * can require updates on (fast-path) scheduler's data structures used to
 * support enqueue/dequeue operations.
 * While the per-CPU rq lock protects fast-path update operations, user-space
 * requests are serialized using a mutex to reduce the risk of conflicting
 * updates or API abuses.
 */
static DEFINE_MUTEX(uclamp_mutex);

/* Max allowed minimum utilization */
unsigned int sysctl_sched_uclamp_util_min = SCHED_CAPACITY_SCALE;

/* Max allowed maximum utilization */
unsigned int sysctl_sched_uclamp_util_max = SCHED_CAPACITY_SCALE;

/*
 * By default RT tasks run at the maximum performance point/capacity of the
 * system. Uclamp enforces this by always setting UCLAMP_MIN of RT tasks to
 * SCHED_CAPACITY_SCALE.
 *
 * This knob allows admins to change the default behavior when uclamp is being
 * used. In battery powered devices, particularly, running at the maximum
 * capacity and frequency will increase energy consumption and shorten the
 * battery life.
 *
 * This knob only affects RT tasks that their uclamp_se->user_defined == false.
 *
 * This knob will not override the system default sched_util_clamp_min defined
 * above.
 */
unsigned int sysctl_sched_uclamp_util_min_rt_default = SCHED_CAPACITY_SCALE;

/* All clamps are required to be less or equal than these values */
static struct uclamp_se uclamp_default[UCLAMP_CNT];

/*
 * This static key is used to reduce the uclamp overhead in the fast path. It
 * primarily disables the call to uclamp_rq_{inc, dec}() in
 * enqueue/dequeue_task().
 *
 * This allows users to continue to enable uclamp in their kernel config with
 * minimum uclamp overhead in the fast path.
 *
 * As soon as userspace modifies any of the uclamp knobs, the static key is
 * enabled, since we have an actual users that make use of uclamp
 * functionality.
 *
 * The knobs that would enable this static key are:
 *
 *   * A task modifying its uclamp value with sched_setattr().
 *   * An admin modifying the sysctl_sched_uclamp_{min, max} via procfs.
 *   * An admin modifying the cgroup cpu.uclamp.{min, max}
 */
DEFINE_STATIC_KEY_FALSE(sched_uclamp_used);
EXPORT_SYMBOL_GPL(sched_uclamp_used);

/* Integer rounded range for each bucket */
#define UCLAMP_BUCKET_DELTA DIV_ROUND_CLOSEST(SCHED_CAPACITY_SCALE, UCLAMP_BUCKETS)

#define for_each_clamp_id(clamp_id) \
	for ((clamp_id) = 0; (clamp_id) < UCLAMP_CNT; (clamp_id)++)

static inline unsigned int uclamp_bucket_id(unsigned int clamp_value)
{
	return min_t(unsigned int, clamp_value / UCLAMP_BUCKET_DELTA, UCLAMP_BUCKETS - 1);
}
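/*
 * Worked example of the bucketing math above, assuming the default config
 * UCLAMP_BUCKETS == 5 and SCHED_CAPACITY_SCALE == 1024 (both assumptions
 * stated here, not quoted from this file):
 *
 *	UCLAMP_BUCKET_DELTA = DIV_ROUND_CLOSEST(1024, 5) = 205
 *
 *	clamp_value    0..204   -> bucket 0
 *	clamp_value  205..409   -> bucket 1
 *	clamp_value  410..614   -> bucket 2
 *	clamp_value  615..819   -> bucket 3
 *	clamp_value  820..1024  -> bucket 4
 *
 * The min_t() cap matters for configs where the division lands exactly on
 * UCLAMP_BUCKETS: e.g. with UCLAMP_BUCKETS == 4, delta is 256 and
 * clamp_value 1024 would compute bucket 4, which the cap folds back into
 * the last valid bucket, 3.
 */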

static inline unsigned int uclamp_none(enum uclamp_id clamp_id)
{
	if (clamp_id == UCLAMP_MIN)
		return 0;
	return SCHED_CAPACITY_SCALE;
}

static inline void uclamp_se_set(struct uclamp_se *uc_se,
				 unsigned int value, bool user_defined)
{
	uc_se->value = value;
	uc_se->bucket_id = uclamp_bucket_id(value);
	uc_se->user_defined = user_defined;
}

static inline unsigned int
uclamp_idle_value(struct rq *rq, enum uclamp_id clamp_id,
		  unsigned int clamp_value)
{
	/*
	 * Avoid blocked utilization pushing up the frequency when we go
	 * idle (which drops the max-clamp) by retaining the last known
	 * max-clamp.
	 */
	if (clamp_id == UCLAMP_MAX) {
		rq->uclamp_flags |= UCLAMP_FLAG_IDLE;
		return clamp_value;
	}

	return uclamp_none(UCLAMP_MIN);
}

static inline void uclamp_idle_reset(struct rq *rq, enum uclamp_id clamp_id,
				     unsigned int clamp_value)
{
	/* Reset max-clamp retention only on idle exit */
	if (!(rq->uclamp_flags & UCLAMP_FLAG_IDLE))
		return;

	WRITE_ONCE(rq->uclamp[clamp_id].value, clamp_value);
}

static inline
unsigned int uclamp_rq_max_value(struct rq *rq, enum uclamp_id clamp_id,
				   unsigned int clamp_value)
{
	struct uclamp_bucket *bucket = rq->uclamp[clamp_id].bucket;
	int bucket_id = UCLAMP_BUCKETS - 1;

	/*
	 * Since both min and max clamps are max aggregated, find the
	 * top most bucket with tasks in.
	 */
	for ( ; bucket_id >= 0; bucket_id--) {
		if (!bucket[bucket_id].tasks)
			continue;
		return bucket[bucket_id].value;
	}

	/* No tasks -- default clamp values */
	return uclamp_idle_value(rq, clamp_id, clamp_value);
}

static void __uclamp_update_util_min_rt_default(struct task_struct *p)
{
	unsigned int default_util_min;
	struct uclamp_se *uc_se;

	lockdep_assert_held(&p->pi_lock);

	uc_se = &p->uclamp_req[UCLAMP_MIN];

	/* Only sync if user didn't override the default */
	if (uc_se->user_defined)
		return;

	default_util_min = sysctl_sched_uclamp_util_min_rt_default;
	uclamp_se_set(uc_se, default_util_min, false);
}

static void uclamp_update_util_min_rt_default(struct task_struct *p)
{
	struct rq_flags rf;
	struct rq *rq;

	if (!rt_task(p))
		return;

	/* Protect updates to p->uclamp_* */
	rq = task_rq_lock(p, &rf);
	__uclamp_update_util_min_rt_default(p);
	task_rq_unlock(rq, p, &rf);
}

static void uclamp_sync_util_min_rt_default(void)
{
	struct task_struct *g, *p;

	/*
	 * copy_process()			sysctl_uclamp
	 *					  uclamp_min_rt = X;
	 *   write_lock(&tasklist_lock)		  read_lock(&tasklist_lock)
	 *   // link thread			  smp_mb__after_spinlock()
	 *   write_unlock(&tasklist_lock)	  read_unlock(&tasklist_lock);
	 *   sched_post_fork()			  for_each_process_thread()
	 *     __uclamp_sync_rt()		    __uclamp_sync_rt()
	 *
	 * Ensures that either sched_post_fork() will observe the new
	 * uclamp_min_rt or for_each_process_thread() will observe the new
	 * task.
	 */
	read_lock(&tasklist_lock);
	smp_mb__after_spinlock();
	read_unlock(&tasklist_lock);

	rcu_read_lock();
	for_each_process_thread(g, p)
		uclamp_update_util_min_rt_default(p);
	rcu_read_unlock();
}

static inline struct uclamp_se
uclamp_tg_restrict(struct task_struct *p, enum uclamp_id clamp_id)
{
	/* Copy by value as we could modify it */
	struct uclamp_se uc_req = p->uclamp_req[clamp_id];
#ifdef CONFIG_UCLAMP_TASK_GROUP
	unsigned int tg_min, tg_max, value;

	/*
	 * Tasks in autogroups or root task group will be
	 * restricted by system defaults.
	 */
	if (task_group_is_autogroup(task_group(p)))
		return uc_req;
	if (task_group(p) == &root_task_group)
		return uc_req;

	tg_min = task_group(p)->uclamp[UCLAMP_MIN].value;
	tg_max = task_group(p)->uclamp[UCLAMP_MAX].value;
	value = uc_req.value;
	value = clamp(value, tg_min, tg_max);
	uclamp_se_set(&uc_req, value, false);
#endif

	return uc_req;
}

/*
 * The effective clamp bucket index of a task depends on, by increasing
 * priority:
 * - the task specific clamp value, when explicitly requested from userspace
 * - the task group effective clamp value, for tasks not either in the root
 *   group or in an autogroup
 * - the system default clamp value, defined by the sysadmin
 */
static inline struct uclamp_se
uclamp_eff_get(struct task_struct *p, enum uclamp_id clamp_id)
{
	struct uclamp_se uc_req = uclamp_tg_restrict(p, clamp_id);
	struct uclamp_se uc_max = uclamp_default[clamp_id];
	struct uclamp_se uc_eff;
	int ret = 0;

	trace_android_rvh_uclamp_eff_get(p, clamp_id, &uc_max, &uc_eff, &ret);
	if (ret)
		return uc_eff;

	/* System default restrictions always apply */
	if (unlikely(uc_req.value > uc_max.value))
		return uc_max;

	return uc_req;
}

unsigned long uclamp_eff_value(struct task_struct *p, enum uclamp_id clamp_id)
{
	struct uclamp_se uc_eff;

	/* Task currently refcounted: use back-annotated (effective) value */
	if (p->uclamp[clamp_id].active)
		return (unsigned long)p->uclamp[clamp_id].value;

	uc_eff = uclamp_eff_get(p, clamp_id);

	return (unsigned long)uc_eff.value;
}
EXPORT_SYMBOL_GPL(uclamp_eff_value);

/*
 * When a task is enqueued on a rq, the clamp bucket currently defined by the
 * task's uclamp::bucket_id is refcounted on that rq. This also immediately
 * updates the rq's clamp value if required.
 *
 * Tasks can have a task-specific value requested from user-space, track
 * within each bucket the maximum value for tasks refcounted in it.
 * This "local max aggregation" allows to track the exact "requested" value
 * for each bucket when all its RUNNABLE tasks require the same clamp.
 */
static inline void uclamp_rq_inc_id(struct rq *rq, struct task_struct *p,
				    enum uclamp_id clamp_id)
{
	struct uclamp_rq *uc_rq = &rq->uclamp[clamp_id];
	struct uclamp_se *uc_se = &p->uclamp[clamp_id];
	struct uclamp_bucket *bucket;

	lockdep_assert_rq_held(rq);

	/* Update task effective clamp */
	p->uclamp[clamp_id] = uclamp_eff_get(p, clamp_id);

	bucket = &uc_rq->bucket[uc_se->bucket_id];
	bucket->tasks++;
	uc_se->active = true;

	uclamp_idle_reset(rq, clamp_id, uc_se->value);

	/*
	 * Local max aggregation: rq buckets always track the max
	 * "requested" clamp value of its RUNNABLE tasks.
	 */
	if (bucket->tasks == 1 || uc_se->value > bucket->value)
		bucket->value = uc_se->value;

	if (uc_se->value > READ_ONCE(uc_rq->value))
		WRITE_ONCE(uc_rq->value, uc_se->value);
}

/*
 * When a task is dequeued from a rq, the clamp bucket refcounted by the task
 * is released. If this is the last task reference counting the rq's max
 * active clamp value, then the rq's clamp value is updated.
 *
 * Both refcounted tasks and rq's cached clamp values are expected to be
 * always valid. If it's detected they are not, as defensive programming,
 * enforce the expected state and warn.
 */
static inline void uclamp_rq_dec_id(struct rq *rq, struct task_struct *p,
				    enum uclamp_id clamp_id)
{
	struct uclamp_rq *uc_rq = &rq->uclamp[clamp_id];
	struct uclamp_se *uc_se = &p->uclamp[clamp_id];
	struct uclamp_bucket *bucket;
	unsigned int bkt_clamp;
	unsigned int rq_clamp;

	lockdep_assert_rq_held(rq);

	/*
	 * If sched_uclamp_used was enabled after task @p was enqueued,
	 * we could end up with unbalanced call to uclamp_rq_dec_id().
	 *
	 * In this case the uc_se->active flag should be false since no uclamp
	 * accounting was performed at enqueue time and we can just return
	 * here.
	 *
	 * Need to be careful of the following enqueue/dequeue ordering
	 * problem too
	 *
	 *	enqueue(taskA)
	 *	// sched_uclamp_used gets enabled
	 *	enqueue(taskB)
	 *	dequeue(taskA)
	 *	// Must not decrement bucket->tasks here
	 *	dequeue(taskB)
	 *
	 * where we could end up with stale data in uc_se and
	 * bucket[uc_se->bucket_id].
	 *
	 * The following check here eliminates the possibility of such race.
	 */
	if (unlikely(!uc_se->active))
		return;

	bucket = &uc_rq->bucket[uc_se->bucket_id];

	SCHED_WARN_ON(!bucket->tasks);
	if (likely(bucket->tasks))
		bucket->tasks--;

	uc_se->active = false;

	/*
	 * Keep "local max aggregation" simple and accept to (possibly)
	 * overboost some RUNNABLE tasks in the same bucket.
	 * The rq clamp bucket value is reset to its base value whenever
	 * there are no more RUNNABLE tasks refcounting it.
	 */
	if (likely(bucket->tasks))
		return;

	rq_clamp = READ_ONCE(uc_rq->value);
	/*
	 * Defensive programming: this should never happen. If it happens,
	 * e.g. due to future modification, warn and fixup the expected value.
	 */
	SCHED_WARN_ON(bucket->value > rq_clamp);
	if (bucket->value >= rq_clamp) {
		bkt_clamp = uclamp_rq_max_value(rq, clamp_id, uc_se->value);
		WRITE_ONCE(uc_rq->value, bkt_clamp);
	}
}

static inline void uclamp_rq_inc(struct rq *rq, struct task_struct *p)
{
	enum uclamp_id clamp_id;

	/*
	 * Avoid any overhead until uclamp is actually used by the userspace.
	 *
	 * The condition is constructed such that a NOP is generated when
	 * sched_uclamp_used is disabled.
	 */
	if (!static_branch_unlikely(&sched_uclamp_used))
		return;

	if (unlikely(!p->sched_class->uclamp_enabled))
		return;

	for_each_clamp_id(clamp_id)
		uclamp_rq_inc_id(rq, p, clamp_id);

	/* Reset clamp idle holding when there is one RUNNABLE task */
	if (rq->uclamp_flags & UCLAMP_FLAG_IDLE)
		rq->uclamp_flags &= ~UCLAMP_FLAG_IDLE;
}

static inline void uclamp_rq_dec(struct rq *rq, struct task_struct *p)
{
	enum uclamp_id clamp_id;

	/*
	 * Avoid any overhead until uclamp is actually used by the userspace.
	 *
	 * The condition is constructed such that a NOP is generated when
	 * sched_uclamp_used is disabled.
	 */
	if (!static_branch_unlikely(&sched_uclamp_used))
		return;

	if (unlikely(!p->sched_class->uclamp_enabled))
		return;

	for_each_clamp_id(clamp_id)
		uclamp_rq_dec_id(rq, p, clamp_id);
}

static inline void uclamp_rq_reinc_id(struct rq *rq, struct task_struct *p,
				      enum uclamp_id clamp_id)
{
	if (!p->uclamp[clamp_id].active)
		return;

	uclamp_rq_dec_id(rq, p, clamp_id);
	uclamp_rq_inc_id(rq, p, clamp_id);

	/*
	 * Make sure to clear the idle flag if we've transiently reached 0
	 * active tasks on rq.
	 */
	if (clamp_id == UCLAMP_MAX && (rq->uclamp_flags & UCLAMP_FLAG_IDLE))
		rq->uclamp_flags &= ~UCLAMP_FLAG_IDLE;
}

static inline void
uclamp_update_active(struct task_struct *p)
{
	enum uclamp_id clamp_id;
	struct rq_flags rf;
	struct rq *rq;

	/*
	 * Lock the task and the rq where the task is (or was) queued.
	 *
	 * We might lock the (previous) rq of a !RUNNABLE task, but that's the
	 * price to pay to safely serialize util_{min,max} updates with
	 * enqueues, dequeues and migration operations.
	 * This is the same locking schema used by __set_cpus_allowed_ptr().
	 */
	rq = task_rq_lock(p, &rf);

	/*
	 * Setting the clamp bucket is serialized by task_rq_lock().
	 * If the task is not yet RUNNABLE and its task_struct is not
	 * affecting a valid clamp bucket, the next time it's enqueued,
	 * it will already see the updated clamp bucket value.
	 */
	for_each_clamp_id(clamp_id)
		uclamp_rq_reinc_id(rq, p, clamp_id);

	task_rq_unlock(rq, p, &rf);
}

#ifdef CONFIG_UCLAMP_TASK_GROUP
static inline void
uclamp_update_active_tasks(struct cgroup_subsys_state *css)
{
	struct css_task_iter it;
	struct task_struct *p;

	css_task_iter_start(css, 0, &it);
	while ((p = css_task_iter_next(&it)))
		uclamp_update_active(p);
	css_task_iter_end(&it);
}

static void cpu_util_update_eff(struct cgroup_subsys_state *css);
static void uclamp_update_root_tg(void)
{
	struct task_group *tg = &root_task_group;

	uclamp_se_set(&tg->uclamp_req[UCLAMP_MIN],
		      sysctl_sched_uclamp_util_min, false);
	uclamp_se_set(&tg->uclamp_req[UCLAMP_MAX],
		      sysctl_sched_uclamp_util_max, false);

	rcu_read_lock();
	cpu_util_update_eff(&root_task_group.css);
	rcu_read_unlock();
}
#else
static void uclamp_update_root_tg(void) { }
#endif

int sysctl_sched_uclamp_handler(struct ctl_table *table, int write,
				void *buffer, size_t *lenp, loff_t *ppos)
{
	bool update_root_tg = false;
	int old_min, old_max, old_min_rt;
	int result;

	mutex_lock(&uclamp_mutex);
	old_min = sysctl_sched_uclamp_util_min;
	old_max = sysctl_sched_uclamp_util_max;
	old_min_rt = sysctl_sched_uclamp_util_min_rt_default;

	result = proc_dointvec(table, write, buffer, lenp, ppos);
	if (result)
		goto undo;
	if (!write)
		goto done;

	if (sysctl_sched_uclamp_util_min > sysctl_sched_uclamp_util_max ||
	    sysctl_sched_uclamp_util_max > SCHED_CAPACITY_SCALE	||
	    sysctl_sched_uclamp_util_min_rt_default > SCHED_CAPACITY_SCALE) {

		result = -EINVAL;
		goto undo;
	}

	if (old_min != sysctl_sched_uclamp_util_min) {
		uclamp_se_set(&uclamp_default[UCLAMP_MIN],
			      sysctl_sched_uclamp_util_min, false);
		update_root_tg = true;
	}
	if (old_max != sysctl_sched_uclamp_util_max) {
		uclamp_se_set(&uclamp_default[UCLAMP_MAX],
			      sysctl_sched_uclamp_util_max, false);
		update_root_tg = true;
	}

	if (update_root_tg) {
		static_branch_enable(&sched_uclamp_used);
		uclamp_update_root_tg();
	}

	if (old_min_rt != sysctl_sched_uclamp_util_min_rt_default) {
		static_branch_enable(&sched_uclamp_used);
		uclamp_sync_util_min_rt_default();
	}

	/*
	 * We update all RUNNABLE tasks only when task groups are in use.
	 * Otherwise, keep it simple and do just a lazy update at each next
	 * task enqueue time.
	 */

	goto done;

undo:
	sysctl_sched_uclamp_util_min = old_min;
	sysctl_sched_uclamp_util_max = old_max;
	sysctl_sched_uclamp_util_min_rt_default = old_min_rt;
done:
	mutex_unlock(&uclamp_mutex);

	return result;
}

static int uclamp_validate(struct task_struct *p,
			   const struct sched_attr *attr)
{
	int util_min = p->uclamp_req[UCLAMP_MIN].value;
	int util_max = p->uclamp_req[UCLAMP_MAX].value;

	if (attr->sched_flags & SCHED_FLAG_UTIL_CLAMP_MIN) {
		util_min = attr->sched_util_min;

		if (util_min + 1 > SCHED_CAPACITY_SCALE + 1)
			return -EINVAL;
	}

	if (attr->sched_flags & SCHED_FLAG_UTIL_CLAMP_MAX) {
		util_max = attr->sched_util_max;

		if (util_max + 1 > SCHED_CAPACITY_SCALE + 1)
			return -EINVAL;
	}

	if (util_min != -1 && util_max != -1 && util_min > util_max)
		return -EINVAL;

	/*
	 * We have valid uclamp attributes; make sure uclamp is enabled.
	 *
	 * We need to do that here, because enabling static branches is a
	 * blocking operation which obviously cannot be done while holding
	 * scheduler locks.
	 */
	static_branch_enable(&sched_uclamp_used);

	return 0;
}
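/*
 * Note the shifted range check above: sched_util_{min,max} may carry the
 * -1 "leave this clamp unchanged" sentinel, and writing the bound as
 * "util + 1 > SCHED_CAPACITY_SCALE + 1" lets exactly -1 plus the range
 * 0..SCHED_CAPACITY_SCALE through. Worked examples with
 * SCHED_CAPACITY_SCALE == 1024:
 *
 *	util == -1	-> -1 + 1 == 0,       not > 1025  -> accepted (sentinel)
 *	util == 0	->  0 + 1 == 1,       not > 1025  -> accepted
 *	util == 1024	->  1024 + 1 == 1025, not > 1025  -> accepted
 *	util == 2000	->  2000 + 1 == 2001, > 1025      -> -EINVAL
 *
 * The explicit "!= -1" tests further down then keep the sentinel from
 * participating in the min > max comparison.
 */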
|
|
|
|
static bool uclamp_reset(const struct sched_attr *attr,
|
|
enum uclamp_id clamp_id,
|
|
struct uclamp_se *uc_se)
|
|
{
|
|
/* Reset on sched class change for a non user-defined clamp value. */
|
|
if (likely(!(attr->sched_flags & SCHED_FLAG_UTIL_CLAMP)) &&
|
|
!uc_se->user_defined)
|
|
return true;
|
|
|
|
/* Reset on sched_util_{min,max} == -1. */
|
|
if (clamp_id == UCLAMP_MIN &&
|
|
attr->sched_flags & SCHED_FLAG_UTIL_CLAMP_MIN &&
|
|
attr->sched_util_min == -1) {
|
|
return true;
|
|
}
|
|
|
|
if (clamp_id == UCLAMP_MAX &&
|
|
attr->sched_flags & SCHED_FLAG_UTIL_CLAMP_MAX &&
|
|
attr->sched_util_max == -1) {
|
|
return true;
|
|
}
|
|
|
|
return false;
|
|
}
|
|
|
|
static void __setscheduler_uclamp(struct task_struct *p,
|
|
const struct sched_attr *attr)
|
|
{
|
|
enum uclamp_id clamp_id;
|
|
|
|
for_each_clamp_id(clamp_id) {
|
|
struct uclamp_se *uc_se = &p->uclamp_req[clamp_id];
|
|
unsigned int value;
|
|
|
|
if (!uclamp_reset(attr, clamp_id, uc_se))
|
|
continue;
|
|
|
|
/*
|
|
* RT by default have a 100% boost value that could be modified
|
|
* at runtime.
|
|
*/
|
|
if (unlikely(rt_task(p) && clamp_id == UCLAMP_MIN))
|
|
value = sysctl_sched_uclamp_util_min_rt_default;
|
|
else
|
|
value = uclamp_none(clamp_id);
|
|
|
|
uclamp_se_set(uc_se, value, false);
|
|
|
|
}
|
|
|
|
if (likely(!(attr->sched_flags & SCHED_FLAG_UTIL_CLAMP)))
|
|
return;
|
|
|
|
if (attr->sched_flags & SCHED_FLAG_UTIL_CLAMP_MIN &&
|
|
attr->sched_util_min != -1) {
|
|
uclamp_se_set(&p->uclamp_req[UCLAMP_MIN],
|
|
attr->sched_util_min, true);
|
|
trace_android_vh_setscheduler_uclamp(p, UCLAMP_MIN, attr->sched_util_min);
|
|
}
|
|
|
|
if (attr->sched_flags & SCHED_FLAG_UTIL_CLAMP_MAX &&
|
|
attr->sched_util_max != -1) {
|
|
uclamp_se_set(&p->uclamp_req[UCLAMP_MAX],
|
|
attr->sched_util_max, true);
|
|
trace_android_vh_setscheduler_uclamp(p, UCLAMP_MAX, attr->sched_util_max);
|
|
}
|
|
}

static void uclamp_fork(struct task_struct *p)
{
	enum uclamp_id clamp_id;

	/*
	 * We don't need to hold task_rq_lock() when updating p->uclamp_* here
	 * as the task is still at its early fork stages.
	 */
	for_each_clamp_id(clamp_id)
		p->uclamp[clamp_id].active = false;

	if (likely(!p->sched_reset_on_fork))
		return;

	for_each_clamp_id(clamp_id) {
		uclamp_se_set(&p->uclamp_req[clamp_id],
			      uclamp_none(clamp_id), false);
	}
}

static void uclamp_post_fork(struct task_struct *p)
{
	uclamp_update_util_min_rt_default(p);
}

static void __init init_uclamp_rq(struct rq *rq)
{
	enum uclamp_id clamp_id;
	struct uclamp_rq *uc_rq = rq->uclamp;

	for_each_clamp_id(clamp_id) {
		uc_rq[clamp_id] = (struct uclamp_rq) {
			.value = uclamp_none(clamp_id)
		};
	}

	rq->uclamp_flags = UCLAMP_FLAG_IDLE;
}

static void __init init_uclamp(void)
{
	struct uclamp_se uc_max = {};
	enum uclamp_id clamp_id;
	int cpu;

	for_each_possible_cpu(cpu)
		init_uclamp_rq(cpu_rq(cpu));

	for_each_clamp_id(clamp_id) {
		uclamp_se_set(&init_task.uclamp_req[clamp_id],
			      uclamp_none(clamp_id), false);
	}

	/* System defaults allow max clamp values for both indexes */
	uclamp_se_set(&uc_max, uclamp_none(UCLAMP_MAX), false);
	for_each_clamp_id(clamp_id) {
		uclamp_default[clamp_id] = uc_max;
#ifdef CONFIG_UCLAMP_TASK_GROUP
		root_task_group.uclamp_req[clamp_id] = uc_max;
		root_task_group.uclamp[clamp_id] = uc_max;
#endif
	}
}

#else /* CONFIG_UCLAMP_TASK */
static inline void uclamp_rq_inc(struct rq *rq, struct task_struct *p) { }
static inline void uclamp_rq_dec(struct rq *rq, struct task_struct *p) { }
static inline int uclamp_validate(struct task_struct *p,
				  const struct sched_attr *attr)
{
	return -EOPNOTSUPP;
}
static void __setscheduler_uclamp(struct task_struct *p,
				  const struct sched_attr *attr) { }
static inline void uclamp_fork(struct task_struct *p) { }
static inline void uclamp_post_fork(struct task_struct *p) { }
static inline void init_uclamp(void) { }
#endif /* CONFIG_UCLAMP_TASK */

bool sched_task_on_rq(struct task_struct *p)
{
	return task_on_rq_queued(p);
}

static inline void enqueue_task(struct rq *rq, struct task_struct *p, int flags)
{
	if (!(flags & ENQUEUE_NOCLOCK))
		update_rq_clock(rq);

	if (!(flags & ENQUEUE_RESTORE)) {
		sched_info_enqueue(rq, p);
		psi_enqueue(p, flags & ENQUEUE_WAKEUP);
	}

	uclamp_rq_inc(rq, p);
	trace_android_rvh_enqueue_task(rq, p, flags);
	p->sched_class->enqueue_task(rq, p, flags);
	trace_android_rvh_after_enqueue_task(rq, p, flags);

	if (sched_core_enabled(rq))
		sched_core_enqueue(rq, p);
}

static inline void dequeue_task(struct rq *rq, struct task_struct *p, int flags)
{
	if (sched_core_enabled(rq))
		sched_core_dequeue(rq, p);

	if (!(flags & DEQUEUE_NOCLOCK))
		update_rq_clock(rq);

	if (!(flags & DEQUEUE_SAVE)) {
		sched_info_dequeue(rq, p);
		psi_dequeue(p, flags & DEQUEUE_SLEEP);
	}

	uclamp_rq_dec(rq, p);
	trace_android_rvh_dequeue_task(rq, p, flags);
	p->sched_class->dequeue_task(rq, p, flags);
	trace_android_rvh_after_dequeue_task(rq, p, flags);
}

void activate_task(struct rq *rq, struct task_struct *p, int flags)
{
	enqueue_task(rq, p, flags);

	p->on_rq = TASK_ON_RQ_QUEUED;
}
EXPORT_SYMBOL_GPL(activate_task);

void deactivate_task(struct rq *rq, struct task_struct *p, int flags)
{
	p->on_rq = (flags & DEQUEUE_SLEEP) ? 0 : TASK_ON_RQ_MIGRATING;

	dequeue_task(rq, p, flags);
}
EXPORT_SYMBOL_GPL(deactivate_task);

static inline int __normal_prio(int policy, int rt_prio, int nice)
{
	int prio;

	if (dl_policy(policy))
		prio = MAX_DL_PRIO - 1;
	else if (rt_policy(policy))
		prio = MAX_RT_PRIO - 1 - rt_prio;
	else
		prio = NICE_TO_PRIO(nice);

	return prio;
}
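
/*
 * Worked examples for the mapping above (derived from the MAX_DL_PRIO,
 * MAX_RT_PRIO and NICE_TO_PRIO definitions):
 *
 *   SCHED_DEADLINE              -> prio = MAX_DL_PRIO - 1   = -1
 *   SCHED_FIFO, rt_priority 50  -> prio = 100 - 1 - 50      = 49
 *   SCHED_NORMAL, nice -20      -> prio = NICE_TO_PRIO(-20) = 100
 *   SCHED_NORMAL, nice 0        -> prio = NICE_TO_PRIO(0)   = 120
 *
 * i.e. lower numbers mean higher priority, with deadline below all RT
 * priorities and RT below all nice levels.
 */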

/*
 * Calculate the expected normal priority: i.e. priority
 * without taking RT-inheritance into account. Might be
 * boosted by interactivity modifiers. Changes upon fork,
 * setprio syscalls, and whenever the interactivity
 * estimator recalculates.
 */
static inline int normal_prio(struct task_struct *p)
{
	return __normal_prio(p->policy, p->rt_priority, PRIO_TO_NICE(p->static_prio));
}

/*
 * Calculate the current priority, i.e. the priority
 * taken into account by the scheduler. This value might
 * be boosted by RT tasks, or might be boosted by
 * interactivity modifiers. Will be RT if the task got
 * RT-boosted. If not then it returns p->normal_prio.
 */
static int effective_prio(struct task_struct *p)
{
	p->normal_prio = normal_prio(p);
	/*
	 * If we are RT tasks or we were boosted to RT priority,
	 * keep the priority unchanged. Otherwise, update priority
	 * to the normal priority:
	 */
	if (!rt_prio(p->prio))
		return p->normal_prio;
	return p->prio;
}

/**
 * task_curr - is this task currently executing on a CPU?
 * @p: the task in question.
 *
 * Return: 1 if the task is currently executing. 0 otherwise.
 */
inline int task_curr(const struct task_struct *p)
{
	return cpu_curr(task_cpu(p)) == p;
}

/*
 * switched_from, switched_to and prio_changed must _NOT_ drop rq->lock,
 * use the balance_callback list if you want balancing.
 *
 * this means any call to check_class_changed() must be followed by a call to
 * balance_callback().
 */
static inline void check_class_changed(struct rq *rq, struct task_struct *p,
				       const struct sched_class *prev_class,
				       int oldprio)
{
	if (prev_class != p->sched_class) {
		if (prev_class->switched_from)
			prev_class->switched_from(rq, p);

		p->sched_class->switched_to(rq, p);
	} else if (oldprio != p->prio || dl_task(p))
		p->sched_class->prio_changed(rq, p, oldprio);
}

void check_preempt_curr(struct rq *rq, struct task_struct *p, int flags)
{
	if (p->sched_class == rq->curr->sched_class)
		rq->curr->sched_class->check_preempt_curr(rq, p, flags);
	else if (p->sched_class > rq->curr->sched_class)
		resched_curr(rq);

	/*
	 * A queue event has occurred, and we're going to schedule.  In
	 * this case, we can save a useless back to back clock update.
	 */
	if (task_on_rq_queued(rq->curr) && test_tsk_need_resched(rq->curr))
		rq_clock_skip_update(rq);
}
EXPORT_SYMBOL_GPL(check_preempt_curr);

#ifdef CONFIG_SMP

static void
__do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask, u32 flags);

static int __set_cpus_allowed_ptr(struct task_struct *p,
				  const struct cpumask *new_mask,
				  u32 flags);

static void migrate_disable_switch(struct rq *rq, struct task_struct *p)
{
	if (likely(!p->migration_disabled))
		return;

	if (p->cpus_ptr != &p->cpus_mask)
		return;

	/*
	 * Violates locking rules! see comment in __do_set_cpus_allowed().
	 */
	__do_set_cpus_allowed(p, cpumask_of(rq->cpu), SCA_MIGRATE_DISABLE);
}

void migrate_disable(void)
{
	struct task_struct *p = current;

	if (p->migration_disabled) {
		p->migration_disabled++;
		return;
	}

	preempt_disable();
	this_rq()->nr_pinned++;
	p->migration_disabled = 1;
	preempt_enable();
}
EXPORT_SYMBOL_GPL(migrate_disable);

void migrate_enable(void)
{
	struct task_struct *p = current;

	if (p->migration_disabled > 1) {
		p->migration_disabled--;
		return;
	}

	/*
	 * Ensure stop_task runs either before or after this, and that
	 * __set_cpus_allowed_ptr(SCA_MIGRATE_ENABLE) doesn't schedule().
	 */
	preempt_disable();
	if (p->cpus_ptr != &p->cpus_mask)
		__set_cpus_allowed_ptr(p, &p->cpus_mask, SCA_MIGRATE_ENABLE);
	/*
	 * Mustn't clear migration_disabled() until cpus_ptr points back at the
	 * regular cpus_mask, otherwise things that race (eg.
	 * select_fallback_rq) get confused.
	 */
	barrier();
	p->migration_disabled = 0;
	this_rq()->nr_pinned--;
	preempt_enable();
}
EXPORT_SYMBOL_GPL(migrate_enable);
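
/*
 * Typical (illustrative) use of the pair above: pin the current task to
 * its CPU across a per-CPU access while staying preemptible, unlike
 * preempt_disable(). 'my_percpu_data' is a hypothetical per-CPU variable:
 *
 *	migrate_disable();
 *	p = this_cpu_ptr(&my_percpu_data);	// stays valid: no migration
 *	...					// preemptible, may even sleep
 *	migrate_enable();
 *
 * (check_preemption_disabled() explicitly permits smp_processor_id()-based
 * accessors while p->migration_disabled is set.) The calls nest; only the
 * outermost migrate_enable() re-enables migration and, via
 * SCA_MIGRATE_ENABLE above, replays any affinity change that arrived in
 * between.
 */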

static inline bool rq_has_pinned_tasks(struct rq *rq)
{
	return rq->nr_pinned;
}

/*
 * Per-CPU kthreads are allowed to run on !active && online CPUs, see
 * __set_cpus_allowed_ptr() and select_fallback_rq().
 */
static inline bool is_cpu_allowed(struct task_struct *p, int cpu)
{
	bool allowed = true;

	/* When not in the task's cpumask, no point in looking further. */
	if (!cpumask_test_cpu(cpu, p->cpus_ptr))
		return false;

	/* migrate_disabled() must be allowed to finish. */
	if (is_migration_disabled(p))
		return cpu_online(cpu);

	/* check for all cases */
	trace_android_rvh_is_cpu_allowed(p, cpu, &allowed);

	/* Non kernel threads are not allowed during either online or offline. */
	if (!(p->flags & PF_KTHREAD))
		return cpu_active(cpu) && task_cpu_possible(cpu, p) && allowed;

	/* KTHREAD_IS_PER_CPU is always allowed. */
	if (kthread_is_per_cpu(p))
		return cpu_online(cpu);

	if (!allowed)
		return false;

	/* Regular kernel threads don't get to stay during offline. */
	if (cpu_dying(cpu))
		return false;

	/* But are allowed during online. */
	return cpu_online(cpu);
}

/*
 * This is how migration works:
 *
 * 1) we invoke migration_cpu_stop() on the target CPU using
 *    stop_one_cpu().
 * 2) stopper starts to run (implicitly forcing the migrated thread
 *    off the CPU)
 * 3) it checks whether the migrated task is still in the wrong runqueue.
 * 4) if it's in the wrong runqueue then the migration thread removes
 *    it and puts it into the right queue.
 * 5) stopper completes and stop_one_cpu() returns and the migration
 *    is done.
 */
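
/*
 * Simplified sketch of that sequence as a caller drives it (real callers
 * go through set_cpus_allowed_ptr() or sched_exec(); 'target_cpu' is a
 * placeholder):
 *
 *	struct migration_arg arg = { .task = p, .dest_cpu = target_cpu };
 *
 *	// runs migration_cpu_stop() on task_cpu(p) and waits for it
 *	stop_one_cpu(task_cpu(p), migration_cpu_stop, &arg);
 */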

/*
 * move_queued_task - move a queued task to new rq.
 *
 * Returns (locked) new rq. Old rq's lock is released.
 */
static struct rq *move_queued_task(struct rq *rq, struct rq_flags *rf,
				   struct task_struct *p, int new_cpu)
{
	int detached = 0;

	lockdep_assert_rq_held(rq);

	/*
	 * The vendor hook may drop the lock temporarily, so
	 * pass the rq flags to unpin lock. We expect the
	 * rq lock to be held after return.
	 */
	trace_android_rvh_migrate_queued_task(rq, rf, p, new_cpu, &detached);
	if (detached)
		goto attach;

	deactivate_task(rq, p, DEQUEUE_NOCLOCK);
	set_task_cpu(p, new_cpu);

attach:
	rq_unlock(rq, rf);
	rq = cpu_rq(new_cpu);

	rq_lock(rq, rf);
	BUG_ON(task_cpu(p) != new_cpu);
	activate_task(rq, p, 0);
	check_preempt_curr(rq, p, 0);

	return rq;
}

struct migration_arg {
	struct task_struct *task;
	int dest_cpu;
	struct set_affinity_pending *pending;
};

/*
 * @refs: number of wait_for_completion()
 * @stop_pending: is @stop_work in use
 */
struct set_affinity_pending {
	refcount_t refs;
	unsigned int stop_pending;
	struct completion done;
	struct cpu_stop_work stop_work;
	struct migration_arg arg;
};

/*
 * Move (not current) task off this CPU, onto the destination CPU. We're doing
 * this because either it can't run here any more (set_cpus_allowed()
 * away from this CPU, or CPU going down), or because we're
 * attempting to rebalance this task on exec (sched_exec).
 *
 * So we race with normal scheduler movements, but that's OK, as long
 * as the task is no longer on this CPU.
 */
struct rq *__migrate_task(struct rq *rq, struct rq_flags *rf,
			  struct task_struct *p, int dest_cpu)
{
	/* Affinity changed (again). */
	if (!is_cpu_allowed(p, dest_cpu))
		return rq;

	update_rq_clock(rq);
	rq = move_queued_task(rq, rf, p, dest_cpu);

	return rq;
}
EXPORT_SYMBOL_GPL(__migrate_task);

/*
 * migration_cpu_stop - this will be executed by a highprio stopper thread
 * and performs thread migration by bumping thread off CPU then
 * 'pushing' onto another runqueue.
 */
static int migration_cpu_stop(void *data)
{
	struct migration_arg *arg = data;
	struct set_affinity_pending *pending = arg->pending;
	struct task_struct *p = arg->task;
	struct rq *rq = this_rq();
	bool complete = false;
	struct rq_flags rf;

	/*
	 * The original target CPU might have gone down and we might
	 * be on another CPU but it doesn't matter.
	 */
	local_irq_save(rf.flags);
	/*
	 * We need to explicitly wake pending tasks before running
	 * __migrate_task() such that we will not miss enforcing cpus_ptr
	 * during wakeups, see set_cpus_allowed_ptr()'s TASK_WAKING test.
	 */
	flush_smp_call_function_from_idle();

	raw_spin_lock(&p->pi_lock);
	rq_lock(rq, &rf);

	/*
	 * If we were passed a pending, then ->stop_pending was set, thus
	 * p->migration_pending must have remained stable.
	 */
	WARN_ON_ONCE(pending && pending != p->migration_pending);

	/*
	 * If task_rq(p) != rq, it cannot be migrated here, because we're
	 * holding rq->lock, if p->on_rq == 0 it cannot get enqueued because
	 * we're holding p->pi_lock.
	 */
	if (task_rq(p) == rq) {
		if (is_migration_disabled(p))
			goto out;

		if (pending) {
			p->migration_pending = NULL;
			complete = true;

			if (cpumask_test_cpu(task_cpu(p), &p->cpus_mask))
				goto out;
		}

		if (task_on_rq_queued(p))
			rq = __migrate_task(rq, &rf, p, arg->dest_cpu);
		else
			p->wake_cpu = arg->dest_cpu;

		/*
		 * XXX __migrate_task() can fail, at which point we might end
		 * up running on a dodgy CPU, AFAICT this can only happen
		 * during CPU hotplug, at which point we'll get pushed out
		 * anyway, so it's probably not a big deal.
		 */

	} else if (pending) {
		/*
		 * This happens when we get migrated between migrate_enable()'s
		 * preempt_enable() and scheduling the stopper task. At that
		 * point we're a regular task again and not current anymore.
		 *
		 * A !PREEMPT kernel has a giant hole here, which makes it far
		 * more likely.
		 */

		/*
		 * The task moved before the stopper got to run. We're holding
		 * ->pi_lock, so the allowed mask is stable - if it got
		 * somewhere allowed, we're done.
		 */
		if (cpumask_test_cpu(task_cpu(p), p->cpus_ptr)) {
			p->migration_pending = NULL;
			complete = true;
			goto out;
		}

		/*
		 * When migrate_enable() hits a rq mis-match we can't reliably
		 * determine is_migration_disabled() and so have to chase after
		 * it.
		 */
		WARN_ON_ONCE(!pending->stop_pending);
		task_rq_unlock(rq, p, &rf);
		stop_one_cpu_nowait(task_cpu(p), migration_cpu_stop,
				    &pending->arg, &pending->stop_work);
		return 0;
	}
out:
	if (pending)
		pending->stop_pending = false;
	task_rq_unlock(rq, p, &rf);

	if (complete)
		complete_all(&pending->done);

	return 0;
}

int push_cpu_stop(void *arg)
{
	struct rq *lowest_rq = NULL, *rq = this_rq();
	struct task_struct *p = arg;

	raw_spin_lock_irq(&p->pi_lock);
	raw_spin_rq_lock(rq);

	if (task_rq(p) != rq)
		goto out_unlock;

	if (is_migration_disabled(p)) {
		p->migration_flags |= MDF_PUSH;
		goto out_unlock;
	}

	p->migration_flags &= ~MDF_PUSH;

	if (p->sched_class->find_lock_rq)
		lowest_rq = p->sched_class->find_lock_rq(p, rq);

	if (!lowest_rq)
		goto out_unlock;

	// XXX validate p is still the highest prio task
	if (task_rq(p) == rq) {
		deactivate_task(rq, p, 0);
		set_task_cpu(p, lowest_rq->cpu);
		activate_task(lowest_rq, p, 0);
		resched_curr(lowest_rq);
	}

	double_unlock_balance(rq, lowest_rq);

out_unlock:
	rq->push_busy = false;
	raw_spin_rq_unlock(rq);
	raw_spin_unlock_irq(&p->pi_lock);

	put_task_struct(p);
	return 0;
}

/*
 * sched_class::set_cpus_allowed must do the below, but is not required to
 * actually call this function.
 */
void set_cpus_allowed_common(struct task_struct *p, const struct cpumask *new_mask, u32 flags)
{
	if (flags & (SCA_MIGRATE_ENABLE | SCA_MIGRATE_DISABLE)) {
		p->cpus_ptr = new_mask;
		return;
	}

	cpumask_copy(&p->cpus_mask, new_mask);
	p->nr_cpus_allowed = cpumask_weight(new_mask);
	trace_android_rvh_set_cpus_allowed_comm(p, new_mask);
}
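
/*
 * Note the asymmetry above: the SCA_MIGRATE_{ENABLE,DISABLE} paths only
 * flip the p->cpus_ptr indirection between &p->cpus_mask and a one-CPU
 * mask, leaving p->cpus_mask (the user-visible affinity) and
 * p->nr_cpus_allowed untouched. The original affinity is therefore
 * restored by simply pointing cpus_ptr back at cpus_mask.
 */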

static void
__do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask, u32 flags)
{
	struct rq *rq = task_rq(p);
	bool queued, running;

	/*
	 * This here violates the locking rules for affinity, since we're only
	 * supposed to change these variables while holding both rq->lock and
	 * p->pi_lock.
	 *
	 * HOWEVER, it magically works, because ttwu() is the only code that
	 * accesses these variables under p->pi_lock and only does so after
	 * smp_cond_load_acquire(&p->on_cpu, !VAL), and we're in __schedule()
	 * before finish_task().
	 *
	 * XXX do further audits, this smells like something putrid.
	 */
	if (flags & SCA_MIGRATE_DISABLE)
		SCHED_WARN_ON(!p->on_cpu);
	else
		lockdep_assert_held(&p->pi_lock);

	queued = task_on_rq_queued(p);
	running = task_current(rq, p);

	if (queued) {
		/*
		 * Because __kthread_bind() calls this on blocked tasks without
		 * holding rq->lock.
		 */
		lockdep_assert_rq_held(rq);
		dequeue_task(rq, p, DEQUEUE_SAVE | DEQUEUE_NOCLOCK);
	}
	if (running)
		put_prev_task(rq, p);

	p->sched_class->set_cpus_allowed(p, new_mask, flags);

	if (queued)
		enqueue_task(rq, p, ENQUEUE_RESTORE | ENQUEUE_NOCLOCK);
	if (running)
		set_next_task(rq, p);
}

void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask)
{
	__do_set_cpus_allowed(p, new_mask, 0);
}

int dup_user_cpus_ptr(struct task_struct *dst, struct task_struct *src,
		      int node)
{
	cpumask_t *user_mask;
	unsigned long flags;

	/*
	 * Always clear dst->user_cpus_ptr first as their user_cpus_ptr's
	 * may differ by now due to racing.
	 */
	dst->user_cpus_ptr = NULL;

	/*
	 * This check is racy and losing the race is a valid situation.
	 * It is not worth the extra overhead of taking the pi_lock on
	 * every fork/clone.
	 */
	if (data_race(!src->user_cpus_ptr))
		return 0;

	user_mask = kmalloc_node(cpumask_size(), GFP_KERNEL, node);
	if (!user_mask)
		return -ENOMEM;

	/*
	 * Use pi_lock to protect content of user_cpus_ptr
	 *
	 * Though unlikely, user_cpus_ptr can be reset to NULL by a concurrent
	 * do_set_cpus_allowed().
	 */
	raw_spin_lock_irqsave(&src->pi_lock, flags);
	if (src->user_cpus_ptr) {
		swap(dst->user_cpus_ptr, user_mask);
		cpumask_copy(dst->user_cpus_ptr, src->user_cpus_ptr);
	}
	raw_spin_unlock_irqrestore(&src->pi_lock, flags);

	if (unlikely(user_mask))
		kfree(user_mask);

	return 0;
}

static inline struct cpumask *clear_user_cpus_ptr(struct task_struct *p)
{
	struct cpumask *user_mask = NULL;

	swap(p->user_cpus_ptr, user_mask);

	return user_mask;
}

void release_user_cpus_ptr(struct task_struct *p)
{
	kfree(clear_user_cpus_ptr(p));
}

/*
 * This function is wildly self concurrent; here be dragons.
 *
 *
 * When given a valid mask, __set_cpus_allowed_ptr() must block until the
 * designated task is enqueued on an allowed CPU. If that task is currently
 * running, we have to kick it out using the CPU stopper.
 *
 * Migrate-Disable comes along and tramples all over our nice sandcastle.
 * Consider:
 *
 *     Initial conditions: P0->cpus_mask = [0, 1]
 *
 *     P0@CPU0                  P1
 *
 *     migrate_disable();
 *     <preempted>
 *                              set_cpus_allowed_ptr(P0, [1]);
 *
 * P1 *cannot* return from this set_cpus_allowed_ptr() call until P0 executes
 * its outermost migrate_enable() (i.e. it exits its Migrate-Disable region).
 * This means we need the following scheme:
 *
 *     P0@CPU0                  P1
 *
 *     migrate_disable();
 *     <preempted>
 *                              set_cpus_allowed_ptr(P0, [1]);
 *                                <blocks>
 *     <resumes>
 *     migrate_enable();
 *       __set_cpus_allowed_ptr();
 *         <wakes local stopper>
 *                         `--> <woken on migration completion>
 *
 * Now the fun stuff: there may be several P1-like tasks, i.e. multiple
 * concurrent set_cpus_allowed_ptr(P0, [*]) calls. CPU affinity changes of any
 * task p are serialized by p->pi_lock, which we can leverage: the one that
 * should come into effect at the end of the Migrate-Disable region is the last
 * one. This means we only need to track a single cpumask (i.e. p->cpus_mask),
 * but we still need to properly signal those waiting tasks at the appropriate
 * moment.
 *
 * This is implemented using struct set_affinity_pending. The first
 * __set_cpus_allowed_ptr() caller within a given Migrate-Disable region will
 * setup an instance of that struct and install it on the targeted task_struct.
 * Any and all further callers will reuse that instance. Those then wait for
 * a completion signaled at the tail of the CPU stopper callback (1), triggered
 * on the end of the Migrate-Disable region (i.e. outermost migrate_enable()).
 *
 *
 * (1) In the cases covered above. There is one more where the completion is
 * signaled within affine_move_task() itself: when a subsequent affinity request
 * occurs after the stopper bailed out due to the targeted task still being
 * Migrate-Disable. Consider:
 *
 *     Initial conditions: P0->cpus_mask = [0, 1]
 *
 *     CPU0                     P1                      P2
 *     <P0>
 *       migrate_disable();
 *       <preempted>
 *                              set_cpus_allowed_ptr(P0, [1]);
 *                                <blocks>
 *     <migration/0>
 *       migration_cpu_stop()
 *         is_migration_disabled()
 *           <bails>
 *                                                      set_cpus_allowed_ptr(P0, [0, 1]);
 *                                                        <signal completion>
 *                              <awakes>
 *
 * Note that the above is safe vs a concurrent migrate_enable(), as any
 * pending affinity completion is preceded by an uninstallation of
 * p->migration_pending done with p->pi_lock held.
 */
static int affine_move_task(struct rq *rq, struct task_struct *p, struct rq_flags *rf,
			    int dest_cpu, unsigned int flags)
{
	struct set_affinity_pending my_pending = { }, *pending = NULL;
	bool stop_pending, complete = false;

	/* Can the task run on the task's current CPU? If so, we're done */
	if (cpumask_test_cpu(task_cpu(p), &p->cpus_mask)) {
		struct task_struct *push_task = NULL;

		if ((flags & SCA_MIGRATE_ENABLE) &&
		    (p->migration_flags & MDF_PUSH) && !rq->push_busy) {
			rq->push_busy = true;
			push_task = get_task_struct(p);
		}

		/*
		 * If there are pending waiters, but no pending stop_work,
		 * then complete now.
		 */
		pending = p->migration_pending;
		if (pending && !pending->stop_pending) {
			p->migration_pending = NULL;
			complete = true;
		}

		task_rq_unlock(rq, p, rf);

		if (push_task) {
			stop_one_cpu_nowait(rq->cpu, push_cpu_stop,
					    p, &rq->push_work);
		}

		if (complete)
			complete_all(&pending->done);

		return 0;
	}

	if (!(flags & SCA_MIGRATE_ENABLE)) {
		/* serialized by p->pi_lock */
		if (!p->migration_pending) {
			/* Install the request */
			refcount_set(&my_pending.refs, 1);
			init_completion(&my_pending.done);
			my_pending.arg = (struct migration_arg) {
				.task = p,
				.dest_cpu = dest_cpu,
				.pending = &my_pending,
			};

			p->migration_pending = &my_pending;
		} else {
			pending = p->migration_pending;
			refcount_inc(&pending->refs);
			/*
			 * Affinity has changed, but we've already installed a
			 * pending. migration_cpu_stop() *must* see this, else
			 * we risk a completion of the pending despite having a
			 * task on a disallowed CPU.
			 *
			 * Serialized by p->pi_lock, so this is safe.
			 */
			pending->arg.dest_cpu = dest_cpu;
		}
	}
	pending = p->migration_pending;
	/*
	 * - !MIGRATE_ENABLE:
	 *   we'll have installed a pending if there wasn't one already.
	 *
	 * - MIGRATE_ENABLE:
	 *   we're here because the current CPU isn't matching anymore,
	 *   the only way that can happen is because of a concurrent
	 *   set_cpus_allowed_ptr() call, which should then still be
	 *   pending completion.
	 *
	 * Either way, we really should have a @pending here.
	 */
	if (WARN_ON_ONCE(!pending)) {
		task_rq_unlock(rq, p, rf);
		return -EINVAL;
	}

	if (task_running(rq, p) || READ_ONCE(p->__state) == TASK_WAKING) {
		/*
		 * MIGRATE_ENABLE gets here because 'p == current', but for
		 * anything else we cannot do is_migration_disabled(), punt
		 * and have the stopper function handle it all race-free.
		 */
		stop_pending = pending->stop_pending;
		if (!stop_pending)
			pending->stop_pending = true;

		if (flags & SCA_MIGRATE_ENABLE)
			p->migration_flags &= ~MDF_PUSH;

		task_rq_unlock(rq, p, rf);

		if (!stop_pending) {
			stop_one_cpu_nowait(cpu_of(rq), migration_cpu_stop,
					    &pending->arg, &pending->stop_work);
		}

		if (flags & SCA_MIGRATE_ENABLE)
			return 0;
	} else {

		if (!is_migration_disabled(p)) {
			if (task_on_rq_queued(p))
				rq = move_queued_task(rq, rf, p, dest_cpu);

			if (!pending->stop_pending) {
				p->migration_pending = NULL;
				complete = true;
			}
		}
		task_rq_unlock(rq, p, rf);

		if (complete)
			complete_all(&pending->done);
	}

	wait_for_completion(&pending->done);

	if (refcount_dec_and_test(&pending->refs))
		wake_up_var(&pending->refs); /* No UaF, just an address */

	/*
	 * Block the original owner of &pending until all subsequent callers
	 * have seen the completion and decremented the refcount
	 */
	wait_var_event(&my_pending.refs, !refcount_read(&my_pending.refs));

	/* ARGH */
	WARN_ON_ONCE(my_pending.stop_pending);

	return 0;
}

/*
 * Called with both p->pi_lock and rq->lock held; drops both before returning.
 */
static int __set_cpus_allowed_ptr_locked(struct task_struct *p,
					 const struct cpumask *new_mask,
					 u32 flags,
					 struct rq *rq,
					 struct rq_flags *rf)
	__releases(rq->lock)
	__releases(p->pi_lock)
{
	const struct cpumask *cpu_allowed_mask = task_cpu_possible_mask(p);
	const struct cpumask *cpu_valid_mask = cpu_active_mask;
	bool kthread = p->flags & PF_KTHREAD;
	struct cpumask *user_mask = NULL;
	unsigned int dest_cpu;
	int ret = 0;

	update_rq_clock(rq);

	if (kthread || is_migration_disabled(p)) {
		/*
		 * Kernel threads are allowed on online && !active CPUs,
		 * however, during cpu-hot-unplug, even these might get pushed
		 * away if not KTHREAD_IS_PER_CPU.
		 *
		 * Specifically, migration_disabled() tasks must not fail the
		 * cpumask_any_and_distribute() pick below, esp. so on
		 * SCA_MIGRATE_ENABLE, otherwise we'll not call
		 * set_cpus_allowed_common() and actually reset p->cpus_ptr.
		 */
		cpu_valid_mask = cpu_online_mask;
	}

	if (!kthread && !cpumask_subset(new_mask, cpu_allowed_mask)) {
		ret = -EINVAL;
		goto out;
	}

	/*
	 * Must re-check here, to close a race against __kthread_bind(),
	 * sched_setaffinity() is not guaranteed to observe the flag.
	 */
	if ((flags & SCA_CHECK) && (p->flags & PF_NO_SETAFFINITY)) {
		ret = -EINVAL;
		goto out;
	}

	if (!(flags & SCA_MIGRATE_ENABLE)) {
		if (cpumask_equal(&p->cpus_mask, new_mask))
			goto out;

		if (WARN_ON_ONCE(p == current &&
				 is_migration_disabled(p) &&
				 !cpumask_test_cpu(task_cpu(p), new_mask))) {
			ret = -EBUSY;
			goto out;
		}
	}

	/*
	 * Picking a ~random cpu helps in cases where we are changing affinity
	 * for groups of tasks (ie. cpuset), so that load balancing is not
	 * immediately required to distribute the tasks within their new mask.
	 */
	dest_cpu = cpumask_any_and_distribute(cpu_valid_mask, new_mask);
	trace_android_rvh_set_cpus_allowed_ptr_locked(cpu_valid_mask, new_mask, &dest_cpu);
	trace_android_rvh_set_cpus_allowed_by_task(cpu_valid_mask, new_mask, p, &dest_cpu);

	if (dest_cpu >= nr_cpu_ids) {
		ret = -EINVAL;
		goto out;
	}

	__do_set_cpus_allowed(p, new_mask, flags);

	if (flags & SCA_USER)
		user_mask = clear_user_cpus_ptr(p);

	ret = affine_move_task(rq, p, rf, dest_cpu, flags);

	kfree(user_mask);

	return ret;

out:
	task_rq_unlock(rq, p, rf);

	return ret;
}

/*
 * Change a given task's CPU affinity. Migrate the thread to a
 * proper CPU and schedule it away if the CPU it's executing on
 * is removed from the allowed bitmask.
 *
 * NOTE: the caller must have a valid reference to the task, the
 * task must not exit() & deallocate itself prematurely. The
 * call is not atomic; no spinlocks may be held.
 */
static int __set_cpus_allowed_ptr(struct task_struct *p,
				  const struct cpumask *new_mask, u32 flags)
{
	struct rq_flags rf;
	struct rq *rq;

	rq = task_rq_lock(p, &rf);
	return __set_cpus_allowed_ptr_locked(p, new_mask, flags, rq, &rf);
}

int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask)
{
	return __set_cpus_allowed_ptr(p, new_mask, 0);
}
EXPORT_SYMBOL_GPL(set_cpus_allowed_ptr);
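
/*
 * Example (illustrative, mirroring common in-tree usage; 'fn' and 'data'
 * are placeholders): pinning a freshly created kthread to one CPU before
 * waking it:
 *
 *	struct task_struct *tsk = kthread_create(fn, data, "worker/%d", cpu);
 *
 *	if (!IS_ERR(tsk)) {
 *		set_cpus_allowed_ptr(tsk, cpumask_of(cpu));
 *		wake_up_process(tsk);
 *	}
 *
 * (kthread_bind() is the preferred helper for this particular case, as it
 * additionally sets PF_NO_SETAFFINITY.)
 */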

/*
 * Change a given task's CPU affinity to the intersection of its current
 * affinity mask and @subset_mask, writing the resulting mask to @new_mask
 * and pointing @p->user_cpus_ptr to a copy of the old mask.
 * If the resulting mask is empty, leave the affinity unchanged and return
 * -EINVAL.
 */
static int restrict_cpus_allowed_ptr(struct task_struct *p,
				     struct cpumask *new_mask,
				     const struct cpumask *subset_mask)
{
	struct cpumask *user_mask = NULL;
	struct rq_flags rf;
	struct rq *rq;
	int err;

	if (!p->user_cpus_ptr) {
		user_mask = kmalloc(cpumask_size(), GFP_KERNEL);
		if (!user_mask)
			return -ENOMEM;
	}

	rq = task_rq_lock(p, &rf);

	/*
	 * Forcefully restricting the affinity of a deadline task is
	 * likely to cause problems, so fail and noisily override the
	 * mask entirely.
	 */
	if (task_has_dl_policy(p) && dl_bandwidth_enabled()) {
		err = -EPERM;
		goto err_unlock;
	}

	if (!cpumask_and(new_mask, &p->cpus_mask, subset_mask)) {
		err = -EINVAL;
		goto err_unlock;
	}

	/*
	 * We're about to butcher the task affinity, so keep track of what
	 * the user asked for in case we're able to restore it later on.
	 */
	if (user_mask) {
		cpumask_copy(user_mask, p->cpus_ptr);
		p->user_cpus_ptr = user_mask;
	}

	return __set_cpus_allowed_ptr_locked(p, new_mask, 0, rq, &rf);

err_unlock:
	task_rq_unlock(rq, p, &rf);
	kfree(user_mask);
	return err;
}

/*
 * Restrict the CPU affinity of task @p so that it is a subset of
 * task_cpu_possible_mask() and point @p->user_cpu_ptr to a copy of the
 * old affinity mask. If the resulting mask is empty, we warn and walk
 * up the cpuset hierarchy until we find a suitable mask.
 */
void force_compatible_cpus_allowed_ptr(struct task_struct *p)
{
	cpumask_var_t new_mask;
	const struct cpumask *override_mask = task_cpu_possible_mask(p);

	alloc_cpumask_var(&new_mask, GFP_KERNEL);

	/*
	 * __migrate_task() can fail silently in the face of concurrent
	 * offlining of the chosen destination CPU, so take the hotplug
	 * lock to ensure that the migration succeeds.
	 */
	trace_android_vh_force_compatible_pre(NULL);
	cpus_read_lock();
	if (!cpumask_available(new_mask))
		goto out_set_mask;

	if (!restrict_cpus_allowed_ptr(p, new_mask, override_mask))
		goto out_free_mask;

	/*
	 * We failed to find a valid subset of the affinity mask for the
	 * task, so override it based on its cpuset hierarchy.
	 */
	cpuset_cpus_allowed(p, new_mask);
	override_mask = new_mask;

out_set_mask:
	if (printk_ratelimit()) {
		printk_deferred("Overriding affinity for process %d (%s) to CPUs %*pbl\n",
				task_pid_nr(p), p->comm,
				cpumask_pr_args(override_mask));
	}

	WARN_ON(set_cpus_allowed_ptr(p, override_mask));
out_free_mask:
	cpus_read_unlock();
	trace_android_vh_force_compatible_post(NULL);
	free_cpumask_var(new_mask);
}

static int
__sched_setaffinity(struct task_struct *p, const struct cpumask *mask);

/*
 * Restore the affinity of a task @p which was previously restricted by a
 * call to force_compatible_cpus_allowed_ptr(). This will clear (and free)
 * @p->user_cpus_ptr.
 *
 * It is the caller's responsibility to serialise this with any calls to
 * force_compatible_cpus_allowed_ptr(@p).
 */
void relax_compatible_cpus_allowed_ptr(struct task_struct *p)
{
	struct cpumask *user_mask = p->user_cpus_ptr;
	unsigned long flags;

	/*
	 * Try to restore the old affinity mask. If this fails, then
	 * we free the mask explicitly to avoid it being inherited across
	 * a subsequent fork().
	 */
	if (!user_mask || !__sched_setaffinity(p, user_mask))
		return;

	raw_spin_lock_irqsave(&p->pi_lock, flags);
	user_mask = clear_user_cpus_ptr(p);
	raw_spin_unlock_irqrestore(&p->pi_lock, flags);

	kfree(user_mask);
}
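
/*
 * Background note: force_compatible_cpus_allowed_ptr() and the restore
 * above pair around execution windows in which a task can only run on a
 * subset of CPUs -- e.g. (on arm64) a 32-bit task on a system where only
 * some CPUs are 32-bit capable. The forced mask narrows the affinity and
 * stashes the user's mask in p->user_cpus_ptr; the relax path replays the
 * stashed mask once the restriction no longer applies.
 */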

void set_task_cpu(struct task_struct *p, unsigned int new_cpu)
{
#ifdef CONFIG_SCHED_DEBUG
	unsigned int state = READ_ONCE(p->__state);

	/*
	 * We should never call set_task_cpu() on a blocked task,
	 * ttwu() will sort out the placement.
	 */
	WARN_ON_ONCE(state != TASK_RUNNING && state != TASK_WAKING && !p->on_rq);

	/*
	 * Migrating fair class task must have p->on_rq = TASK_ON_RQ_MIGRATING,
	 * because schedstat_wait_{start,end} rebase migrating task's wait_start
	 * time relying on p->on_rq.
	 */
	WARN_ON_ONCE(state == TASK_RUNNING &&
		     p->sched_class == &fair_sched_class &&
		     (p->on_rq && !task_on_rq_migrating(p)));

#ifdef CONFIG_LOCKDEP
	/*
	 * The caller should hold either p->pi_lock or rq->lock, when changing
	 * a task's CPU. ->pi_lock for waking tasks, rq->lock for runnable tasks.
	 *
	 * sched_move_task() holds both and thus holding either pins the cgroup,
	 * see task_group().
	 *
	 * Furthermore, all task_rq users should acquire both locks, see
	 * task_rq_lock().
	 */
	WARN_ON_ONCE(debug_locks && !(lockdep_is_held(&p->pi_lock) ||
				      lockdep_is_held(__rq_lockp(task_rq(p)))));
#endif
	/*
	 * Clearly, migrating tasks to offline CPUs is a fairly daft thing.
	 */
	WARN_ON_ONCE(!cpu_online(new_cpu));

	WARN_ON_ONCE(is_migration_disabled(p));
#endif

	trace_sched_migrate_task(p, new_cpu);

	if (task_cpu(p) != new_cpu) {
		if (p->sched_class->migrate_task_rq)
			p->sched_class->migrate_task_rq(p, new_cpu);
		p->se.nr_migrations++;
		rseq_migrate(p);
		perf_event_task_migrate(p);
		trace_android_rvh_set_task_cpu(p, new_cpu);
	}

	__set_task_cpu(p, new_cpu);
}
EXPORT_SYMBOL_GPL(set_task_cpu);

static void __migrate_swap_task(struct task_struct *p, int cpu)
{
	if (task_on_rq_queued(p)) {
		struct rq *src_rq, *dst_rq;
		struct rq_flags srf, drf;

		src_rq = task_rq(p);
		dst_rq = cpu_rq(cpu);

		rq_pin_lock(src_rq, &srf);
		rq_pin_lock(dst_rq, &drf);

		deactivate_task(src_rq, p, 0);
		set_task_cpu(p, cpu);
		activate_task(dst_rq, p, 0);
		check_preempt_curr(dst_rq, p, 0);

		rq_unpin_lock(dst_rq, &drf);
		rq_unpin_lock(src_rq, &srf);

	} else {
		/*
		 * Task isn't running anymore; make it appear like we migrated
		 * it before it went to sleep. This means on wakeup we make the
		 * previous CPU our target instead of where it really is.
		 */
		p->wake_cpu = cpu;
	}
}

struct migration_swap_arg {
	struct task_struct *src_task, *dst_task;
	int src_cpu, dst_cpu;
};

static int migrate_swap_stop(void *data)
{
	struct migration_swap_arg *arg = data;
	struct rq *src_rq, *dst_rq;
	int ret = -EAGAIN;

	if (!cpu_active(arg->src_cpu) || !cpu_active(arg->dst_cpu))
		return -EAGAIN;

	src_rq = cpu_rq(arg->src_cpu);
	dst_rq = cpu_rq(arg->dst_cpu);

	double_raw_lock(&arg->src_task->pi_lock,
			&arg->dst_task->pi_lock);
	double_rq_lock(src_rq, dst_rq);

	if (task_cpu(arg->dst_task) != arg->dst_cpu)
		goto unlock;

	if (task_cpu(arg->src_task) != arg->src_cpu)
		goto unlock;

	if (!cpumask_test_cpu(arg->dst_cpu, arg->src_task->cpus_ptr))
		goto unlock;

	if (!cpumask_test_cpu(arg->src_cpu, arg->dst_task->cpus_ptr))
		goto unlock;

	__migrate_swap_task(arg->src_task, arg->dst_cpu);
	__migrate_swap_task(arg->dst_task, arg->src_cpu);

	ret = 0;

unlock:
	double_rq_unlock(src_rq, dst_rq);
	raw_spin_unlock(&arg->dst_task->pi_lock);
	raw_spin_unlock(&arg->src_task->pi_lock);

	return ret;
}

/*
 * Cross migrate two tasks
 */
int migrate_swap(struct task_struct *cur, struct task_struct *p,
		 int target_cpu, int curr_cpu)
{
	struct migration_swap_arg arg;
	int ret = -EINVAL;

	arg = (struct migration_swap_arg){
		.src_task = cur,
		.src_cpu = curr_cpu,
		.dst_task = p,
		.dst_cpu = target_cpu,
	};

	if (arg.src_cpu == arg.dst_cpu)
		goto out;

	/*
	 * These three tests are all lockless; this is OK since all of them
	 * will be re-checked with proper locks held further down the line.
	 */
	if (!cpu_active(arg.src_cpu) || !cpu_active(arg.dst_cpu))
		goto out;

	if (!cpumask_test_cpu(arg.dst_cpu, arg.src_task->cpus_ptr))
		goto out;

	if (!cpumask_test_cpu(arg.src_cpu, arg.dst_task->cpus_ptr))
		goto out;

	trace_sched_swap_numa(cur, arg.src_cpu, p, arg.dst_cpu);
	ret = stop_two_cpus(arg.dst_cpu, arg.src_cpu, migrate_swap_stop, &arg);

out:
	return ret;
}
EXPORT_SYMBOL_GPL(migrate_swap);
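
/*
 * Background note: migrate_swap() is used by NUMA balancing (see
 * task_numa_migrate() and friends in fair.c) to trade two tasks between
 * CPUs when a one-way move would leave either side overloaded;
 * stop_two_cpus() keeps both runqueues quiesced while the exchange
 * happens.
 */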

/*
 * wait_task_inactive - wait for a thread to unschedule.
 *
 * If @match_state is nonzero, it's the @p->state value just checked and
 * not expected to change. If it changes, i.e. @p might have woken up,
 * then return zero. When we succeed in waiting for @p to be off its CPU,
 * we return a positive number (its total switch count). If a second call
 * a short while later returns the same number, the caller can be sure that
 * @p has remained unscheduled the whole time.
 *
 * The caller must ensure that the task *will* unschedule sometime soon,
 * else this function might spin for a *long* time. This function can't
 * be called with interrupts off, or it may introduce deadlock with
 * smp_call_function() if an IPI is sent by the same process we are
 * waiting to become inactive.
 */
unsigned long wait_task_inactive(struct task_struct *p, unsigned int match_state)
{
	int running, queued;
	struct rq_flags rf;
	unsigned long ncsw;
	struct rq *rq;

	for (;;) {
		/*
		 * We do the initial early heuristics without holding
		 * any task-queue locks at all. We'll only try to get
		 * the runqueue lock when things look like they will
		 * work out!
		 */
		rq = task_rq(p);

		/*
		 * If the task is actively running on another CPU
		 * still, just relax and busy-wait without holding
		 * any locks.
		 *
		 * NOTE! Since we don't hold any locks, it's not
		 * even sure that "rq" stays as the right runqueue!
		 * But we don't care, since "task_running()" will
		 * return false if the runqueue has changed and p
		 * is actually now running somewhere else!
		 */
		while (task_running(rq, p)) {
			if (match_state && unlikely(READ_ONCE(p->__state) != match_state))
				return 0;
			cpu_relax();
		}

		/*
		 * Ok, time to look more closely! We need the rq
		 * lock now, to be *sure*. If we're wrong, we'll
		 * just go back and repeat.
		 */
		rq = task_rq_lock(p, &rf);
		trace_sched_wait_task(p);
		running = task_running(rq, p);
		queued = task_on_rq_queued(p);
		ncsw = 0;
		if (!match_state || READ_ONCE(p->__state) == match_state)
			ncsw = p->nvcsw | LONG_MIN; /* sets MSB */
		task_rq_unlock(rq, p, &rf);

		/*
		 * If it changed from the expected state, bail out now.
		 */
		if (unlikely(!ncsw))
			break;

		/*
		 * Was it really running after all now that we
		 * checked with the proper locks actually held?
		 *
		 * Oops. Go back and try again..
		 */
		if (unlikely(running)) {
			cpu_relax();
			continue;
		}

		/*
		 * It's not enough that it's not actively running,
		 * it must be off the runqueue _entirely_, and not
		 * preempted!
		 *
		 * So if it was still runnable (but just not actively
		 * running right now), it's preempted, and we should
		 * yield - it could be a while.
		 */
		if (unlikely(queued)) {
			ktime_t to = NSEC_PER_SEC / HZ;

			set_current_state(TASK_UNINTERRUPTIBLE);
			schedule_hrtimeout(&to, HRTIMER_MODE_REL);
			continue;
		}

		/*
		 * Ahh, all good. It wasn't running, and it wasn't
		 * runnable, which means that it will never become
		 * running in the future either. We're all done!
		 */
		break;
	}

	return ncsw;
}
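
/*
 * Illustrative use of the switch-count contract documented above: call
 * twice and compare the cookies to assert that @p never ran in between:
 *
 *	unsigned long ncsw = wait_task_inactive(p, TASK_UNINTERRUPTIBLE);
 *
 *	if (ncsw && wait_task_inactive(p, TASK_UNINTERRUPTIBLE) == ncsw)
 *		;	// p stayed off-CPU, in that state, the whole time
 */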

/***
 * kick_process - kick a running thread to enter/exit the kernel
 * @p: the to-be-kicked thread
 *
 * Cause a process which is running on another CPU to enter
 * kernel-mode, without any delay. (to get signals handled.)
 *
 * NOTE: this function doesn't have to take the runqueue lock,
 * because all it wants to ensure is that the remote task enters
 * the kernel. If the IPI races and the task has been migrated
 * to another CPU then no harm is done and the purpose has been
 * achieved as well.
 */
void kick_process(struct task_struct *p)
{
	int cpu;

	preempt_disable();
	cpu = task_cpu(p);
	if ((cpu != smp_processor_id()) && task_curr(p))
		smp_send_reschedule(cpu);
	preempt_enable();
}
EXPORT_SYMBOL_GPL(kick_process);

/*
 * ->cpus_ptr is protected by both rq->lock and p->pi_lock
 *
 * A few notes on cpu_active vs cpu_online:
 *
 *  - cpu_active must be a subset of cpu_online
 *
 *  - on CPU-up we allow per-CPU kthreads on the online && !active CPU,
 *    see __set_cpus_allowed_ptr(). At this point the newly online
 *    CPU isn't yet part of the sched domains, and balancing will not
 *    see it.
 *
 *  - on CPU-down we clear cpu_active() to mask the sched domains and
 *    avoid the load balancer to place new tasks on the to be removed
 *    CPU. Existing tasks will remain running there and will be taken
 *    off.
 *
 * This means that fallback selection must not select !active CPUs.
 * And can assume that any active CPU must be online. Conversely
 * select_task_rq() below may allow selection of !active CPUs in order
 * to satisfy the above rules.
 */
int select_fallback_rq(int cpu, struct task_struct *p)
{
	int nid = cpu_to_node(cpu);
	const struct cpumask *nodemask = NULL;
	enum { cpuset, possible, fail } state = cpuset;
	int dest_cpu = -1;

	trace_android_rvh_select_fallback_rq(cpu, p, &dest_cpu);
	if (dest_cpu >= 0)
		return dest_cpu;

	/*
	 * If the node that the CPU is on has been offlined, cpu_to_node()
	 * will return -1. There is no CPU on the node, and we should
	 * select the CPU on the other node.
	 */
	if (nid != -1) {
		nodemask = cpumask_of_node(nid);

		/* Look for allowed, online CPU in same node. */
		for_each_cpu(dest_cpu, nodemask) {
			if (is_cpu_allowed(p, dest_cpu))
				return dest_cpu;
		}
	}

	for (;;) {
		/* Any allowed, online CPU? */
		for_each_cpu(dest_cpu, p->cpus_ptr) {
			if (!is_cpu_allowed(p, dest_cpu))
				continue;

			goto out;
		}

		/* No more Mr. Nice Guy. */
		switch (state) {
		case cpuset:
			if (cpuset_cpus_allowed_fallback(p)) {
				state = possible;
				break;
			}
			fallthrough;
		case possible:
			/*
			 * XXX When called from select_task_rq() we only
			 * hold p->pi_lock and again violate locking order.
			 *
			 * More yuck to audit.
			 */
			do_set_cpus_allowed(p, task_cpu_possible_mask(p));
			state = fail;
			break;
		case fail:
			BUG();
			break;
		}
	}

out:
	if (state != cpuset) {
		/*
		 * Don't tell them about moving exiting tasks or
		 * kernel threads (both mm NULL), since they never
		 * leave kernel.
		 */
		if (p->mm && printk_ratelimit()) {
			printk_deferred("process %d (%s) no longer affine to cpu%d\n",
					task_pid_nr(p), p->comm, cpu);
		}
	}

	return dest_cpu;
}
EXPORT_SYMBOL_GPL(select_fallback_rq);

/*
 * The caller (fork, wakeup) owns p->pi_lock, ->cpus_ptr is stable.
 */
static inline
int select_task_rq(struct task_struct *p, int cpu, int wake_flags)
{
	lockdep_assert_held(&p->pi_lock);

	if (p->nr_cpus_allowed > 1 && !is_migration_disabled(p))
		cpu = p->sched_class->select_task_rq(p, cpu, wake_flags);
	else
		cpu = cpumask_any(p->cpus_ptr);

	/*
	 * In order not to call set_task_cpu() on a blocking task we need
	 * to rely on ttwu() to place the task on a valid ->cpus_ptr
	 * CPU.
	 *
	 * Since this is common to all placement strategies, this lives here.
	 *
	 * [ this allows ->select_task() to simply return task_cpu(p) and
	 *   not worry about this generic constraint ]
	 */
	if (unlikely(!is_cpu_allowed(p, cpu)))
		cpu = select_fallback_rq(task_cpu(p), p);

	return cpu;
}

void sched_set_stop_task(int cpu, struct task_struct *stop)
{
	static struct lock_class_key stop_pi_lock;
	struct sched_param param = { .sched_priority = MAX_RT_PRIO - 1 };
	struct task_struct *old_stop = cpu_rq(cpu)->stop;

	if (stop) {
		/*
		 * Make it appear like a SCHED_FIFO task, its something
		 * userspace knows about and won't get confused about.
		 *
		 * Also, it will make PI more or less work without too
		 * much confusion -- but then, stop work should not
		 * rely on PI working anyway.
		 */
		sched_setscheduler_nocheck(stop, SCHED_FIFO, &param);

		stop->sched_class = &stop_sched_class;

		/*
		 * The PI code calls rt_mutex_setprio() with ->pi_lock held to
		 * adjust the effective priority of a task. As a result,
		 * rt_mutex_setprio() can trigger (RT) balancing operations,
		 * which can then trigger wakeups of the stop thread to push
		 * around the current task.
		 *
		 * The stop task itself will never be part of the PI-chain, it
		 * never blocks, therefore that ->pi_lock recursion is safe.
		 * Tell lockdep about this by placing the stop->pi_lock in its
		 * own class.
		 */
		lockdep_set_class(&stop->pi_lock, &stop_pi_lock);
	}

	cpu_rq(cpu)->stop = stop;

	if (old_stop) {
		/*
		 * Reset it back to a normal scheduling class so that
		 * it can die in pieces.
		 */
		old_stop->sched_class = &rt_sched_class;
	}
}

#else /* CONFIG_SMP */

static inline int __set_cpus_allowed_ptr(struct task_struct *p,
					 const struct cpumask *new_mask,
					 u32 flags)
{
	return set_cpus_allowed_ptr(p, new_mask);
}

static inline void migrate_disable_switch(struct rq *rq, struct task_struct *p) { }

static inline bool rq_has_pinned_tasks(struct rq *rq)
{
	return false;
}

#endif /* !CONFIG_SMP */

static void
ttwu_stat(struct task_struct *p, int cpu, int wake_flags)
{
	struct rq *rq;

	if (!schedstat_enabled())
		return;

	rq = this_rq();

#ifdef CONFIG_SMP
	if (cpu == rq->cpu) {
		__schedstat_inc(rq->ttwu_local);
		__schedstat_inc(p->se.statistics.nr_wakeups_local);
	} else {
		struct sched_domain *sd;

		__schedstat_inc(p->se.statistics.nr_wakeups_remote);
		rcu_read_lock();
		for_each_domain(rq->cpu, sd) {
			if (cpumask_test_cpu(cpu, sched_domain_span(sd))) {
				__schedstat_inc(sd->ttwu_wake_remote);
				break;
			}
		}
		rcu_read_unlock();
	}

	if (wake_flags & WF_MIGRATED)
		__schedstat_inc(p->se.statistics.nr_wakeups_migrate);
#endif /* CONFIG_SMP */

	__schedstat_inc(rq->ttwu_count);
	__schedstat_inc(p->se.statistics.nr_wakeups);

	if (wake_flags & WF_SYNC)
		__schedstat_inc(p->se.statistics.nr_wakeups_sync);
}

/*
 * Mark the task runnable and perform wakeup-preemption.
 */
static void ttwu_do_wakeup(struct rq *rq, struct task_struct *p, int wake_flags,
			   struct rq_flags *rf)
{
	check_preempt_curr(rq, p, wake_flags);
	WRITE_ONCE(p->__state, TASK_RUNNING);
	trace_sched_wakeup(p);

#ifdef CONFIG_SMP
	if (p->sched_class->task_woken) {
		/*
		 * Our task @p is fully woken up and running; so it's safe to
		 * drop the rq->lock, hereafter rq is only used for statistics.
		 */
		rq_unpin_lock(rq, rf);
		p->sched_class->task_woken(rq, p);
		rq_repin_lock(rq, rf);
	}

	if (rq->idle_stamp) {
		u64 delta = rq_clock(rq) - rq->idle_stamp;
		u64 max = 2*rq->max_idle_balance_cost;

		update_avg(&rq->avg_idle, delta);

		if (rq->avg_idle > max)
			rq->avg_idle = max;

		rq->wake_stamp = jiffies;
		rq->wake_avg_idle = rq->avg_idle / 2;

		rq->idle_stamp = 0;
	}
#endif
}

static void
ttwu_do_activate(struct rq *rq, struct task_struct *p, int wake_flags,
		 struct rq_flags *rf)
{
	int en_flags = ENQUEUE_WAKEUP | ENQUEUE_NOCLOCK;

	if (wake_flags & WF_SYNC)
		en_flags |= ENQUEUE_WAKEUP_SYNC;

	lockdep_assert_rq_held(rq);

	if (p->sched_contributes_to_load)
		rq->nr_uninterruptible--;

#ifdef CONFIG_SMP
	if (wake_flags & WF_MIGRATED)
		en_flags |= ENQUEUE_MIGRATED;
	else
#endif
	if (p->in_iowait) {
		delayacct_blkio_end(p);
		atomic_dec(&task_rq(p)->nr_iowait);
	}

	activate_task(rq, p, en_flags);
	ttwu_do_wakeup(rq, p, wake_flags, rf);
}

/*
 * Consider @p being inside a wait loop:
 *
 *   for (;;) {
 *      set_current_state(TASK_UNINTERRUPTIBLE);
 *
 *      if (CONDITION)
 *         break;
 *
 *      schedule();
 *   }
 *   __set_current_state(TASK_RUNNING);
 *
 * between set_current_state() and schedule(). In this case @p is still
 * runnable, so all that needs doing is change p->state back to TASK_RUNNING in
 * an atomic manner.
 *
 * By taking task_rq(p)->lock we serialize against schedule(), if @p->on_rq
 * then schedule() must still happen and p->state can be changed to
 * TASK_RUNNING. Otherwise we lost the race, schedule() has happened, and we
 * need to do a full wakeup with enqueue.
 *
 * Returns: %true when the wakeup is done,
 *          %false otherwise.
 */
static int ttwu_runnable(struct task_struct *p, int wake_flags)
{
	struct rq_flags rf;
	struct rq *rq;
	int ret = 0;

	rq = __task_rq_lock(p, &rf);
	if (task_on_rq_queued(p)) {
		/* check_preempt_curr() may use rq clock */
		update_rq_clock(rq);
		ttwu_do_wakeup(rq, p, wake_flags, &rf);
		ret = 1;
	}
	__task_rq_unlock(rq, &rf);

	return ret;
}

#ifdef CONFIG_SMP
void sched_ttwu_pending(void *arg)
{
	struct llist_node *llist = arg;
	struct rq *rq = this_rq();
	struct task_struct *p, *t;
	struct rq_flags rf;

	if (!llist)
		return;

	/*
	 * rq::ttwu_pending racy indication of out-standing wakeups.
	 * Races such that false-negatives are possible, since they
	 * are shorter lived that false-positives would be.
	 */
	WRITE_ONCE(rq->ttwu_pending, 0);

	rq_lock_irqsave(rq, &rf);
	update_rq_clock(rq);

	llist_for_each_entry_safe(p, t, llist, wake_entry.llist) {
		if (WARN_ON_ONCE(p->on_cpu))
			smp_cond_load_acquire(&p->on_cpu, !VAL);

		if (WARN_ON_ONCE(task_cpu(p) != cpu_of(rq)))
			set_task_cpu(p, cpu_of(rq));

		ttwu_do_activate(rq, p, p->sched_remote_wakeup ? WF_MIGRATED : 0, &rf);
	}

	rq_unlock_irqrestore(rq, &rf);
}

void send_call_function_single_ipi(int cpu)
{
	struct rq *rq = cpu_rq(cpu);

	if (!set_nr_if_polling(rq->idle))
		arch_send_call_function_single_ipi(cpu);
	else
		trace_sched_wake_idle_without_ipi(cpu);
}

/*
 * Queue a task on the target CPUs wake_list and wake the CPU via IPI if
 * necessary. The wakee CPU on receipt of the IPI will queue the task
 * via sched_ttwu_wakeup() for activation so the wakee incurs the cost
 * of the wakeup instead of the waker.
 */
static void __ttwu_queue_wakelist(struct task_struct *p, int cpu, int wake_flags)
{
	struct rq *rq = cpu_rq(cpu);

	p->sched_remote_wakeup = !!(wake_flags & WF_MIGRATED);

	WRITE_ONCE(rq->ttwu_pending, 1);
	__smp_call_single_queue(cpu, &p->wake_entry.llist);
}

void wake_up_if_idle(int cpu)
{
	struct rq *rq = cpu_rq(cpu);
	struct rq_flags rf;

	rcu_read_lock();

	if (!is_idle_task(rcu_dereference(rq->curr)))
		goto out;

	if (set_nr_if_polling(rq->idle)) {
		trace_sched_wake_idle_without_ipi(cpu);
	} else {
		rq_lock_irqsave(rq, &rf);
		if (is_idle_task(rq->curr))
			smp_send_reschedule(cpu);
		/* Else CPU is not idle, do nothing here: */
		rq_unlock_irqrestore(rq, &rf);
	}

out:
	rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(wake_up_if_idle);

bool cpus_share_cache(int this_cpu, int that_cpu)
{
	if (this_cpu == that_cpu)
		return true;

	return per_cpu(sd_llc_id, this_cpu) == per_cpu(sd_llc_id, that_cpu);
}
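
/*
 * sd_llc_id above is the first CPU of each CPU's last-level-cache sched
 * domain, so two CPUs compare equal exactly when they sit under the same
 * LLC (e.g. SMT siblings, or cores sharing an L3/cluster).
 * ttwu_queue_cond() below uses this to decide whether a remote wakeup
 * should go through the IPI-driven wakelist instead of touching a
 * cache-cold remote runqueue directly.
 */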
|
|
|
|
static inline bool ttwu_queue_cond(struct task_struct *p, int cpu)
|
|
{
|
|
/*
|
|
* Do not complicate things with the async wake_list while the CPU is
|
|
* in hotplug state.
|
|
*/
|
|
if (!cpu_active(cpu))
|
|
return false;
|
|
|
|
/* Ensure the task will still be allowed to run on the CPU. */
|
|
if (!cpumask_test_cpu(cpu, p->cpus_ptr))
|
|
return false;
|
|
|
|
/*
|
|
* If the CPU does not share cache, then queue the task on the
|
|
* remote rqs wakelist to avoid accessing remote data.
|
|
*/
|
|
if (!cpus_share_cache(smp_processor_id(), cpu))
|
|
return true;
|
|
|
|
if (cpu == smp_processor_id())
|
|
return false;
|
|
|
|
/*
|
|
* If the wakee cpu is idle, or the task is descheduling and the
|
|
* only running task on the CPU, then use the wakelist to offload
|
|
* the task activation to the idle (or soon-to-be-idle) CPU as
|
|
* the current CPU is likely busy. nr_running is checked to
|
|
* avoid unnecessary task stacking.
|
|
*
|
|
* Note that we can only get here with (wakee) p->on_rq=0,
|
|
* p->on_cpu can be whatever, we've done the dequeue, so
|
|
* the wakee has been accounted out of ->nr_running.
|
|
*/
|
|
if (!cpu_rq(cpu)->nr_running)
|
|
return true;
|
|
|
|
return false;
|
|
}
|
|
|
|
static bool ttwu_queue_wakelist(struct task_struct *p, int cpu, int wake_flags)
|
|
{
|
|
bool cond = false;
|
|
|
|
trace_android_rvh_ttwu_cond(cpu, &cond);
|
|
|
|
if ((sched_feat(TTWU_QUEUE) && ttwu_queue_cond(p, cpu)) || cond) {
|
|
sched_clock_cpu(cpu); /* Sync clocks across CPUs */
|
|
__ttwu_queue_wakelist(p, cpu, wake_flags);
|
|
return true;
|
|
}
|
|
|
|
return false;
|
|
}
|
|
|
|
#else /* !CONFIG_SMP */
|
|
|
|
static inline bool ttwu_queue_wakelist(struct task_struct *p, int cpu, int wake_flags)
|
|
{
|
|
return false;
|
|
}
|
|
|
|
#endif /* CONFIG_SMP */
|
|
|
|
static void ttwu_queue(struct task_struct *p, int cpu, int wake_flags)
|
|
{
|
|
struct rq *rq = cpu_rq(cpu);
|
|
struct rq_flags rf;
|
|
|
|
if (ttwu_queue_wakelist(p, cpu, wake_flags))
|
|
return;
|
|
|
|
rq_lock(rq, &rf);
|
|
update_rq_clock(rq);
|
|
ttwu_do_activate(rq, p, wake_flags, &rf);
|
|
rq_unlock(rq, &rf);
|
|
}
|
|
|
|
/*
|
|
* Invoked from try_to_wake_up() to check whether the task can be woken up.
|
|
*
|
|
* The caller holds p::pi_lock if p != current or has preemption
|
|
* disabled when p == current.
|
|
*
|
|
* The rules of PREEMPT_RT saved_state:
|
|
*
|
|
* The related locking code always holds p::pi_lock when updating
|
|
* p::saved_state, which means the code is fully serialized in both cases.
|
|
*
|
|
* The lock wait and lock wakeups happen via TASK_RTLOCK_WAIT. No other
|
|
* bits set. This allows to distinguish all wakeup scenarios.
|
|
*/
|
|
static __always_inline
|
|
bool ttwu_state_match(struct task_struct *p, unsigned int state, int *success)
|
|
{
|
|
if (IS_ENABLED(CONFIG_DEBUG_PREEMPT)) {
|
|
WARN_ON_ONCE((state & TASK_RTLOCK_WAIT) &&
|
|
state != TASK_RTLOCK_WAIT);
|
|
}
|
|
|
|
if (READ_ONCE(p->__state) & state) {
|
|
*success = 1;
|
|
return true;
|
|
}
|
|
|
|
#ifdef CONFIG_PREEMPT_RT
|
|
/*
|
|
* Saved state preserves the task state across blocking on
|
|
* an RT lock. If the state matches, set p::saved_state to
|
|
* TASK_RUNNING, but do not wake the task because it waits
|
|
* for a lock wakeup. Also indicate success because from
|
|
* the regular waker's point of view this has succeeded.
|
|
*
|
|
* After acquiring the lock the task will restore p::__state
|
|
* from p::saved_state which ensures that the regular
|
|
* wakeup is not lost. The restore will also set
|
|
* p::saved_state to TASK_RUNNING so any further tests will
|
|
* not result in false positives vs. @success
|
|
*/
|
|
if (p->saved_state & state) {
|
|
p->saved_state = TASK_RUNNING;
|
|
*success = 1;
|
|
}
|
|
#endif
|
|
return false;
|
|
}
|
|
|
|
/*
 * Notes on Program-Order guarantees on SMP systems.
 *
 *  MIGRATION
 *
 * The basic program-order guarantee on SMP systems is that when a task [t]
 * migrates, all its activity on its old CPU [c0] happens-before any subsequent
 * execution on its new CPU [c1].
 *
 * For migration (of runnable tasks) this is provided by the following means:
 *
 *  A) UNLOCK of the rq(c0)->lock scheduling out task t
 *  B) migration for t is required to synchronize *both* rq(c0)->lock and
 *     rq(c1)->lock (if not at the same time, then in that order).
 *  C) LOCK of the rq(c1)->lock scheduling in task
 *
 * Release/acquire chaining guarantees that B happens after A and C after B.
 * Note: the CPU doing B need not be c0 or c1
 *
 * Example:
 *
 *   CPU0            CPU1            CPU2
 *
 *   LOCK rq(0)->lock
 *   sched-out X
 *   sched-in Y
 *   UNLOCK rq(0)->lock
 *
 *                                   LOCK rq(0)->lock // orders against CPU0
 *                                   dequeue X
 *                                   UNLOCK rq(0)->lock
 *
 *                                   LOCK rq(1)->lock
 *                                   enqueue X
 *                                   UNLOCK rq(1)->lock
 *
 *                   LOCK rq(1)->lock // orders against CPU2
 *                   sched-out Z
 *                   sched-in X
 *                   UNLOCK rq(1)->lock
 *
 *
 *  BLOCKING -- aka. SLEEP + WAKEUP
 *
 * For blocking we (obviously) need to provide the same guarantee as for
 * migration. However the means are completely different as there is no lock
 * chain to provide order. Instead we do:
 *
 *   1) smp_store_release(X->on_cpu, 0)   -- finish_task()
 *   2) smp_cond_load_acquire(!X->on_cpu) -- try_to_wake_up()
 *
 * Example:
 *
 *   CPU0 (schedule)  CPU1 (try_to_wake_up) CPU2 (schedule)
 *
 *   LOCK rq(0)->lock LOCK X->pi_lock
 *   dequeue X
 *   sched-out X
 *   smp_store_release(X->on_cpu, 0);
 *
 *                    smp_cond_load_acquire(&X->on_cpu, !VAL);
 *                    X->state = WAKING
 *                    set_task_cpu(X,2)
 *
 *                    LOCK rq(2)->lock
 *                    enqueue X
 *                    X->state = RUNNING
 *                    UNLOCK rq(2)->lock
 *
 *                                          LOCK rq(2)->lock // orders against CPU1
 *                                          sched-out Z
 *                                          sched-in X
 *                                          UNLOCK rq(2)->lock
 *
 *                    UNLOCK X->pi_lock
 *   UNLOCK rq(0)->lock
 *
 *
 * However, for wakeups there is a second guarantee we must provide, namely we
 * must ensure that CONDITION=1 done by the caller can not be reordered with
 * accesses to the task state; see try_to_wake_up() and set_current_state().
 */

/**
 * try_to_wake_up - wake up a thread
 * @p: the thread to be awakened
 * @state: the mask of task states that can be woken
 * @wake_flags: wake modifier flags (WF_*)
 *
 * Conceptually does:
 *
 *   If (@state & @p->state) @p->state = TASK_RUNNING.
 *
 * If the task was not queued/runnable, also place it back on a runqueue.
 *
 * This function is atomic against schedule() which would dequeue the task.
 *
 * It issues a full memory barrier before accessing @p->state, see the comment
 * with set_current_state().
 *
 * Uses p->pi_lock to serialize against concurrent wake-ups.
 *
 * Relies on p->pi_lock stabilizing:
 *  - p->sched_class
 *  - p->cpus_ptr
 *  - p->sched_task_group
 * in order to do migration, see its use of select_task_rq()/set_task_cpu().
 *
 * Tries really hard to only take one task_rq(p)->lock for performance.
 * Takes rq->lock in:
 *  - ttwu_runnable()    -- old rq, unavoidable, see comment there;
 *  - ttwu_queue()       -- new rq, for enqueue of the task;
 *  - psi_ttwu_dequeue() -- much sadness :-( accounting will kill us.
 *
 * As a consequence we race really badly with just about everything. See the
 * many memory barriers and their comments for details.
 *
 * Return: %true if @p->state changes (an actual wakeup was done),
 *	   %false otherwise.
 */
static int
try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags)
{
	unsigned long flags;
	int cpu, success = 0;

	preempt_disable();
	if (p == current) {
		/*
		 * We're waking current, this means 'p->on_rq' and 'task_cpu(p)
		 * == smp_processor_id()'. Together this means we can special
		 * case the whole 'p->on_rq && ttwu_runnable()' case below
		 * without taking any locks.
		 *
		 * In particular:
		 *  - we rely on Program-Order guarantees for all the ordering,
		 *  - we're serialized against set_special_state() by virtue of
		 *    it disabling IRQs (this allows not taking ->pi_lock).
		 */
		if (!ttwu_state_match(p, state, &success))
			goto out;

		trace_sched_waking(p);
		WRITE_ONCE(p->__state, TASK_RUNNING);
		trace_sched_wakeup(p);
		goto out;
	}

	/*
	 * If we are going to wake up a thread waiting for CONDITION we
	 * need to ensure that CONDITION=1 done by the caller can not be
	 * reordered with p->state check below. This pairs with smp_store_mb()
	 * in set_current_state() that the waiting thread does.
	 */
	raw_spin_lock_irqsave(&p->pi_lock, flags);
	smp_mb__after_spinlock();
	if (!ttwu_state_match(p, state, &success))
		goto unlock;

#ifdef CONFIG_FREEZER
	/*
	 * If we're going to wake up a thread which may be frozen, then
	 * we can only do so if we have an active CPU which is capable of
	 * running it. This may not be the case when resuming from suspend,
	 * as the secondary CPUs may not yet be back online. See __thaw_task()
	 * for the actual wakeup.
	 */
	if (unlikely(frozen_or_skipped(p)) &&
	    !cpumask_intersects(cpu_active_mask, task_cpu_possible_mask(p)))
		goto unlock;
#endif

	trace_sched_waking(p);

	/*
	 * Ensure we load p->on_rq _after_ p->state, otherwise it would
	 * be possible to, falsely, observe p->on_rq == 0 and get stuck
	 * in smp_cond_load_acquire() below.
	 *
	 * sched_ttwu_pending()			try_to_wake_up()
	 *   STORE p->on_rq = 1			  LOAD p->state
	 *   UNLOCK rq->lock
	 *
	 * __schedule() (switch to task 'p')
	 *   LOCK rq->lock			  smp_rmb();
	 *   smp_mb__after_spinlock();
	 *   UNLOCK rq->lock
	 *
	 * [task p]
	 *   STORE p->state = UNINTERRUPTIBLE	  LOAD p->on_rq
	 *
	 * Pairs with the LOCK+smp_mb__after_spinlock() on rq->lock in
	 * __schedule(). See the comment for smp_mb__after_spinlock().
	 *
	 * A similar smp_rmb() lives in try_invoke_on_locked_down_task().
	 */
	smp_rmb();
	if (READ_ONCE(p->on_rq) && ttwu_runnable(p, wake_flags))
		goto unlock;

	if (READ_ONCE(p->__state) & TASK_UNINTERRUPTIBLE)
		trace_sched_blocked_reason(p);

#ifdef CONFIG_SMP
	/*
	 * Ensure we load p->on_cpu _after_ p->on_rq, otherwise it would be
	 * possible to, falsely, observe p->on_cpu == 0.
	 *
	 * One must be running (->on_cpu == 1) in order to remove oneself
	 * from the runqueue.
	 *
	 * __schedule() (switch to task 'p')	try_to_wake_up()
	 *   STORE p->on_cpu = 1		  LOAD p->on_rq
	 *   UNLOCK rq->lock
	 *
	 * __schedule() (put 'p' to sleep)
	 *   LOCK rq->lock			  smp_rmb();
	 *   smp_mb__after_spinlock();
	 *   STORE p->on_rq = 0			  LOAD p->on_cpu
	 *
	 * Pairs with the LOCK+smp_mb__after_spinlock() on rq->lock in
	 * __schedule(). See the comment for smp_mb__after_spinlock().
	 *
	 * Form a control-dep-acquire with p->on_rq == 0 above, to ensure
	 * schedule()'s deactivate_task() has 'happened' and p will no longer
	 * care about its own p->state. See the comment in __schedule().
	 */
	smp_acquire__after_ctrl_dep();

	/*
	 * We're doing the wakeup (@success == 1), they did a dequeue (p->on_rq
	 * == 0), which means we need to do an enqueue, change p->state to
	 * TASK_WAKING such that we can unlock p->pi_lock before doing the
	 * enqueue, such as ttwu_queue_wakelist().
	 */
	WRITE_ONCE(p->__state, TASK_WAKING);

	/*
	 * If the owning (remote) CPU is still in the middle of schedule() with
	 * this task as prev, consider queueing p on the remote CPUs wake_list
	 * which potentially sends an IPI instead of spinning on p->on_cpu to
	 * let the waker make forward progress. This is safe because IRQs are
	 * disabled and the IPI will deliver after on_cpu is cleared.
	 *
	 * Ensure we load task_cpu(p) after p->on_cpu:
	 *
	 * set_task_cpu(p, cpu);
	 *   STORE p->cpu = @cpu
	 * __schedule() (switch to task 'p')
	 *   LOCK rq->lock
	 *   smp_mb__after_spin_lock()		smp_cond_load_acquire(&p->on_cpu)
	 *   STORE p->on_cpu = 1		LOAD p->cpu
	 *
	 * to ensure we observe the correct CPU on which the task is currently
	 * scheduling.
	 */
	if (smp_load_acquire(&p->on_cpu) &&
	    ttwu_queue_wakelist(p, task_cpu(p), wake_flags))
		goto unlock;

	/*
	 * If the owning (remote) CPU is still in the middle of schedule() with
	 * this task as prev, wait until it's done referencing the task.
	 *
	 * Pairs with the smp_store_release() in finish_task().
	 *
	 * This ensures that tasks getting woken will be fully ordered against
	 * their previous state and preserve Program Order.
	 */
	smp_cond_load_acquire(&p->on_cpu, !VAL);

	trace_android_rvh_try_to_wake_up(p);

	cpu = select_task_rq(p, p->wake_cpu, wake_flags | WF_TTWU);
	if (task_cpu(p) != cpu) {
		if (p->in_iowait) {
			delayacct_blkio_end(p);
			atomic_dec(&task_rq(p)->nr_iowait);
		}

		wake_flags |= WF_MIGRATED;
		psi_ttwu_dequeue(p);
		set_task_cpu(p, cpu);
	}
#else
	cpu = task_cpu(p);
#endif /* CONFIG_SMP */

	ttwu_queue(p, cpu, wake_flags);
unlock:
	raw_spin_unlock_irqrestore(&p->pi_lock, flags);
out:
	if (success) {
		trace_android_rvh_try_to_wake_up_success(p);
		ttwu_stat(p, task_cpu(p), wake_flags);
	}
	preempt_enable();

	return success;
}

/**
 * try_invoke_on_locked_down_task - Invoke a function on task in fixed state
 * @p: Process for which the function is to be invoked, can be @current.
 * @func: Function to invoke.
 * @arg: Argument to function.
 *
 * If the specified task can be quickly locked into a definite state
 * (either sleeping or on a given runqueue), arrange to keep it in that
 * state while invoking @func(@arg). This function can use ->on_rq and
 * task_curr() to work out what the state is, if required. Given that
 * @func can be invoked with a runqueue lock held, it had better be quite
 * lightweight.
 *
 * Returns:
 *	@false if the task slipped out from under the locks.
 *	@true if the task was locked onto a runqueue or is sleeping.
 *		However, @func can override this by returning @false.
 */
bool try_invoke_on_locked_down_task(struct task_struct *p, bool (*func)(struct task_struct *t, void *arg), void *arg)
{
	struct rq_flags rf;
	bool ret = false;
	struct rq *rq;

	raw_spin_lock_irqsave(&p->pi_lock, rf.flags);
	if (p->on_rq) {
		rq = __task_rq_lock(p, &rf);
		if (task_rq(p) == rq)
			ret = func(p, arg);
		rq_unlock(rq, &rf);
	} else {
		switch (READ_ONCE(p->__state)) {
		case TASK_RUNNING:
		case TASK_WAKING:
			break;
		default:
			smp_rmb(); // See smp_rmb() comment in try_to_wake_up().
			if (!p->on_rq)
				ret = func(p, arg);
		}
	}
	raw_spin_unlock_irqrestore(&p->pi_lock, rf.flags);
	return ret;
}
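
/*
 * Editor's example (a hypothetical sketch, not an in-tree user): callers
 * typically pass a small, lock-safe predicate that inspects the pinned task;
 * "read_on_rq" below is made up for illustration.
 *
 *	static bool read_on_rq(struct task_struct *t, void *arg)
 *	{
 *		*(int *)arg = t->on_rq;	// safe: t is held in a fixed state
 *		return true;
 *	}
 *
 *	int on_rq;
 *
 *	if (try_invoke_on_locked_down_task(p, read_on_rq, &on_rq))
 *		pr_info("p->on_rq == %d\n", on_rq);
 */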

/**
 * wake_up_process - Wake up a specific process
 * @p: The process to be woken up.
 *
 * Attempt to wake up the nominated process and move it to the set of runnable
 * processes.
 *
 * Return: 1 if the process was woken up, 0 if it was already running.
 *
 * This function executes a full memory barrier before accessing the task state.
 */
int wake_up_process(struct task_struct *p)
{
	return try_to_wake_up(p, TASK_NORMAL, 0);
}
EXPORT_SYMBOL(wake_up_process);
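
/*
 * Editor's example: the canonical sleeper/waker pairing that the CONDITION
 * ordering discussed above protects (a minimal sketch; "done" and "sleeper"
 * are hypothetical):
 *
 *	// sleeper				// waker
 *	for (;;) {
 *		set_current_state(TASK_UNINTERRUPTIBLE);
 *		if (done)			done = true;
 *			break;			wake_up_process(sleeper);
 *		schedule();
 *	}
 *	__set_current_state(TASK_RUNNING);
 *
 * wake_up_process() issues a full barrier before reading the task state,
 * pairing with the smp_store_mb() in set_current_state() on the sleeper side,
 * so the waker cannot miss the store to "done".
 */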

int wake_up_state(struct task_struct *p, unsigned int state)
{
	return try_to_wake_up(p, state, 0);
}

/*
 * Perform scheduler related setup for a newly forked process p.
 * p is forked by current.
 *
 * __sched_fork() is basic setup used by init_idle() too:
 */
static void __sched_fork(unsigned long clone_flags, struct task_struct *p)
{
	p->on_rq = 0;

	p->se.on_rq = 0;
	p->se.exec_start = 0;
	p->se.sum_exec_runtime = 0;
	p->se.prev_sum_exec_runtime = 0;
	p->se.nr_migrations = 0;
	p->se.vruntime = 0;
	INIT_LIST_HEAD(&p->se.group_node);

#ifdef CONFIG_FAIR_GROUP_SCHED
	p->se.cfs_rq = NULL;
#endif

	trace_android_rvh_sched_fork_init(p);

#ifdef CONFIG_SCHEDSTATS
	/* Even if schedstat is disabled, there should not be garbage */
	memset(&p->se.statistics, 0, sizeof(p->se.statistics));
#endif

	RB_CLEAR_NODE(&p->dl.rb_node);
	init_dl_task_timer(&p->dl);
	init_dl_inactive_task_timer(&p->dl);
	__dl_clear_params(p);

	INIT_LIST_HEAD(&p->rt.run_list);
	p->rt.timeout = 0;
	p->rt.time_slice = sched_rr_timeslice;
	p->rt.on_rq = 0;
	p->rt.on_list = 0;

#ifdef CONFIG_PREEMPT_NOTIFIERS
	INIT_HLIST_HEAD(&p->preempt_notifiers);
#endif

#ifdef CONFIG_COMPACTION
	p->capture_control = NULL;
#endif
	init_numa_balancing(clone_flags, p);
#ifdef CONFIG_SMP
	p->wake_entry.u_flags = CSD_TYPE_TTWU;
	p->migration_pending = NULL;
#endif
}

DEFINE_STATIC_KEY_FALSE(sched_numa_balancing);

#ifdef CONFIG_NUMA_BALANCING

void set_numabalancing_state(bool enabled)
{
	if (enabled)
		static_branch_enable(&sched_numa_balancing);
	else
		static_branch_disable(&sched_numa_balancing);
}

#ifdef CONFIG_PROC_SYSCTL
int sysctl_numa_balancing(struct ctl_table *table, int write,
			  void *buffer, size_t *lenp, loff_t *ppos)
{
	struct ctl_table t;
	int err;
	int state = static_branch_likely(&sched_numa_balancing);

	if (write && !capable(CAP_SYS_ADMIN))
		return -EPERM;

	t = *table;
	t.data = &state;
	err = proc_dointvec_minmax(&t, write, buffer, lenp, ppos);
	if (err < 0)
		return err;
	if (write)
		set_numabalancing_state(state);
	return err;
}
#endif
#endif
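
/*
 * Editor's note: the handler above backs the kernel.numa_balancing sysctl,
 * so the static branch can be flipped at runtime, e.g.:
 *
 *	# echo 0 > /proc/sys/kernel/numa_balancing
 *	# sysctl kernel.numa_balancing=1
 */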

#ifdef CONFIG_SCHEDSTATS

DEFINE_STATIC_KEY_FALSE(sched_schedstats);

static void set_schedstats(bool enabled)
{
	if (enabled)
		static_branch_enable(&sched_schedstats);
	else
		static_branch_disable(&sched_schedstats);
}

void force_schedstat_enabled(void)
{
	if (!schedstat_enabled()) {
		pr_info("kernel profiling enabled schedstats, disable via kernel.sched_schedstats.\n");
		static_branch_enable(&sched_schedstats);
	}
}

static int __init setup_schedstats(char *str)
{
	int ret = 0;
	if (!str)
		goto out;

	if (!strcmp(str, "enable")) {
		set_schedstats(true);
		ret = 1;
	} else if (!strcmp(str, "disable")) {
		set_schedstats(false);
		ret = 1;
	}
out:
	if (!ret)
		pr_warn("Unable to parse schedstats=\n");

	return ret;
}
__setup("schedstats=", setup_schedstats);

#ifdef CONFIG_PROC_SYSCTL
int sysctl_schedstats(struct ctl_table *table, int write, void *buffer,
		      size_t *lenp, loff_t *ppos)
{
	struct ctl_table t;
	int err;
	int state = static_branch_likely(&sched_schedstats);

	if (write && !capable(CAP_SYS_ADMIN))
		return -EPERM;

	t = *table;
	t.data = &state;
	err = proc_dointvec_minmax(&t, write, buffer, lenp, ppos);
	if (err < 0)
		return err;
	if (write)
		set_schedstats(state);
	return err;
}
#endif /* CONFIG_PROC_SYSCTL */
#endif /* CONFIG_SCHEDSTATS */

/*
 * fork()/clone()-time setup:
 */
int sched_fork(unsigned long clone_flags, struct task_struct *p)
{
	trace_android_rvh_sched_fork(p);

	__sched_fork(clone_flags, p);
	/*
	 * We mark the process as NEW here. This guarantees that
	 * nobody will actually run it, and a signal or other external
	 * event cannot wake it up and insert it on the runqueue either.
	 */
	p->__state = TASK_NEW;

	/*
	 * Make sure we do not leak PI boosting priority to the child.
	 */
	p->prio = current->normal_prio;
	trace_android_rvh_prepare_prio_fork(p);

	uclamp_fork(p);

	/*
	 * Revert to default priority/policy on fork if requested.
	 */
	if (unlikely(p->sched_reset_on_fork)) {
		if (task_has_dl_policy(p) || task_has_rt_policy(p)) {
			p->policy = SCHED_NORMAL;
			p->static_prio = NICE_TO_PRIO(0);
			p->rt_priority = 0;
		} else if (PRIO_TO_NICE(p->static_prio) < 0)
			p->static_prio = NICE_TO_PRIO(0);

		p->prio = p->normal_prio = p->static_prio;
		set_load_weight(p, false);

		/*
		 * We don't need the reset flag anymore after the fork. It has
		 * fulfilled its duty:
		 */
		p->sched_reset_on_fork = 0;
	}

	if (dl_prio(p->prio))
		return -EAGAIN;
	else if (rt_prio(p->prio))
		p->sched_class = &rt_sched_class;
	else
		p->sched_class = &fair_sched_class;

	init_entity_runnable_average(&p->se);
	trace_android_rvh_finish_prio_fork(p);

#ifdef CONFIG_SCHED_INFO
	if (likely(sched_info_on()))
		memset(&p->sched_info, 0, sizeof(p->sched_info));
#endif
#if defined(CONFIG_SMP)
	p->on_cpu = 0;
#endif
	init_task_preempt_count(p);
#ifdef CONFIG_SMP
	plist_node_init(&p->pushable_tasks, MAX_PRIO);
	RB_CLEAR_NODE(&p->pushable_dl_tasks);
#endif
	return 0;
}

void sched_cgroup_fork(struct task_struct *p, struct kernel_clone_args *kargs)
{
	unsigned long flags;

	/*
	 * Because we're not yet on the pid-hash, p->pi_lock isn't strictly
	 * required yet, but lockdep gets upset if rules are violated.
	 */
	raw_spin_lock_irqsave(&p->pi_lock, flags);
#ifdef CONFIG_CGROUP_SCHED
	if (1) {
		struct task_group *tg;
		tg = container_of(kargs->cset->subsys[cpu_cgrp_id],
				  struct task_group, css);
		tg = autogroup_task_group(p, tg);
		p->sched_task_group = tg;
	}
#endif
	rseq_migrate(p);
	/*
	 * We're setting the CPU for the first time, we don't migrate,
	 * so use __set_task_cpu().
	 */
	__set_task_cpu(p, smp_processor_id());
	if (p->sched_class->task_fork)
		p->sched_class->task_fork(p);
	raw_spin_unlock_irqrestore(&p->pi_lock, flags);
}

void sched_post_fork(struct task_struct *p)
{
	uclamp_post_fork(p);
}

unsigned long to_ratio(u64 period, u64 runtime)
{
	if (runtime == RUNTIME_INF)
		return BW_UNIT;

	/*
	 * Doing this here saves a lot of checks in all
	 * the calling paths, and returning zero seems
	 * safe for them anyway.
	 */
	if (period == 0)
		return 0;

	return div64_u64(runtime << BW_SHIFT, period);
}
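
/*
 * Editor's worked example: assuming BW_SHIFT == 20 (so BW_UNIT == 1 << 20,
 * as defined in sched.h at the time of writing), a runtime of 50ms out of a
 * 100ms period gives:
 *
 *	to_ratio(100000000, 50000000)
 *		== (50000000 << 20) / 100000000
 *		== 524288 == BW_UNIT / 2
 *
 * i.e. a 50% bandwidth ratio expressed in fixed point.
 */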

/*
 * wake_up_new_task - wake up a newly created task for the first time.
 *
 * This function will do some initial scheduler statistics housekeeping
 * that must be done for every newly created context, then puts the task
 * on the runqueue and wakes it.
 */
void wake_up_new_task(struct task_struct *p)
{
	struct rq_flags rf;
	struct rq *rq;

	trace_android_rvh_wake_up_new_task(p);

	raw_spin_lock_irqsave(&p->pi_lock, rf.flags);
	WRITE_ONCE(p->__state, TASK_RUNNING);
#ifdef CONFIG_SMP
	/*
	 * Fork balancing, do it here and not earlier because:
	 *  - cpus_ptr can change in the fork path
	 *  - any previously selected CPU might disappear through hotplug
	 *
	 * Use __set_task_cpu() to avoid calling sched_class::migrate_task_rq,
	 * as we're not fully set-up yet.
	 */
	p->recent_used_cpu = task_cpu(p);
	rseq_migrate(p);
	__set_task_cpu(p, select_task_rq(p, task_cpu(p), WF_FORK));
#endif
	rq = __task_rq_lock(p, &rf);
	update_rq_clock(rq);
	post_init_entity_util_avg(p);
	trace_android_rvh_new_task_stats(p);

	activate_task(rq, p, ENQUEUE_NOCLOCK);
	trace_sched_wakeup_new(p);
	check_preempt_curr(rq, p, WF_FORK);
#ifdef CONFIG_SMP
	if (p->sched_class->task_woken) {
		/*
		 * Nothing relies on rq->lock after this, so it's fine to
		 * drop it.
		 */
		rq_unpin_lock(rq, &rf);
		p->sched_class->task_woken(rq, p);
		rq_repin_lock(rq, &rf);
	}
#endif
	task_rq_unlock(rq, p, &rf);
}

#ifdef CONFIG_PREEMPT_NOTIFIERS

static DEFINE_STATIC_KEY_FALSE(preempt_notifier_key);

void preempt_notifier_inc(void)
{
	static_branch_inc(&preempt_notifier_key);
}
EXPORT_SYMBOL_GPL(preempt_notifier_inc);

void preempt_notifier_dec(void)
{
	static_branch_dec(&preempt_notifier_key);
}
EXPORT_SYMBOL_GPL(preempt_notifier_dec);

/**
 * preempt_notifier_register - tell me when current is being preempted & rescheduled
 * @notifier: notifier struct to register
 */
void preempt_notifier_register(struct preempt_notifier *notifier)
{
	if (!static_branch_unlikely(&preempt_notifier_key))
		WARN(1, "registering preempt_notifier while notifiers disabled\n");

	hlist_add_head(&notifier->link, &current->preempt_notifiers);
}
EXPORT_SYMBOL_GPL(preempt_notifier_register);

/**
 * preempt_notifier_unregister - no longer interested in preemption notifications
 * @notifier: notifier struct to unregister
 *
 * This is *not* safe to call from within a preemption notifier.
 */
void preempt_notifier_unregister(struct preempt_notifier *notifier)
{
	hlist_del(&notifier->link);
}
EXPORT_SYMBOL_GPL(preempt_notifier_unregister);
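
/*
 * Editor's example (a minimal sketch; the callback bodies and names are
 * hypothetical): a user such as a hypervisor fills in preempt_notifier_ops
 * and registers a notifier for the current task:
 *
 *	static void my_sched_in(struct preempt_notifier *pn, int cpu)
 *	{
 *		// current is being scheduled back in on @cpu
 *	}
 *
 *	static void my_sched_out(struct preempt_notifier *pn,
 *				 struct task_struct *next)
 *	{
 *		// current is being preempted in favour of @next
 *	}
 *
 *	static struct preempt_notifier_ops my_ops = {
 *		.sched_in	= my_sched_in,
 *		.sched_out	= my_sched_out,
 *	};
 *
 *	struct preempt_notifier pn;
 *
 *	preempt_notifier_inc();
 *	preempt_notifier_init(&pn, &my_ops);
 *	preempt_notifier_register(&pn);
 */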

static void __fire_sched_in_preempt_notifiers(struct task_struct *curr)
{
	struct preempt_notifier *notifier;

	hlist_for_each_entry(notifier, &curr->preempt_notifiers, link)
		notifier->ops->sched_in(notifier, raw_smp_processor_id());
}

static __always_inline void fire_sched_in_preempt_notifiers(struct task_struct *curr)
{
	if (static_branch_unlikely(&preempt_notifier_key))
		__fire_sched_in_preempt_notifiers(curr);
}

static void
__fire_sched_out_preempt_notifiers(struct task_struct *curr,
				   struct task_struct *next)
{
	struct preempt_notifier *notifier;

	hlist_for_each_entry(notifier, &curr->preempt_notifiers, link)
		notifier->ops->sched_out(notifier, next);
}

static __always_inline void
fire_sched_out_preempt_notifiers(struct task_struct *curr,
				 struct task_struct *next)
{
	if (static_branch_unlikely(&preempt_notifier_key))
		__fire_sched_out_preempt_notifiers(curr, next);
}

#else /* !CONFIG_PREEMPT_NOTIFIERS */

static inline void fire_sched_in_preempt_notifiers(struct task_struct *curr)
{
}

static inline void
fire_sched_out_preempt_notifiers(struct task_struct *curr,
				 struct task_struct *next)
{
}

#endif /* CONFIG_PREEMPT_NOTIFIERS */

static inline void prepare_task(struct task_struct *next)
{
#ifdef CONFIG_SMP
	/*
	 * Claim the task as running, we do this before switching to it
	 * such that any running task will have this set.
	 *
	 * See the smp_load_acquire(&p->on_cpu) case in ttwu() and
	 * its ordering comment.
	 */
	WRITE_ONCE(next->on_cpu, 1);
#endif
}

static inline void finish_task(struct task_struct *prev)
{
#ifdef CONFIG_SMP
	/*
	 * This must be the very last reference to @prev from this CPU. After
	 * p->on_cpu is cleared, the task can be moved to a different CPU. We
	 * must ensure this doesn't happen until the switch is completely
	 * finished.
	 *
	 * In particular, the load of prev->state in finish_task_switch() must
	 * happen before this.
	 *
	 * Pairs with the smp_cond_load_acquire() in try_to_wake_up().
	 */
	smp_store_release(&prev->on_cpu, 0);
#endif
}

#ifdef CONFIG_SMP

static void do_balance_callbacks(struct rq *rq, struct callback_head *head)
{
	void (*func)(struct rq *rq);
	struct callback_head *next;

	lockdep_assert_rq_held(rq);

	while (head) {
		func = (void (*)(struct rq *))head->func;
		next = head->next;
		head->next = NULL;
		head = next;

		func(rq);
	}
}

static void balance_push(struct rq *rq);

/*
 * balance_push_callback is a right abuse of the callback interface and plays
 * by significantly different rules.
 *
 * Where the normal balance_callback's purpose is to be run in the same context
 * that queued it (only later, when it's safe to drop rq->lock again),
 * balance_push_callback is specifically targeted at __schedule().
 *
 * This abuse is tolerated because it places all the unlikely/odd cases behind
 * a single test, namely: rq->balance_callback == NULL.
 */
struct callback_head balance_push_callback = {
	.next = NULL,
	.func = (void (*)(struct callback_head *))balance_push,
};
EXPORT_SYMBOL_GPL(balance_push_callback);

static inline struct callback_head *
__splice_balance_callbacks(struct rq *rq, bool split)
{
	struct callback_head *head = rq->balance_callback;

	if (likely(!head))
		return NULL;

	lockdep_assert_rq_held(rq);
	/*
	 * Must not take balance_push_callback off the list when
	 * splice_balance_callbacks() and balance_callbacks() are not
	 * in the same rq->lock section.
	 *
	 * In that case it would be possible for __schedule() to interleave
	 * and observe the list empty.
	 */
	if (split && head == &balance_push_callback)
		head = NULL;
	else
		rq->balance_callback = NULL;

	return head;
}

static inline struct callback_head *splice_balance_callbacks(struct rq *rq)
{
	return __splice_balance_callbacks(rq, true);
}

void __balance_callbacks(struct rq *rq)
{
	do_balance_callbacks(rq, __splice_balance_callbacks(rq, false));
}
EXPORT_SYMBOL_GPL(__balance_callbacks);

static inline void balance_callbacks(struct rq *rq, struct callback_head *head)
{
	unsigned long flags;

	if (unlikely(head)) {
		raw_spin_rq_lock_irqsave(rq, flags);
		do_balance_callbacks(rq, head);
		raw_spin_rq_unlock_irqrestore(rq, flags);
	}
}

#else

static inline void __balance_callbacks(struct rq *rq)
{
}

static inline struct callback_head *splice_balance_callbacks(struct rq *rq)
{
	return NULL;
}

static inline void balance_callbacks(struct rq *rq, struct callback_head *head)
{
}

#endif

static inline void
prepare_lock_switch(struct rq *rq, struct task_struct *next, struct rq_flags *rf)
{
	/*
	 * Since the runqueue lock will be released by the next
	 * task (which is an invalid locking op but in the case
	 * of the scheduler it's an obvious special-case), we
	 * do an early lockdep release here:
	 */
	rq_unpin_lock(rq, rf);
	spin_release(&__rq_lockp(rq)->dep_map, _THIS_IP_);
#ifdef CONFIG_DEBUG_SPINLOCK
	/* this is a valid case when another task releases the spinlock */
	rq_lockp(rq)->owner = next;
#endif
}

static inline void finish_lock_switch(struct rq *rq)
{
	/*
	 * If we are tracking spinlock dependencies then we have to
	 * fix up the runqueue lock - which gets 'carried over' from
	 * prev into current:
	 */
	spin_acquire(&__rq_lockp(rq)->dep_map, 0, 0, _THIS_IP_);
	__balance_callbacks(rq);
	raw_spin_rq_unlock_irq(rq);
}

/*
 * NOP if the arch has not defined these:
 */

#ifndef prepare_arch_switch
# define prepare_arch_switch(next)	do { } while (0)
#endif

#ifndef finish_arch_post_lock_switch
# define finish_arch_post_lock_switch()	do { } while (0)
#endif

static inline void kmap_local_sched_out(void)
{
#ifdef CONFIG_KMAP_LOCAL
	if (unlikely(current->kmap_ctrl.idx))
		__kmap_local_sched_out();
#endif
}

static inline void kmap_local_sched_in(void)
{
#ifdef CONFIG_KMAP_LOCAL
	if (unlikely(current->kmap_ctrl.idx))
		__kmap_local_sched_in();
#endif
}

/**
 * prepare_task_switch - prepare to switch tasks
 * @rq: the runqueue preparing to switch
 * @prev: the current task that is being switched out
 * @next: the task we are going to switch to.
 *
 * This is called with the rq lock held and interrupts off. It must
 * be paired with a subsequent finish_task_switch after the context
 * switch.
 *
 * prepare_task_switch sets up locking and calls architecture specific
 * hooks.
 */
static inline void
prepare_task_switch(struct rq *rq, struct task_struct *prev,
		    struct task_struct *next)
{
	kcov_prepare_switch(prev);
	sched_info_switch(rq, prev, next);
	perf_event_task_sched_out(prev, next);
	rseq_preempt(prev);
	fire_sched_out_preempt_notifiers(prev, next);
	kmap_local_sched_out();
	prepare_task(next);
	prepare_arch_switch(next);
}

/**
 * finish_task_switch - clean up after a task-switch
 * @prev: the thread we just switched away from.
 *
 * finish_task_switch must be called after the context switch, paired
 * with a prepare_task_switch call before the context switch.
 * finish_task_switch will reconcile locking set up by prepare_task_switch,
 * and do any other architecture-specific cleanup actions.
 *
 * Note that we may have delayed dropping an mm in context_switch(). If
 * so, we finish that here outside of the runqueue lock. (Doing it
 * with the lock held can cause deadlocks; see schedule() for
 * details.)
 *
 * The context switch has flipped the stack from under us and restored the
 * local variables which were saved when this task called schedule() in the
 * past. prev == current is still correct but we need to recalculate this_rq
 * because prev may have moved to another CPU.
 */
static struct rq *finish_task_switch(struct task_struct *prev)
	__releases(rq->lock)
{
	struct rq *rq = this_rq();
	struct mm_struct *mm = rq->prev_mm;
	long prev_state;

	/*
	 * The previous task will have left us with a preempt_count of 2
	 * because it left us after:
	 *
	 *	schedule()
	 *	  preempt_disable();			// 1
	 *	  __schedule()
	 *	    raw_spin_lock_irq(&rq->lock)	// 2
	 *
	 * Also, see FORK_PREEMPT_COUNT.
	 */
	if (WARN_ONCE(preempt_count() != 2*PREEMPT_DISABLE_OFFSET,
		      "corrupted preempt_count: %s/%d/0x%x\n",
		      current->comm, current->pid, preempt_count()))
		preempt_count_set(FORK_PREEMPT_COUNT);

	rq->prev_mm = NULL;

	/*
	 * A task struct has one reference for the use as "current".
	 * If a task dies, then it sets TASK_DEAD in tsk->state and calls
	 * schedule one last time. The schedule call will never return, and
	 * the scheduled task must drop that reference.
	 *
	 * We must observe prev->state before clearing prev->on_cpu (in
	 * finish_task), otherwise a concurrent wakeup can get prev
	 * running on another CPU and we could race with its RUNNING -> DEAD
	 * transition, resulting in a double drop.
	 */
	prev_state = READ_ONCE(prev->__state);
	vtime_task_switch(prev);
	perf_event_task_sched_in(prev, current);
	finish_task(prev);
	tick_nohz_task_switch();
	finish_lock_switch(rq);
	finish_arch_post_lock_switch();
	kcov_finish_switch(current);
	/*
	 * kmap_local_sched_out() is invoked with rq::lock held and
	 * interrupts disabled. There is no requirement for that, but the
	 * sched out code does not have an interrupt enabled section.
	 * Restoring the maps on sched in does not require interrupts being
	 * disabled either.
	 */
	kmap_local_sched_in();

	fire_sched_in_preempt_notifiers(current);
	/*
	 * When switching through a kernel thread, the loop in
	 * membarrier_{private,global}_expedited() may have observed that
	 * kernel thread and not issued an IPI. It is therefore possible to
	 * schedule between user->kernel->user threads without passing through
	 * switch_mm(). Membarrier requires a barrier after storing to
	 * rq->curr, before returning to userspace, so provide them here:
	 *
	 * - a full memory barrier for {PRIVATE,GLOBAL}_EXPEDITED, implicitly
	 *   provided by mmdrop(),
	 * - a sync_core for SYNC_CORE.
	 */
	if (mm) {
		membarrier_mm_sync_core_before_usermode(mm);
		mmdrop(mm);
	}
	if (unlikely(prev_state == TASK_DEAD)) {
		if (prev->sched_class->task_dead)
			prev->sched_class->task_dead(prev);

		/*
		 * Remove function-return probe instances associated with this
		 * task and put them back on the free list.
		 */
		kprobe_flush_task(prev);
		trace_android_rvh_flush_task(prev);

		/* Task is done with its stack. */
		put_task_stack(prev);

		put_task_struct_rcu_user(prev);
	}

	return rq;
}

/**
 * schedule_tail - first thing a freshly forked thread must call.
 * @prev: the thread we just switched away from.
 */
asmlinkage __visible void schedule_tail(struct task_struct *prev)
	__releases(rq->lock)
{
	/*
	 * New tasks start with FORK_PREEMPT_COUNT, see there and
	 * finish_task_switch() for details.
	 *
	 * finish_task_switch() will drop rq->lock() and lower preempt_count
	 * and the preempt_enable() will end up enabling preemption (on
	 * PREEMPT_COUNT kernels).
	 */

	finish_task_switch(prev);
	preempt_enable();

	if (current->set_child_tid)
		put_user(task_pid_vnr(current), current->set_child_tid);

	calculate_sigpending();
}

/*
 * context_switch - switch to the new MM and the new thread's register state.
 */
static __always_inline struct rq *
context_switch(struct rq *rq, struct task_struct *prev,
	       struct task_struct *next, struct rq_flags *rf)
{
	prepare_task_switch(rq, prev, next);

	/*
	 * For paravirt, this is coupled with an exit in switch_to to
	 * combine the page table reload and the switch backend into
	 * one hypercall.
	 */
	arch_start_context_switch(prev);

	/*
	 * kernel -> kernel   lazy + transfer active
	 *   user -> kernel   lazy + mmgrab() active
	 *
	 * kernel ->   user   switch + mmdrop() active
	 *   user ->   user   switch
	 */
	if (!next->mm) {                                // to kernel
		enter_lazy_tlb(prev->active_mm, next);

		next->active_mm = prev->active_mm;
		if (prev->mm)                           // from user
			mmgrab(prev->active_mm);
		else
			prev->active_mm = NULL;
	} else {                                        // to user
		membarrier_switch_mm(rq, prev->active_mm, next->mm);
		/*
		 * sys_membarrier() requires an smp_mb() between setting
		 * rq->curr / membarrier_switch_mm() and returning to userspace.
		 *
		 * The below provides this either through switch_mm(), or in
		 * case 'prev->active_mm == next->mm' through
		 * finish_task_switch()'s mmdrop().
		 */
		switch_mm_irqs_off(prev->active_mm, next->mm, next);
		lru_gen_use_mm(next->mm);

		if (!prev->mm) {                        // from kernel
			/* will mmdrop() in finish_task_switch(). */
			rq->prev_mm = prev->active_mm;
			prev->active_mm = NULL;
		}
	}

	rq->clock_update_flags &= ~(RQCF_ACT_SKIP|RQCF_REQ_SKIP);

	prepare_lock_switch(rq, next, rf);

	/* Here we just switch the register state and the stack. */
	switch_to(prev, next, prev);
	barrier();

	return finish_task_switch(prev);
}

/*
 * nr_running and nr_context_switches:
 *
 * externally visible scheduler statistics: current number of runnable
 * threads, total number of context switches performed since bootup.
 */
unsigned int nr_running(void)
{
	unsigned int i, sum = 0;

	for_each_online_cpu(i)
		sum += cpu_rq(i)->nr_running;

	return sum;
}

/*
 * Check if only the current task is running on the CPU.
 *
 * Caution: this function does not check that the caller has disabled
 * preemption, thus the result might have a time-of-check-to-time-of-use
 * race. The caller is responsible to use it correctly, for example:
 *
 * - from a non-preemptible section (of course)
 *
 * - from a thread that is bound to a single CPU
 *
 * - in a loop with very short iterations (e.g. a polling loop)
 */
bool single_task_running(void)
{
	return raw_rq()->nr_running == 1;
}
EXPORT_SYMBOL(single_task_running);
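
/*
 * Editor's example (illustrative only, not an in-tree caller): a short
 * polling loop is the kind of user the comment above has in mind, e.g.:
 *
 *	while (!condition) {
 *		if (single_task_running())
 *			cpu_relax();	// nobody else wants this CPU
 *		else
 *			cond_resched();	// give competing tasks a chance
 *	}
 */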

unsigned long long nr_context_switches(void)
{
	int i;
	unsigned long long sum = 0;

	for_each_possible_cpu(i)
		sum += cpu_rq(i)->nr_switches;

	return sum;
}
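
/*
 * Editor's note: these counters are what userspace ultimately sees; for
 * example the "ctxt" line in /proc/stat is fed by nr_context_switches() and
 * the runnable-task count in /proc/loadavg by nr_running().
 */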

/*
 * Consumers of these two interfaces, like for example the cpuidle menu
 * governor, are using nonsensical data: preferring shallow idle state
 * selection for a CPU that has IO-wait pending even though the task might
 * not end up running on that CPU when it does become runnable.
 */

unsigned int nr_iowait_cpu(int cpu)
{
	return atomic_read(&cpu_rq(cpu)->nr_iowait);
}

/*
 * IO-wait accounting, and how it's mostly bollocks (on SMP).
 *
 * The idea behind IO-wait accounting is to account the idle time that we
 * could have spent running if it were not for IO. That is, if we were to
 * improve the storage performance, we'd have a proportional reduction in
 * IO-wait time.
 *
 * This all works nicely on UP, where, when a task blocks on IO, we account
 * idle time as IO-wait, because if the storage were faster, it could've been
 * running and we'd not be idle.
 *
 * This has been extended to SMP, by doing the same for each CPU. This however
 * is broken.
 *
 * Imagine for instance the case where two tasks block on one CPU, only the one
 * CPU will have IO-wait accounted, while the other has regular idle. Even
 * though, if the storage were faster, both could've run at the same time,
 * utilising both CPUs.
 *
 * This means, that when looking globally, the current IO-wait accounting on
 * SMP is a lower bound, by reason of under accounting.
 *
 * Worse, since the numbers are provided per CPU, they are sometimes
 * interpreted per CPU, and that is nonsensical. A blocked task isn't strictly
 * associated with any one particular CPU, it can wake to another CPU than it
 * blocked on. This means the per CPU IO-wait number is meaningless.
 *
 * Task CPU affinities can make all that even more 'interesting'.
 */

unsigned int nr_iowait(void)
{
	unsigned int i, sum = 0;

	for_each_possible_cpu(i)
		sum += nr_iowait_cpu(i);

	return sum;
}

#ifdef CONFIG_SMP

/*
 * sched_exec - execve() is a valuable balancing opportunity, because at
 * this point the task has the smallest effective memory and cache footprint.
 */
void sched_exec(void)
{
	struct task_struct *p = current;
	unsigned long flags;
	int dest_cpu;
	bool cond = false;

	trace_android_rvh_sched_exec(&cond);
	if (cond)
		return;

	raw_spin_lock_irqsave(&p->pi_lock, flags);
	dest_cpu = p->sched_class->select_task_rq(p, task_cpu(p), WF_EXEC);
	if (dest_cpu == smp_processor_id())
		goto unlock;

	if (likely(cpu_active(dest_cpu))) {
		struct migration_arg arg = { p, dest_cpu };

		raw_spin_unlock_irqrestore(&p->pi_lock, flags);
		stop_one_cpu(task_cpu(p), migration_cpu_stop, &arg);
		return;
	}
unlock:
	raw_spin_unlock_irqrestore(&p->pi_lock, flags);
}

#endif

DEFINE_PER_CPU(struct kernel_stat, kstat);
DEFINE_PER_CPU(struct kernel_cpustat, kernel_cpustat);

EXPORT_PER_CPU_SYMBOL(kstat);
EXPORT_PER_CPU_SYMBOL(kernel_cpustat);

/*
 * The function fair_sched_class.update_curr accesses the struct curr
 * and its field curr->exec_start; when called from task_sched_runtime(),
 * we observe a high rate of cache misses in practice.
 * Prefetching this data results in improved performance.
 */
static inline void prefetch_curr_exec_start(struct task_struct *p)
{
#ifdef CONFIG_FAIR_GROUP_SCHED
	struct sched_entity *curr = (&p->se)->cfs_rq->curr;
#else
	struct sched_entity *curr = (&task_rq(p)->cfs)->curr;
#endif
	prefetch(curr);
	prefetch(&curr->exec_start);
}

/*
 * Return accounted runtime for the task.
 * In case the task is currently running, return the runtime plus current's
 * pending runtime that has not been accounted yet.
 */
unsigned long long task_sched_runtime(struct task_struct *p)
{
	struct rq_flags rf;
	struct rq *rq;
	u64 ns;

#if defined(CONFIG_64BIT) && defined(CONFIG_SMP)
	/*
	 * 64-bit doesn't need locks to atomically read a 64-bit value.
	 * So we have an optimization chance when the task's delta_exec is 0.
	 * Reading ->on_cpu is racy, but this is ok.
	 *
	 * If we race with it leaving CPU, we'll take a lock. So we're correct.
	 * If we race with it entering CPU, unaccounted time is 0. This is
	 * indistinguishable from the read occurring a few cycles earlier.
	 * If we see ->on_cpu without ->on_rq, the task is leaving, and has
	 * been accounted, so we're correct here as well.
	 */
	if (!p->on_cpu || !task_on_rq_queued(p))
		return p->se.sum_exec_runtime;
#endif

	rq = task_rq_lock(p, &rf);
	/*
	 * Must be ->curr _and_ ->on_rq. If dequeued, we would
	 * project cycles that may never be accounted to this
	 * thread, breaking clock_gettime().
	 */
	if (task_current(rq, p) && task_on_rq_queued(p)) {
		prefetch_curr_exec_start(p);
		update_rq_clock(rq);
		p->sched_class->update_curr(rq);
	}
	ns = p->se.sum_exec_runtime;
	task_rq_unlock(rq, p, &rf);

	return ns;
}
EXPORT_SYMBOL_GPL(task_sched_runtime);
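
/*
 * Editor's note: this accounting ultimately backs the per-thread CPU clocks,
 * e.g. a userspace clock_gettime(CLOCK_THREAD_CPUTIME_ID, &ts) ends up
 * sampling the runtime computed here.
 */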

#ifdef CONFIG_SCHED_DEBUG
static u64 cpu_resched_latency(struct rq *rq)
{
	int latency_warn_ms = READ_ONCE(sysctl_resched_latency_warn_ms);
	u64 resched_latency, now = rq_clock(rq);
	static bool warned_once;

	if (sysctl_resched_latency_warn_once && warned_once)
		return 0;

	if (!need_resched() || !latency_warn_ms)
		return 0;

	if (system_state == SYSTEM_BOOTING)
		return 0;

	if (!rq->last_seen_need_resched_ns) {
		rq->last_seen_need_resched_ns = now;
		rq->ticks_without_resched = 0;
		return 0;
	}

	rq->ticks_without_resched++;
	resched_latency = now - rq->last_seen_need_resched_ns;
	if (resched_latency <= latency_warn_ms * NSEC_PER_MSEC)
		return 0;

	warned_once = true;

	return resched_latency;
}

static int __init setup_resched_latency_warn_ms(char *str)
{
	long val;

	if ((kstrtol(str, 0, &val))) {
		pr_warn("Unable to set resched_latency_warn_ms\n");
		return 1;
	}

	sysctl_resched_latency_warn_ms = val;
	return 1;
}
__setup("resched_latency_warn_ms=", setup_resched_latency_warn_ms);
#else
static inline u64 cpu_resched_latency(struct rq *rq) { return 0; }
#endif /* CONFIG_SCHED_DEBUG */

/*
 * This function gets called by the timer code, with HZ frequency.
 * We call it with interrupts disabled.
 */
void scheduler_tick(void)
{
	int cpu = smp_processor_id();
	struct rq *rq = cpu_rq(cpu);
	struct task_struct *curr = rq->curr;
	struct rq_flags rf;
	unsigned long thermal_pressure;
	u64 resched_latency;

	arch_scale_freq_tick();
	sched_clock_tick();

	rq_lock(rq, &rf);

	update_rq_clock(rq);
	trace_android_rvh_tick_entry(rq);

	thermal_pressure = arch_scale_thermal_pressure(cpu_of(rq));
	update_thermal_load_avg(rq_clock_thermal(rq), rq, thermal_pressure);
	curr->sched_class->task_tick(rq, curr, 0);
	if (sched_feat(LATENCY_WARN))
		resched_latency = cpu_resched_latency(rq);
	calc_global_load_tick(rq);

	rq_unlock(rq, &rf);

	if (sched_feat(LATENCY_WARN) && resched_latency)
		resched_latency_warn(cpu, resched_latency);

	perf_event_task_tick();

#ifdef CONFIG_SMP
	rq->idle_balance = idle_cpu(cpu);
	trigger_load_balance(rq);
#endif

	trace_android_vh_scheduler_tick(rq);
}

#ifdef CONFIG_NO_HZ_FULL

struct tick_work {
	int			cpu;
	atomic_t		state;
	struct delayed_work	work;
};
/* Values for ->state, see diagram below. */
#define TICK_SCHED_REMOTE_OFFLINE	0
#define TICK_SCHED_REMOTE_OFFLINING	1
#define TICK_SCHED_REMOTE_RUNNING	2

/*
 * State diagram for ->state:
 *
 *
 *          TICK_SCHED_REMOTE_OFFLINE
 *                    |   ^
 *                    |   |
 *                    |   | sched_tick_remote()
 *                    |   |
 *                    |   |
 *                    +--TICK_SCHED_REMOTE_OFFLINING
 *                    |   ^
 *                    |   |
 * sched_tick_start() |   | sched_tick_stop()
 *                    |   |
 *                    V   |
 *          TICK_SCHED_REMOTE_RUNNING
 *
 *
 * Other transitions get WARN_ON_ONCE(), except that sched_tick_remote()
 * and sched_tick_start() are happy to leave the state in RUNNING.
 */

static struct tick_work __percpu *tick_work_cpu;

static void sched_tick_remote(struct work_struct *work)
{
	struct delayed_work *dwork = to_delayed_work(work);
	struct tick_work *twork = container_of(dwork, struct tick_work, work);
	int cpu = twork->cpu;
	struct rq *rq = cpu_rq(cpu);
	struct task_struct *curr;
	struct rq_flags rf;
	u64 delta;
	int os;

	/*
	 * Handle the tick only if it appears the remote CPU is running in full
	 * dynticks mode. The check is racy by nature, but missing a tick or
	 * having one too much is no big deal because the scheduler tick updates
	 * statistics and checks timeslices in a time-independent way, regardless
	 * of when exactly it is running.
	 */
	if (!tick_nohz_tick_stopped_cpu(cpu))
		goto out_requeue;

	rq_lock_irq(rq, &rf);
	curr = rq->curr;
	if (cpu_is_offline(cpu))
		goto out_unlock;

	update_rq_clock(rq);

	if (!is_idle_task(curr)) {
		/*
		 * Make sure the next tick runs within a reasonable
		 * amount of time.
		 */
		delta = rq_clock_task(rq) - curr->se.exec_start;
		WARN_ON_ONCE(delta > (u64)NSEC_PER_SEC * 3);
	}
	curr->sched_class->task_tick(rq, curr, 0);

	calc_load_nohz_remote(rq);
out_unlock:
	rq_unlock_irq(rq, &rf);
out_requeue:

	/*
	 * Run the remote tick once per second (1Hz). This arbitrary
	 * frequency is large enough to avoid overload but short enough
	 * to keep scheduler internal stats reasonably up to date. But
	 * first update state to reflect hotplug activity if required.
	 */
	os = atomic_fetch_add_unless(&twork->state, -1, TICK_SCHED_REMOTE_RUNNING);
	WARN_ON_ONCE(os == TICK_SCHED_REMOTE_OFFLINE);
	if (os == TICK_SCHED_REMOTE_RUNNING)
		queue_delayed_work(system_unbound_wq, dwork, HZ);
}

static void sched_tick_start(int cpu)
{
	int os;
	struct tick_work *twork;

	if (housekeeping_cpu(cpu, HK_FLAG_TICK))
		return;

	WARN_ON_ONCE(!tick_work_cpu);

	twork = per_cpu_ptr(tick_work_cpu, cpu);
	os = atomic_xchg(&twork->state, TICK_SCHED_REMOTE_RUNNING);
	WARN_ON_ONCE(os == TICK_SCHED_REMOTE_RUNNING);
	if (os == TICK_SCHED_REMOTE_OFFLINE) {
		twork->cpu = cpu;
		INIT_DELAYED_WORK(&twork->work, sched_tick_remote);
		queue_delayed_work(system_unbound_wq, &twork->work, HZ);
	}
}

#ifdef CONFIG_HOTPLUG_CPU
static void sched_tick_stop(int cpu)
{
	struct tick_work *twork;
	int os;

	if (housekeeping_cpu(cpu, HK_FLAG_TICK))
		return;

	WARN_ON_ONCE(!tick_work_cpu);

	twork = per_cpu_ptr(tick_work_cpu, cpu);
	/* There cannot be competing actions, but don't rely on stop-machine. */
	os = atomic_xchg(&twork->state, TICK_SCHED_REMOTE_OFFLINING);
	WARN_ON_ONCE(os != TICK_SCHED_REMOTE_RUNNING);
	/* Don't cancel, as this would mess up the state machine. */
}
#endif /* CONFIG_HOTPLUG_CPU */

int __init sched_tick_offload_init(void)
{
	tick_work_cpu = alloc_percpu(struct tick_work);
	BUG_ON(!tick_work_cpu);
	return 0;
}

#else /* !CONFIG_NO_HZ_FULL */
static inline void sched_tick_start(int cpu) { }
static inline void sched_tick_stop(int cpu) { }
#endif

#if defined(CONFIG_PREEMPTION) && (defined(CONFIG_DEBUG_PREEMPT) || \
				defined(CONFIG_TRACE_PREEMPT_TOGGLE))
/*
 * If the value passed in is equal to the current preempt count
 * then we just disabled preemption. Start timing the latency.
 */
static inline void preempt_latency_start(int val)
{
	if (preempt_count() == val) {
		unsigned long ip = get_lock_parent_ip();
#ifdef CONFIG_DEBUG_PREEMPT
		current->preempt_disable_ip = ip;
#endif
		trace_preempt_off(CALLER_ADDR0, ip);
	}
}

void preempt_count_add(int val)
{
#ifdef CONFIG_DEBUG_PREEMPT
	/*
	 * Underflow?
	 */
	if (DEBUG_LOCKS_WARN_ON((preempt_count() < 0)))
		return;
#endif
	__preempt_count_add(val);
#ifdef CONFIG_DEBUG_PREEMPT
	/*
	 * Spinlock count overflowing soon?
	 */
	DEBUG_LOCKS_WARN_ON((preempt_count() & PREEMPT_MASK) >=
				PREEMPT_MASK - 10);
#endif
	preempt_latency_start(val);
}
EXPORT_SYMBOL(preempt_count_add);
NOKPROBE_SYMBOL(preempt_count_add);

/*
 * If the value passed in is equal to the current preempt count
 * then we just enabled preemption. Stop timing the latency.
 */
static inline void preempt_latency_stop(int val)
{
	if (preempt_count() == val)
		trace_preempt_on(CALLER_ADDR0, get_lock_parent_ip());
}

void preempt_count_sub(int val)
{
#ifdef CONFIG_DEBUG_PREEMPT
	/*
	 * Underflow?
	 */
	if (DEBUG_LOCKS_WARN_ON(val > preempt_count()))
		return;
	/*
	 * Is the spinlock portion underflowing?
	 */
	if (DEBUG_LOCKS_WARN_ON((val < PREEMPT_MASK) &&
			!(preempt_count() & PREEMPT_MASK)))
		return;
#endif

	preempt_latency_stop(val);
	__preempt_count_sub(val);
}
EXPORT_SYMBOL(preempt_count_sub);
NOKPROBE_SYMBOL(preempt_count_sub);

#else
static inline void preempt_latency_start(int val) { }
static inline void preempt_latency_stop(int val) { }
#endif

static inline unsigned long get_preempt_disable_ip(struct task_struct *p)
{
#ifdef CONFIG_DEBUG_PREEMPT
	return p->preempt_disable_ip;
#else
	return 0;
#endif
}

/*
 * Print scheduling while atomic bug:
 */
static noinline void __schedule_bug(struct task_struct *prev)
{
	/* Save this before calling printk(), since that will clobber it */
	unsigned long preempt_disable_ip = get_preempt_disable_ip(current);

	if (oops_in_progress)
		return;

	printk(KERN_ERR "BUG: scheduling while atomic: %s/%d/0x%08x\n",
		prev->comm, prev->pid, preempt_count());

	debug_show_held_locks(prev);
	print_modules();
	if (irqs_disabled())
		print_irqtrace_events(prev);
	if (IS_ENABLED(CONFIG_DEBUG_PREEMPT)
	    && in_atomic_preempt_off()) {
		pr_err("Preemption disabled at:");
		print_ip_sym(KERN_ERR, preempt_disable_ip);
	}
	check_panic_on_warn("scheduling while atomic");

	trace_android_rvh_schedule_bug(prev);

	dump_stack();
	add_taint(TAINT_WARN, LOCKDEP_STILL_OK);
}

/*
 * Various schedule()-time debugging checks and statistics:
 */
static inline void schedule_debug(struct task_struct *prev, bool preempt)
{
#ifdef CONFIG_SCHED_STACK_END_CHECK
	if (task_stack_end_corrupted(prev))
		panic("corrupted stack end detected inside scheduler\n");

	if (task_scs_end_corrupted(prev))
		panic("corrupted shadow stack detected inside scheduler\n");
#endif

#ifdef CONFIG_DEBUG_ATOMIC_SLEEP
	if (!preempt && READ_ONCE(prev->__state) && prev->non_block_count) {
		printk(KERN_ERR "BUG: scheduling in a non-blocking section: %s/%d/%i\n",
			prev->comm, prev->pid, prev->non_block_count);
		dump_stack();
		add_taint(TAINT_WARN, LOCKDEP_STILL_OK);
	}
#endif

	if (unlikely(in_atomic_preempt_off())) {
		__schedule_bug(prev);
		preempt_count_set(PREEMPT_DISABLED);
	}
	rcu_sleep_check();
	SCHED_WARN_ON(ct_state() == CONTEXT_USER);

	profile_hit(SCHED_PROFILING, __builtin_return_address(0));

	schedstat_inc(this_rq()->sched_count);
}

static void put_prev_task_balance(struct rq *rq, struct task_struct *prev,
				  struct rq_flags *rf)
{
#ifdef CONFIG_SMP
	const struct sched_class *class;
	/*
	 * We must do the balancing pass before put_prev_task(), such
	 * that when we release the rq->lock the task is in the same
	 * state as before we took rq->lock.
	 *
	 * We can terminate the balance pass as soon as we know there is
	 * a runnable task of @class priority or higher.
	 */
	for_class_range(class, prev->sched_class, &idle_sched_class) {
		if (class->balance(rq, prev, rf))
			break;
	}
#endif

	put_prev_task(rq, prev);
}

/*
 * Pick up the highest-prio task:
 */
static inline struct task_struct *
__pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
{
	const struct sched_class *class;
	struct task_struct *p;

	/*
	 * Optimization: we know that if all tasks are in the fair class we can
	 * call that function directly, but only if the @prev task wasn't of a
	 * higher scheduling class, because otherwise those lose the
	 * opportunity to pull in more work from other CPUs.
	 */
	if (likely(prev->sched_class <= &fair_sched_class &&
		   rq->nr_running == rq->cfs.h_nr_running)) {

		p = pick_next_task_fair(rq, prev, rf);
		if (unlikely(p == RETRY_TASK))
			goto restart;

		/* Assume the next prioritized class is idle_sched_class */
		if (!p) {
			put_prev_task(rq, prev);
			p = pick_next_task_idle(rq);
		}

		return p;
	}

restart:
	put_prev_task_balance(rq, prev, rf);

	for_each_class(class) {
		p = class->pick_next_task(rq);
		if (p)
			return p;
	}

	/* The idle class should always have a runnable task: */
	BUG();
}

#ifdef CONFIG_SCHED_CORE
static inline bool is_task_rq_idle(struct task_struct *t)
{
	return (task_rq(t)->idle == t);
}

static inline bool cookie_equals(struct task_struct *a, unsigned long cookie)
{
	return is_task_rq_idle(a) || (a->core_cookie == cookie);
}

static inline bool cookie_match(struct task_struct *a, struct task_struct *b)
{
	if (is_task_rq_idle(a) || is_task_rq_idle(b))
		return true;

	return a->core_cookie == b->core_cookie;
}

// XXX fairness/fwd progress conditions
/*
 * Returns
 * - NULL if there is no runnable task for this class.
 * - the highest priority task for this runqueue if it matches
 *   rq->core->core_cookie or its priority is greater than max.
 * - Else returns idle_task.
 */
static struct task_struct *
pick_task(struct rq *rq, const struct sched_class *class, struct task_struct *max, bool in_fi)
{
	struct task_struct *class_pick, *cookie_pick;
	unsigned long cookie = rq->core->core_cookie;

	class_pick = class->pick_task(rq);
	if (!class_pick)
		return NULL;

	if (!cookie) {
		/*
		 * If class_pick is tagged, return it only if it has
		 * higher priority than max.
		 */
		if (max && class_pick->core_cookie &&
		    prio_less(class_pick, max, in_fi))
			return idle_sched_class.pick_task(rq);

		return class_pick;
	}

	/*
	 * If class_pick is idle or matches cookie, return early.
	 */
	if (cookie_equals(class_pick, cookie))
		return class_pick;

	cookie_pick = sched_core_find(rq, cookie);

	/*
	 * If class > max && class > cookie, it is the highest priority task on
	 * the core (so far) and it must be selected, otherwise we must go with
	 * the cookie pick in order to satisfy the constraint.
	 */
	if (prio_less(cookie_pick, class_pick, in_fi) &&
	    (!max || prio_less(max, class_pick, in_fi)))
		return class_pick;

	return cookie_pick;
}

extern void task_vruntime_update(struct rq *rq, struct task_struct *p, bool in_fi);

static struct task_struct *
pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
{
	struct task_struct *next, *max = NULL;
	const struct sched_class *class;
	const struct cpumask *smt_mask;
	bool fi_before = false;
	int i, j, cpu, occ = 0;
	bool need_sync;

	if (!sched_core_enabled(rq))
		return __pick_next_task(rq, prev, rf);

	cpu = cpu_of(rq);

	/* Stopper task is switching into idle, no need for core-wide selection. */
	if (cpu_is_offline(cpu)) {
		/*
		 * Reset core_pick so that we don't enter the fastpath when
		 * coming online. core_pick would already be migrated to
		 * another cpu during offline.
		 */
		rq->core_pick = NULL;
		return __pick_next_task(rq, prev, rf);
	}

	/*
	 * If there were no {en,de}queues since we picked (IOW, the task
	 * pointers are all still valid), and we haven't scheduled the last
	 * pick yet, do so now.
	 *
	 * rq->core_pick can be NULL if no selection was made for a CPU because
	 * it was either offline or went offline during a sibling's core-wide
	 * selection. In this case, do a core-wide selection.
	 */
	if (rq->core->core_pick_seq == rq->core->core_task_seq &&
	    rq->core->core_pick_seq != rq->core_sched_seq &&
	    rq->core_pick) {
		WRITE_ONCE(rq->core_sched_seq, rq->core->core_pick_seq);

		next = rq->core_pick;
		if (next != prev) {
			put_prev_task(rq, prev);
			set_next_task(rq, next);
		}

		rq->core_pick = NULL;
		return next;
	}

	put_prev_task_balance(rq, prev, rf);

	smt_mask = cpu_smt_mask(cpu);
	need_sync = !!rq->core->core_cookie;

	/* reset state */
	rq->core->core_cookie = 0UL;
	if (rq->core->core_forceidle) {
		need_sync = true;
		fi_before = true;
		rq->core->core_forceidle = false;
	}

	/*
	 * core->core_task_seq, core->core_pick_seq, rq->core_sched_seq
	 *
	 * @task_seq guards the task state ({en,de}queues)
	 * @pick_seq is the @task_seq we did a selection on
	 * @sched_seq is the @pick_seq we scheduled
	 *
	 * However, preemptions can cause multiple picks on the same task set.
	 * 'Fix' this by also increasing @task_seq for every pick.
	 */
	rq->core->core_task_seq++;

	/*
	 * Optimize for common case where this CPU has no cookies
	 * and there are no cookied tasks running on siblings.
	 */
	if (!need_sync) {
		for_each_class(class) {
			next = class->pick_task(rq);
			if (next)
				break;
		}

		if (!next->core_cookie) {
			rq->core_pick = NULL;
			/*
			 * For robustness, update the min_vruntime_fi for
			 * unconstrained picks as well.
			 */
			WARN_ON_ONCE(fi_before);
			task_vruntime_update(rq, next, false);
			goto done;
		}
	}

	for_each_cpu(i, smt_mask) {
		struct rq *rq_i = cpu_rq(i);

		rq_i->core_pick = NULL;

		if (i != cpu)
			update_rq_clock(rq_i);
	}

	/*
	 * Try and select tasks for each sibling in descending sched_class
	 * order.
	 */
	for_each_class(class) {
again:
		for_each_cpu_wrap(i, smt_mask, cpu) {
			struct rq *rq_i = cpu_rq(i);
			struct task_struct *p;

			if (rq_i->core_pick)
				continue;

			/*
			 * If this sibling doesn't yet have a suitable task to
			 * run; ask for the most eligible task, given the
			 * highest priority task already selected for this
			 * core.
			 */
			p = pick_task(rq_i, class, max, fi_before);
			if (!p)
				continue;

			if (!is_task_rq_idle(p))
				occ++;

			rq_i->core_pick = p;
			if (rq_i->idle == p && rq_i->nr_running) {
				rq->core->core_forceidle = true;
				if (!fi_before)
					rq->core->core_forceidle_seq++;
			}

			/*
			 * If this new candidate is of higher priority than the
			 * previous; and they're incompatible; we need to wipe
			 * the slate and start over. pick_task makes sure that
			 * p's priority is more than max if it doesn't match
			 * max's cookie.
			 *
			 * NOTE: this is a linear max-filter and is thus bounded
			 * in execution time.
			 */
			if (!max || !cookie_match(max, p)) {
				struct task_struct *old_max = max;

				rq->core->core_cookie = p->core_cookie;
				max = p;

				if (old_max) {
					rq->core->core_forceidle = false;
					for_each_cpu(j, smt_mask) {
						if (j == i)
							continue;

						cpu_rq(j)->core_pick = NULL;
					}
					occ = 1;
					goto again;
				}
			}
		}
	}

	rq->core->core_pick_seq = rq->core->core_task_seq;
	next = rq->core_pick;
	rq->core_sched_seq = rq->core->core_pick_seq;

	/* Something should have been selected for current CPU */
	WARN_ON_ONCE(!next);

	/*
	 * Reschedule siblings
	 *
	 * NOTE: L1TF -- at this point we're no longer running the old task and
	 * sending an IPI (below) ensures the sibling will no longer be running
	 * their task. This ensures there is no inter-sibling overlap between
	 * non-matching user state.
	 */
	for_each_cpu(i, smt_mask) {
		struct rq *rq_i = cpu_rq(i);

		/*
		 * An online sibling might have gone offline before a task
		 * could be picked for it, or it might be offline but later
		 * happen to come online, but it's too late and nothing was
		 * picked for it. That's OK - it will pick tasks for itself,
		 * so ignore it.
		 */
		if (!rq_i->core_pick)
			continue;

		/*
		 * Update for new !FI->FI transitions, or if continuing to be in !FI:
		 * fi_before   fi   update?
		 *  0           0       1
		 *  0           1       1
		 *  1           0       1
		 *  1           1       0
		 */
		if (!(fi_before && rq->core->core_forceidle))
			task_vruntime_update(rq_i, rq_i->core_pick, rq->core->core_forceidle);

		rq_i->core_pick->core_occupation = occ;

		if (i == cpu) {
			rq_i->core_pick = NULL;
			continue;
		}

		/* Did we break L1TF mitigation requirements? */
		WARN_ON_ONCE(!cookie_match(next, rq_i->core_pick));

		if (rq_i->curr == rq_i->core_pick) {
			rq_i->core_pick = NULL;
			continue;
		}

		resched_curr(rq_i);
	}

done:
	set_next_task(rq, next);
	return next;
}

static bool try_steal_cookie(int this, int that)
{
	struct rq *dst = cpu_rq(this), *src = cpu_rq(that);
	struct task_struct *p;
	unsigned long cookie;
	bool success = false;

	local_irq_disable();
	double_rq_lock(dst, src);

	cookie = dst->core->core_cookie;
	if (!cookie)
		goto unlock;

	if (dst->curr != dst->idle)
		goto unlock;

	p = sched_core_find(src, cookie);
	if (p == src->idle)
		goto unlock;

	do {
		if (p == src->core_pick || p == src->curr)
			goto next;

		if (!is_cpu_allowed(p, this))
			goto next;

		if (p->core_occupation > dst->idle->core_occupation)
			goto next;

		deactivate_task(src, p, 0);
		set_task_cpu(p, this);
		activate_task(dst, p, 0);

		resched_curr(dst);

		success = true;
		break;

next:
		p = sched_core_next(p, cookie);
	} while (p);

unlock:
	double_rq_unlock(dst, src);
	local_irq_enable();

	return success;
}

static bool steal_cookie_task(int cpu, struct sched_domain *sd)
{
	int i;

	for_each_cpu_wrap(i, sched_domain_span(sd), cpu) {
		if (i == cpu)
			continue;

		if (need_resched())
			break;

		if (try_steal_cookie(cpu, i))
			return true;
	}

	return false;
}

static void sched_core_balance(struct rq *rq)
{
	struct sched_domain *sd;
	int cpu = cpu_of(rq);

	preempt_disable();
	rcu_read_lock();
	raw_spin_rq_unlock_irq(rq);
	for_each_domain(cpu, sd) {
		if (need_resched())
			break;

		if (steal_cookie_task(cpu, sd))
			break;
	}
	raw_spin_rq_lock_irq(rq);
	rcu_read_unlock();
	preempt_enable();
}

static DEFINE_PER_CPU(struct callback_head, core_balance_head);

void queue_core_balance(struct rq *rq)
{
	if (!sched_core_enabled(rq))
		return;

	if (!rq->core->core_cookie)
		return;

	if (!rq->nr_running) /* not forced idle */
		return;

	queue_balance_callback(rq, &per_cpu(core_balance_head, rq->cpu), sched_core_balance);
}

static void sched_core_cpu_starting(unsigned int cpu)
{
	const struct cpumask *smt_mask = cpu_smt_mask(cpu);
	struct rq *rq = cpu_rq(cpu), *core_rq = NULL;
	unsigned long flags;
	int t;

	sched_core_lock(cpu, &flags);

	WARN_ON_ONCE(rq->core != rq);

	/* if we're the first, we'll be our own leader */
	if (cpumask_weight(smt_mask) == 1)
		goto unlock;

	/* find the leader */
	for_each_cpu(t, smt_mask) {
		if (t == cpu)
			continue;
		rq = cpu_rq(t);
		if (rq->core == rq) {
			core_rq = rq;
			break;
		}
	}

	if (WARN_ON_ONCE(!core_rq)) /* whoopsie */
		goto unlock;

	/* install and validate core_rq */
	for_each_cpu(t, smt_mask) {
		rq = cpu_rq(t);

		if (t == cpu)
			rq->core = core_rq;

		WARN_ON_ONCE(rq->core != core_rq);
	}

unlock:
	sched_core_unlock(cpu, &flags);
}

static void sched_core_cpu_deactivate(unsigned int cpu)
{
	const struct cpumask *smt_mask = cpu_smt_mask(cpu);
	struct rq *rq = cpu_rq(cpu), *core_rq = NULL;
	unsigned long flags;
	int t;

	sched_core_lock(cpu, &flags);

	/* if we're the last man standing, nothing to do */
	if (cpumask_weight(smt_mask) == 1) {
		WARN_ON_ONCE(rq->core != rq);
		goto unlock;
	}

	/* if we're not the leader, nothing to do */
	if (rq->core != rq)
		goto unlock;

	/* find a new leader */
	for_each_cpu(t, smt_mask) {
		if (t == cpu)
			continue;
		core_rq = cpu_rq(t);
		break;
	}

	if (WARN_ON_ONCE(!core_rq)) /* impossible */
		goto unlock;

	/* copy the shared state to the new leader */
	core_rq->core_task_seq = rq->core_task_seq;
	core_rq->core_pick_seq = rq->core_pick_seq;
	core_rq->core_cookie = rq->core_cookie;
	core_rq->core_forceidle = rq->core_forceidle;
	core_rq->core_forceidle_seq = rq->core_forceidle_seq;

	/* install new leader */
	for_each_cpu(t, smt_mask) {
		rq = cpu_rq(t);
		rq->core = core_rq;
	}

unlock:
	sched_core_unlock(cpu, &flags);
}

static inline void sched_core_cpu_dying(unsigned int cpu)
{
	struct rq *rq = cpu_rq(cpu);

	if (rq->core != rq)
		rq->core = rq;
}

#else /* !CONFIG_SCHED_CORE */

static inline void sched_core_cpu_starting(unsigned int cpu) {}
static inline void sched_core_cpu_deactivate(unsigned int cpu) {}
static inline void sched_core_cpu_dying(unsigned int cpu) {}

static struct task_struct *
pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
{
	return __pick_next_task(rq, prev, rf);
}

#endif /* CONFIG_SCHED_CORE */

/*
 * Constants for the sched_mode argument of __schedule().
 *
 * The mode argument allows RT enabled kernels to differentiate a
 * preemption from blocking on an 'sleeping' spin/rwlock. Note that
 * SM_MASK_PREEMPT for !RT has all bits set, which allows the compiler to
 * optimize the AND operation out and just check for zero.
 */
#define SM_NONE			0x0
#define SM_PREEMPT		0x1
#define SM_RTLOCK_WAIT		0x2

#ifndef CONFIG_PREEMPT_RT
# define SM_MASK_PREEMPT	(~0U)
#else
# define SM_MASK_PREEMPT	SM_PREEMPT
#endif
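
/*
 * Illustrative note (not part of the original source): on !PREEMPT_RT,
 * SM_MASK_PREEMPT is ~0U, so a guard such as
 *
 *	if (!(sched_mode & SM_MASK_PREEMPT) && prev_state)
 *
 * in __schedule() below reduces to a plain zero test of sched_mode,
 * while on PREEMPT_RT the mask really strips everything but SM_PREEMPT,
 * so SM_RTLOCK_WAIT sleeps still take the blocking path.
 */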

/*
 * __schedule() is the main scheduler function.
 *
 * The main means of driving the scheduler and thus entering this function are:
 *
 *   1. Explicit blocking: mutex, semaphore, waitqueue, etc.
 *
 *   2. TIF_NEED_RESCHED flag is checked on interrupt and userspace return
 *      paths. For example, see arch/x86/entry_64.S.
 *
 *      To drive preemption between tasks, the scheduler sets the flag in timer
 *      interrupt handler scheduler_tick().
 *
 *   3. Wakeups don't really cause entry into schedule(). They add a
 *      task to the run-queue and that's it.
 *
 *      Now, if the new task added to the run-queue preempts the current
 *      task, then the wakeup sets TIF_NEED_RESCHED and schedule() gets
 *      called on the nearest possible occasion:
 *
 *       - If the kernel is preemptible (CONFIG_PREEMPTION=y):
 *
 *         - in syscall or exception context, at the next outermost
 *           preempt_enable(). (this might be as soon as the wake_up()'s
 *           spin_unlock()!)
 *
 *         - in IRQ context, return from interrupt-handler to
 *           preemptible context
 *
 *       - If the kernel is not preemptible (CONFIG_PREEMPTION is not set)
 *         then at the next:
 *
 *          - cond_resched() call
 *          - explicit schedule() call
 *          - return from syscall or exception to user-space
 *          - return from interrupt-handler to user-space
 *
 * WARNING: must be called with preemption disabled!
 */
static void __sched notrace __schedule(unsigned int sched_mode)
{
	struct task_struct *prev, *next;
	unsigned long *switch_count;
	unsigned long prev_state;
	struct rq_flags rf;
	struct rq *rq;
	int cpu;

	cpu = smp_processor_id();
	rq = cpu_rq(cpu);
	prev = rq->curr;

	schedule_debug(prev, !!sched_mode);

	if (sched_feat(HRTICK) || sched_feat(HRTICK_DL))
		hrtick_clear(rq);

	local_irq_disable();
	rcu_note_context_switch(!!sched_mode);

	/*
	 * Make sure that signal_pending_state()->signal_pending() below
	 * can't be reordered with __set_current_state(TASK_INTERRUPTIBLE)
	 * done by the caller to avoid the race with signal_wake_up():
	 *
	 * __set_current_state(@state)		signal_wake_up()
	 * schedule()				  set_tsk_thread_flag(p, TIF_SIGPENDING)
	 *					  wake_up_state(p, state)
	 *   LOCK rq->lock			    LOCK p->pi_state
	 *   smp_mb__after_spinlock()		    smp_mb__after_spinlock()
	 *     if (signal_pending_state())	    if (p->state & @state)
	 *
	 * Also, the membarrier system call requires a full memory barrier
	 * after coming from user-space, before storing to rq->curr.
	 */
	rq_lock(rq, &rf);
	smp_mb__after_spinlock();

	/* Promote REQ to ACT */
	rq->clock_update_flags <<= 1;
	update_rq_clock(rq);

	switch_count = &prev->nivcsw;

	/*
	 * We must load prev->state once (task_struct::state is volatile), such
	 * that:
	 *
	 *  - we form a control dependency vs deactivate_task() below.
	 *  - ptrace_{,un}freeze_traced() can change ->state underneath us.
	 */
	prev_state = READ_ONCE(prev->__state);
	if (!(sched_mode & SM_MASK_PREEMPT) && prev_state) {
		if (signal_pending_state(prev_state, prev)) {
			WRITE_ONCE(prev->__state, TASK_RUNNING);
		} else {
			prev->sched_contributes_to_load =
				(prev_state & TASK_UNINTERRUPTIBLE) &&
				!(prev_state & TASK_NOLOAD) &&
				!(prev->flags & PF_FROZEN);

			if (prev->sched_contributes_to_load)
				rq->nr_uninterruptible++;

			/*
			 * __schedule()			ttwu()
			 *   prev_state = prev->state;	  if (p->on_rq && ...)
			 *   if (prev_state)		    goto out;
			 *     p->on_rq = 0;		  smp_acquire__after_ctrl_dep();
			 *				  p->state = TASK_WAKING
			 *
			 * Where __schedule() and ttwu() have matching control dependencies.
			 *
			 * After this, schedule() must not care about p->state any more.
			 */
			deactivate_task(rq, prev, DEQUEUE_SLEEP | DEQUEUE_NOCLOCK);

			if (prev->in_iowait) {
				atomic_inc(&rq->nr_iowait);
				delayacct_blkio_start();
			}
		}
		switch_count = &prev->nvcsw;
	}

	next = pick_next_task(rq, prev, &rf);
	clear_tsk_need_resched(prev);
	clear_preempt_need_resched();
#ifdef CONFIG_SCHED_DEBUG
	rq->last_seen_need_resched_ns = 0;
#endif

	trace_android_rvh_schedule(prev, next, rq);
	if (likely(prev != next)) {
		rq->nr_switches++;
		/*
		 * RCU users of rcu_dereference(rq->curr) may not see
		 * changes to task_struct made by pick_next_task().
		 */
		RCU_INIT_POINTER(rq->curr, next);
		/*
		 * The membarrier system call requires each architecture
		 * to have a full memory barrier after updating
		 * rq->curr, before returning to user-space.
		 *
		 * Here are the schemes providing that barrier on the
		 * various architectures:
		 * - mm ? switch_mm() : mmdrop() for x86, s390, sparc, PowerPC.
		 *   switch_mm() relies on membarrier_arch_switch_mm() on PowerPC.
		 * - finish_lock_switch() for weakly-ordered
		 *   architectures where spin_unlock is a full barrier,
		 * - switch_to() for arm64 (weakly-ordered, spin_unlock
		 *   is a RELEASE barrier),
		 */
		++*switch_count;

		migrate_disable_switch(rq, prev);
		psi_sched_switch(prev, next, !task_on_rq_queued(prev));

		trace_sched_switch(sched_mode & SM_MASK_PREEMPT, prev, next);

		/* Also unlocks the rq: */
		rq = context_switch(rq, prev, next, &rf);
	} else {
		rq->clock_update_flags &= ~(RQCF_ACT_SKIP|RQCF_REQ_SKIP);

		rq_unpin_lock(rq, &rf);
		__balance_callbacks(rq);
		raw_spin_rq_unlock_irq(rq);
	}
}

void __noreturn do_task_dead(void)
{
	/* Causes final put_task_struct in finish_task_switch(): */
	set_special_state(TASK_DEAD);

	/* Tell freezer to ignore us: */
	current->flags |= PF_NOFREEZE;

	__schedule(SM_NONE);
	BUG();

	/* Avoid "noreturn function does return" - but don't continue if BUG() is a NOP: */
	for (;;)
		cpu_relax();
}

static inline void sched_submit_work(struct task_struct *tsk)
{
	unsigned int task_flags;

	if (task_is_running(tsk))
		return;

	task_flags = tsk->flags;
	/*
	 * If a worker went to sleep, notify and ask workqueue whether
	 * it wants to wake up a task to maintain concurrency.
	 * As this function is called inside the schedule() context,
	 * we disable preemption to avoid it calling schedule() again
	 * in the possible wakeup of a kworker and because wq_worker_sleeping()
	 * requires it.
	 */
	if (task_flags & (PF_WQ_WORKER | PF_IO_WORKER)) {
		preempt_disable();
		if (task_flags & PF_WQ_WORKER)
			wq_worker_sleeping(tsk);
		else
			io_wq_worker_sleeping(tsk);
		preempt_enable_no_resched();
	}

	/*
	 * spinlock and rwlock must not flush block requests. This will
	 * deadlock if the callback attempts to acquire a lock which is
	 * already acquired.
	 */
	SCHED_WARN_ON(current->__state & TASK_RTLOCK_WAIT);

	/*
	 * If we are going to sleep and we have plugged IO queued,
	 * make sure to submit it to avoid deadlocks.
	 */
	if (blk_needs_flush_plug(tsk))
		blk_schedule_flush_plug(tsk);
}

static void sched_update_worker(struct task_struct *tsk)
{
	if (tsk->flags & (PF_WQ_WORKER | PF_IO_WORKER)) {
		if (tsk->flags & PF_WQ_WORKER)
			wq_worker_running(tsk);
		else
			io_wq_worker_running(tsk);
	}
}

asmlinkage __visible void __sched schedule(void)
{
	struct task_struct *tsk = current;

	sched_submit_work(tsk);
	do {
		preempt_disable();
		__schedule(SM_NONE);
		sched_preempt_enable_no_resched();
	} while (need_resched());
	sched_update_worker(tsk);
}
EXPORT_SYMBOL(schedule);

/*
 * synchronize_rcu_tasks() makes sure that no task is stuck in preempted
 * state (have scheduled out non-voluntarily) by making sure that all
 * tasks have either left the run queue or have gone into user space.
 * As idle tasks do not do either, they must not ever be preempted
 * (schedule out non-voluntarily).
 *
 * schedule_idle() is similar to schedule_preempt_disabled() except that it
 * never enables preemption because it does not call sched_submit_work().
 */
void __sched schedule_idle(void)
{
	/*
	 * As this skips calling sched_submit_work(), which the idle task does
	 * regardless because that function is a nop when the task is in a
	 * TASK_RUNNING state, make sure this isn't used someplace that the
	 * current task can be in any other state. Note, idle is always in the
	 * TASK_RUNNING state.
	 */
	WARN_ON_ONCE(current->__state);
	do {
		__schedule(SM_NONE);
	} while (need_resched());
}

#if defined(CONFIG_CONTEXT_TRACKING) && !defined(CONFIG_HAVE_CONTEXT_TRACKING_OFFSTACK)
asmlinkage __visible void __sched schedule_user(void)
{
	/*
	 * If we come here after a random call to set_need_resched(),
	 * or we have been woken up remotely but the IPI has not yet arrived,
	 * we haven't yet exited the RCU idle mode. Do it here manually until
	 * we find a better solution.
	 *
	 * NB: There are buggy callers of this function. Ideally we
	 * should warn if prev_state != CONTEXT_USER, but that will trigger
	 * too frequently to make sense yet.
	 */
	enum ctx_state prev_state = exception_enter();
	schedule();
	exception_exit(prev_state);
}
#endif

/**
 * schedule_preempt_disabled - called with preemption disabled
 *
 * Returns with preemption disabled. Note: preempt_count must be 1
 */
void __sched schedule_preempt_disabled(void)
{
	sched_preempt_enable_no_resched();
	schedule();
	preempt_disable();
}

#ifdef CONFIG_PREEMPT_RT
void __sched notrace schedule_rtlock(void)
{
	do {
		preempt_disable();
		__schedule(SM_RTLOCK_WAIT);
		sched_preempt_enable_no_resched();
	} while (need_resched());
}
NOKPROBE_SYMBOL(schedule_rtlock);
#endif

static void __sched notrace preempt_schedule_common(void)
{
	do {
		/*
		 * Because the function tracer can trace preempt_count_sub()
		 * and it also uses preempt_enable/disable_notrace(), if
		 * NEED_RESCHED is set, the preempt_enable_notrace() called
		 * by the function tracer will call this function again and
		 * cause infinite recursion.
		 *
		 * Preemption must be disabled here before the function
		 * tracer can trace. Break up preempt_disable() into two
		 * calls. One to disable preemption without fear of being
		 * traced. The other to still record the preemption latency,
		 * which can also be traced by the function tracer.
		 */
		preempt_disable_notrace();
		preempt_latency_start(1);
		__schedule(SM_PREEMPT);
		preempt_latency_stop(1);
		preempt_enable_no_resched_notrace();

		/*
		 * Check again in case we missed a preemption opportunity
		 * between schedule and now.
		 */
	} while (need_resched());
}

#ifdef CONFIG_PREEMPTION
/*
 * This is the entry point to schedule() from in-kernel preemption
 * off of preempt_enable.
 */
asmlinkage __visible void __sched notrace preempt_schedule(void)
{
	/*
	 * If there is a non-zero preempt_count or interrupts are disabled,
	 * we do not want to preempt the current task. Just return..
	 */
	if (likely(!preemptible()))
		return;

	preempt_schedule_common();
}
NOKPROBE_SYMBOL(preempt_schedule);
EXPORT_SYMBOL(preempt_schedule);

#ifdef CONFIG_PREEMPT_DYNAMIC
DEFINE_STATIC_CALL(preempt_schedule, __preempt_schedule_func);
EXPORT_STATIC_CALL_TRAMP(preempt_schedule);
#endif


/**
 * preempt_schedule_notrace - preempt_schedule called by tracing
 *
 * The tracing infrastructure uses preempt_enable_notrace to prevent
 * recursion and tracing preempt enabling caused by the tracing
 * infrastructure itself. But as tracing can happen in areas coming
 * from userspace or just about to enter userspace, a preempt enable
 * can occur before user_exit() is called. This will cause the scheduler
 * to be called when the system is still in usermode.
 *
 * To prevent this, the preempt_enable_notrace will use this function
 * instead of preempt_schedule() to exit user context if needed before
 * calling the scheduler.
 */
asmlinkage __visible void __sched notrace preempt_schedule_notrace(void)
{
	enum ctx_state prev_ctx;

	if (likely(!preemptible()))
		return;

	do {
		/*
		 * Because the function tracer can trace preempt_count_sub()
		 * and it also uses preempt_enable/disable_notrace(), if
		 * NEED_RESCHED is set, the preempt_enable_notrace() called
		 * by the function tracer will call this function again and
		 * cause infinite recursion.
		 *
		 * Preemption must be disabled here before the function
		 * tracer can trace. Break up preempt_disable() into two
		 * calls. One to disable preemption without fear of being
		 * traced. The other to still record the preemption latency,
		 * which can also be traced by the function tracer.
		 */
		preempt_disable_notrace();
		preempt_latency_start(1);
		/*
		 * Needs preempt disabled in case user_exit() is traced
		 * and the tracer calls preempt_enable_notrace() causing
		 * an infinite recursion.
		 */
		prev_ctx = exception_enter();
		__schedule(SM_PREEMPT);
		exception_exit(prev_ctx);

		preempt_latency_stop(1);
		preempt_enable_no_resched_notrace();
	} while (need_resched());
}
EXPORT_SYMBOL_GPL(preempt_schedule_notrace);

#ifdef CONFIG_PREEMPT_DYNAMIC
DEFINE_STATIC_CALL(preempt_schedule_notrace, __preempt_schedule_notrace_func);
EXPORT_STATIC_CALL_TRAMP(preempt_schedule_notrace);
#endif

#endif /* CONFIG_PREEMPTION */

#ifdef CONFIG_PREEMPT_DYNAMIC

#include <linux/entry-common.h>

/*
 * SC:cond_resched
 * SC:might_resched
 * SC:preempt_schedule
 * SC:preempt_schedule_notrace
 * SC:irqentry_exit_cond_resched
 *
 *
 * NONE:
 *   cond_resched               <- __cond_resched
 *   might_resched              <- RET0
 *   preempt_schedule           <- NOP
 *   preempt_schedule_notrace   <- NOP
 *   irqentry_exit_cond_resched <- NOP
 *
 * VOLUNTARY:
 *   cond_resched               <- __cond_resched
 *   might_resched              <- __cond_resched
 *   preempt_schedule           <- NOP
 *   preempt_schedule_notrace   <- NOP
 *   irqentry_exit_cond_resched <- NOP
 *
 * FULL:
 *   cond_resched               <- RET0
 *   might_resched              <- RET0
 *   preempt_schedule           <- preempt_schedule
 *   preempt_schedule_notrace   <- preempt_schedule_notrace
 *   irqentry_exit_cond_resched <- irqentry_exit_cond_resched
 */

enum {
	preempt_dynamic_none = 0,
	preempt_dynamic_voluntary,
	preempt_dynamic_full,
};

int preempt_dynamic_mode = preempt_dynamic_full;

int sched_dynamic_mode(const char *str)
{
	if (!strcmp(str, "none"))
		return preempt_dynamic_none;

	if (!strcmp(str, "voluntary"))
		return preempt_dynamic_voluntary;

	if (!strcmp(str, "full"))
		return preempt_dynamic_full;

	return -EINVAL;
}

void sched_dynamic_update(int mode)
{
	/*
	 * Avoid {NONE,VOLUNTARY} -> FULL transitions from ever ending up in
	 * the ZERO state, which is invalid.
	 */
	static_call_update(cond_resched, __cond_resched);
	static_call_update(might_resched, __cond_resched);
	static_call_update(preempt_schedule, __preempt_schedule_func);
	static_call_update(preempt_schedule_notrace, __preempt_schedule_notrace_func);
	static_call_update(irqentry_exit_cond_resched, irqentry_exit_cond_resched);

	switch (mode) {
	case preempt_dynamic_none:
		static_call_update(cond_resched, __cond_resched);
		static_call_update(might_resched, (void *)&__static_call_return0);
		static_call_update(preempt_schedule, NULL);
		static_call_update(preempt_schedule_notrace, NULL);
		static_call_update(irqentry_exit_cond_resched, NULL);
		pr_info("Dynamic Preempt: none\n");
		break;

	case preempt_dynamic_voluntary:
		static_call_update(cond_resched, __cond_resched);
		static_call_update(might_resched, __cond_resched);
		static_call_update(preempt_schedule, NULL);
		static_call_update(preempt_schedule_notrace, NULL);
		static_call_update(irqentry_exit_cond_resched, NULL);
		pr_info("Dynamic Preempt: voluntary\n");
		break;

	case preempt_dynamic_full:
		static_call_update(cond_resched, (void *)&__static_call_return0);
		static_call_update(might_resched, (void *)&__static_call_return0);
		static_call_update(preempt_schedule, __preempt_schedule_func);
		static_call_update(preempt_schedule_notrace, __preempt_schedule_notrace_func);
		static_call_update(irqentry_exit_cond_resched, irqentry_exit_cond_resched);
		pr_info("Dynamic Preempt: full\n");
		break;
	}

	preempt_dynamic_mode = mode;
}

static int __init setup_preempt_mode(char *str)
{
	int mode = sched_dynamic_mode(str);
	if (mode < 0) {
		pr_warn("Dynamic Preempt: unsupported mode: %s\n", str);
		return 0;
	}

	sched_dynamic_update(mode);
	return 1;
}
__setup("preempt=", setup_preempt_mode);

#endif /* CONFIG_PREEMPT_DYNAMIC */
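
/*
 * Illustrative usage (not part of the original source): with
 * CONFIG_PREEMPT_DYNAMIC=y the preemption model is picked on the kernel
 * command line, e.g.
 *
 *	preempt=none
 *	preempt=voluntary
 *	preempt=full
 *
 * An unrecognized value only triggers the pr_warn() above and leaves the
 * compiled-in default (preempt_dynamic_full) in place.
 */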

/*
 * This is the entry point to schedule() from kernel preemption
 * off of irq context.
 * Note that this is called and returns with irqs disabled. This will
 * protect us against recursive calling from irq.
 */
asmlinkage __visible void __sched preempt_schedule_irq(void)
{
	enum ctx_state prev_state;

	/* Catch callers which need to be fixed */
	BUG_ON(preempt_count() || !irqs_disabled());

	prev_state = exception_enter();

	do {
		preempt_disable();
		local_irq_enable();
		__schedule(SM_PREEMPT);
		local_irq_disable();
		sched_preempt_enable_no_resched();
	} while (need_resched());

	exception_exit(prev_state);
}

int default_wake_function(wait_queue_entry_t *curr, unsigned mode, int wake_flags,
			  void *key)
{
	WARN_ON_ONCE(IS_ENABLED(CONFIG_SCHED_DEBUG) && wake_flags & ~(WF_SYNC | WF_ANDROID_VENDOR));
	return try_to_wake_up(curr->private, mode, wake_flags);
}
EXPORT_SYMBOL(default_wake_function);

static void __setscheduler_prio(struct task_struct *p, int prio)
{
	if (dl_prio(prio))
		p->sched_class = &dl_sched_class;
	else if (rt_prio(prio))
		p->sched_class = &rt_sched_class;
	else
		p->sched_class = &fair_sched_class;

	p->prio = prio;
}

#ifdef CONFIG_RT_MUTEXES

static inline int __rt_effective_prio(struct task_struct *pi_task, int prio)
{
	if (pi_task)
		prio = min(prio, pi_task->prio);

	return prio;
}

static inline int rt_effective_prio(struct task_struct *p, int prio)
{
	struct task_struct *pi_task = rt_mutex_get_top_task(p);

	return __rt_effective_prio(pi_task, prio);
}

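/*
 * Worked example (illustrative, not from the original source): kernel
 * priorities compare numerically lower == more important. If @p has
 * normal_prio 120 (a CFS task) and the top waiter on a rt_mutex it holds
 * is SCHED_FIFO with prio 89, rt_effective_prio() returns min(120, 89)
 * == 89, boosting the lock holder into the RT range until it unlocks.
 */
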
/*
 * rt_mutex_setprio - set the current priority of a task
 * @p: task to boost
 * @pi_task: donor task
 *
 * This function changes the 'effective' priority of a task. It does
 * not touch ->normal_prio like __setscheduler().
 *
 * Used by the rt_mutex code to implement priority inheritance
 * logic. Call site only calls if the priority of the task changed.
 */
void rt_mutex_setprio(struct task_struct *p, struct task_struct *pi_task)
{
	int prio, oldprio, queued, running, queue_flag =
		DEQUEUE_SAVE | DEQUEUE_MOVE | DEQUEUE_NOCLOCK;
	const struct sched_class *prev_class;
	struct rq_flags rf;
	struct rq *rq;

	trace_android_rvh_rtmutex_prepare_setprio(p, pi_task);
	/* XXX used to be waiter->prio, not waiter->task->prio */
	prio = __rt_effective_prio(pi_task, p->normal_prio);

	/*
	 * If nothing changed; bail early.
	 */
	if (p->pi_top_task == pi_task && prio == p->prio && !dl_prio(prio))
		return;

	rq = __task_rq_lock(p, &rf);
	update_rq_clock(rq);
	/*
	 * Set under pi_lock && rq->lock, such that the value can be used under
	 * either lock.
	 *
	 * Note that there is lots of trickiness needed to make this pointer
	 * cache work right. rt_mutex_slowunlock()+rt_mutex_postunlock() work
	 * together to ensure a task is de-boosted (pi_task is set to NULL)
	 * before the task is allowed to run again (and can exit). This ensures
	 * the pointer points to a blocked task -- which guarantees the task
	 * is present.
	 */
	p->pi_top_task = pi_task;

	/*
	 * For FIFO/RR we only need to set prio, if that matches we're done.
	 */
	if (prio == p->prio && !dl_prio(prio))
		goto out_unlock;

	/*
	 * Idle task boosting is a no-no in general. There is one
	 * exception, when PREEMPT_RT and NOHZ is active:
	 *
	 * The idle task calls get_next_timer_interrupt() and holds
	 * the timer wheel base->lock on the CPU and another CPU wants
	 * to access the timer (probably to cancel it). We can safely
	 * ignore the boosting request, as the idle CPU runs this code
	 * with interrupts disabled and will complete the lock
	 * protected section without being interrupted. So there is no
	 * real need to boost.
	 */
	if (unlikely(p == rq->idle)) {
		WARN_ON(p != rq->curr);
		WARN_ON(p->pi_blocked_on);
		goto out_unlock;
	}

	trace_sched_pi_setprio(p, pi_task);
	oldprio = p->prio;

	if (oldprio == prio)
		queue_flag &= ~DEQUEUE_MOVE;

	prev_class = p->sched_class;
	queued = task_on_rq_queued(p);
	running = task_current(rq, p);
	if (queued)
		dequeue_task(rq, p, queue_flag);
	if (running)
		put_prev_task(rq, p);

	/*
	 * Boosting conditions are:
	 * 1. -rt task is running and holds mutex A
	 *      --> -dl task blocks on mutex A
	 *
	 * 2. -dl task is running and holds mutex A
	 *      --> -dl task blocks on mutex A and could preempt the
	 *          running task
	 */
	if (dl_prio(prio)) {
		if (!dl_prio(p->normal_prio) ||
		    (pi_task && dl_prio(pi_task->prio) &&
		     dl_entity_preempt(&pi_task->dl, &p->dl))) {
			p->dl.pi_se = pi_task->dl.pi_se;
			queue_flag |= ENQUEUE_REPLENISH;
		} else {
			p->dl.pi_se = &p->dl;
		}
	} else if (rt_prio(prio)) {
		if (dl_prio(oldprio))
			p->dl.pi_se = &p->dl;
		if (oldprio < prio)
			queue_flag |= ENQUEUE_HEAD;
	} else {
		if (dl_prio(oldprio))
			p->dl.pi_se = &p->dl;
		if (rt_prio(oldprio))
			p->rt.timeout = 0;
	}

	__setscheduler_prio(p, prio);

	if (queued)
		enqueue_task(rq, p, queue_flag);
	if (running)
		set_next_task(rq, p);

	check_class_changed(rq, p, prev_class, oldprio);
out_unlock:
	/* Avoid rq from going away on us: */
	preempt_disable();

	rq_unpin_lock(rq, &rf);
	__balance_callbacks(rq);
	raw_spin_rq_unlock(rq);

	preempt_enable();
}
#else
static inline int rt_effective_prio(struct task_struct *p, int prio)
{
	return prio;
}
#endif

void set_user_nice(struct task_struct *p, long nice)
{
	bool queued, running, allowed = false;
	int old_prio;
	struct rq_flags rf;
	struct rq *rq;

	trace_android_rvh_set_user_nice(p, &nice, &allowed);
	if ((task_nice(p) == nice || nice < MIN_NICE || nice > MAX_NICE) && !allowed)
		return;
	/*
	 * We have to be careful, if called from sys_setpriority(),
	 * the task might be in the middle of scheduling on another CPU.
	 */
	rq = task_rq_lock(p, &rf);
	update_rq_clock(rq);

	/*
	 * The RT priorities are set via sched_setscheduler(), but we still
	 * allow the 'normal' nice value to be set - but as expected
	 * it won't have any effect on scheduling until the task is
	 * SCHED_DEADLINE, SCHED_FIFO or SCHED_RR:
	 */
	if (task_has_dl_policy(p) || task_has_rt_policy(p)) {
		p->static_prio = NICE_TO_PRIO(nice);
		goto out_unlock;
	}
	queued = task_on_rq_queued(p);
	running = task_current(rq, p);
	if (queued)
		dequeue_task(rq, p, DEQUEUE_SAVE | DEQUEUE_NOCLOCK);
	if (running)
		put_prev_task(rq, p);

	p->static_prio = NICE_TO_PRIO(nice);
	set_load_weight(p, true);
	old_prio = p->prio;
	p->prio = effective_prio(p);

	if (queued)
		enqueue_task(rq, p, ENQUEUE_RESTORE | ENQUEUE_NOCLOCK);
	if (running)
		set_next_task(rq, p);

	/*
	 * If the task increased its priority or is running and
	 * lowered its priority, then reschedule its CPU:
	 */
	p->sched_class->prio_changed(rq, p, old_prio);

out_unlock:
	task_rq_unlock(rq, p, &rf);
}
EXPORT_SYMBOL(set_user_nice);

/*
 * can_nice - check if a task can reduce its nice value
 * @p: task
 * @nice: nice value
 */
int can_nice(const struct task_struct *p, const int nice)
{
	/* Convert nice value [19,-20] to rlimit style value [1,40]: */
	int nice_rlim = nice_to_rlimit(nice);

	return (nice_rlim <= task_rlimit(p, RLIMIT_NICE) ||
		capable(CAP_SYS_NICE));
}
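
/*
 * Worked example (illustrative, not from the original source):
 * nice_to_rlimit() maps nice 19..-20 onto 1..40, i.e. rlimit = 20 - nice.
 * A task with RLIMIT_NICE = 25 may therefore drop to nice = 20 - 25 = -5
 * without CAP_SYS_NICE, while requesting nice -10 (rlimit 30 > 25) fails.
 */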

#ifdef __ARCH_WANT_SYS_NICE

/*
 * sys_nice - change the priority of the current process.
 * @increment: priority increment
 *
 * sys_setpriority is a more generic, but much slower function that
 * does similar things.
 */
SYSCALL_DEFINE1(nice, int, increment)
{
	long nice, retval;

	/*
	 * Setpriority might change our priority at the same moment.
	 * We don't have to worry. Conceptually one call occurs first
	 * and we have a single winner.
	 */
	increment = clamp(increment, -NICE_WIDTH, NICE_WIDTH);
	nice = task_nice(current) + increment;

	nice = clamp_val(nice, MIN_NICE, MAX_NICE);
	if (increment < 0 && !can_nice(current, nice))
		return -EPERM;

	retval = security_task_setnice(current, nice);
	if (retval)
		return retval;

	set_user_nice(current, nice);
	return 0;
}

#endif
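
/*
 * Worked example (illustrative, not from the original source): NICE_WIDTH
 * is 40, so nice(-1000) is first clamped to an increment of -40; starting
 * from nice 0 the target is then clamped again to MIN_NICE, giving -20,
 * which still has to pass can_nice() and security_task_setnice().
 */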

/**
 * task_prio - return the priority value of a given task.
 * @p: the task in question.
 *
 * Return: The priority value as seen by users in /proc.
 *
 * sched policy         return value   kernel prio    user prio/nice
 *
 * normal, batch, idle     [0 ... 39]  [100 ... 139]  0/[-20 ... 19]
 * fifo, rr             [-2 ... -100]     [98 ... 0]  [1 ... 99]
 * deadline                     -101             -1   0
 */
int task_prio(const struct task_struct *p)
{
	return p->prio - MAX_RT_PRIO;
}
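
/*
 * Worked example (illustrative, not from the original source): MAX_RT_PRIO
 * is 100, so a SCHED_NORMAL task at nice 0 (kernel prio 120) reports 20,
 * a SCHED_FIFO task with rt_priority 50 (kernel prio 99 - 50 = 49) reports
 * -51, and a SCHED_DEADLINE task (kernel prio -1) reports -101.
 */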

/**
 * idle_cpu - is a given CPU idle currently?
 * @cpu: the processor in question.
 *
 * Return: 1 if the CPU is currently idle. 0 otherwise.
 */
int idle_cpu(int cpu)
{
	struct rq *rq = cpu_rq(cpu);

	if (rq->curr != rq->idle)
		return 0;

	if (rq->nr_running)
		return 0;

#ifdef CONFIG_SMP
	if (rq->ttwu_pending)
		return 0;
#endif

	return 1;
}

/**
 * available_idle_cpu - is a given CPU idle for enqueuing work.
 * @cpu: the CPU in question.
 *
 * Return: 1 if the CPU is currently idle. 0 otherwise.
 */
int available_idle_cpu(int cpu)
{
	if (!idle_cpu(cpu))
		return 0;

	if (vcpu_is_preempted(cpu))
		return 0;

	return 1;
}
EXPORT_SYMBOL_GPL(available_idle_cpu);

/**
 * idle_task - return the idle task for a given CPU.
 * @cpu: the processor in question.
 *
 * Return: The idle task for the CPU @cpu.
 */
struct task_struct *idle_task(int cpu)
{
	return cpu_rq(cpu)->idle;
}

#ifdef CONFIG_SMP
/*
 * This function computes an effective utilization for the given CPU, to be
 * used for frequency selection given the linear relation: f = u * f_max.
 *
 * The scheduler tracks the following metrics:
 *
 *   cpu_util_{cfs,rt,dl,irq}()
 *   cpu_bw_dl()
 *
 * Where the cfs,rt and dl util numbers are tracked with the same metric and
 * synchronized windows and are thus directly comparable.
 *
 * The cfs,rt,dl utilization are the running times measured with rq->clock_task
 * which excludes things like IRQ and steal-time. These latter are then accrued
 * in the irq utilization.
 *
 * The DL bandwidth number otoh is not a measured metric but a value computed
 * based on the task model parameters and gives the minimal utilization
 * required to meet deadlines.
 */
unsigned long effective_cpu_util(int cpu, unsigned long util_cfs,
				 unsigned long max, enum cpu_util_type type,
				 struct task_struct *p)
{
	unsigned long dl_util, util, irq;
	struct rq *rq = cpu_rq(cpu);
	unsigned long new_util = ULONG_MAX;

	trace_android_rvh_effective_cpu_util(cpu, util_cfs, max, type, p, &new_util);
	if (new_util != ULONG_MAX)
		return new_util;

	if (!uclamp_is_used() &&
	    type == FREQUENCY_UTIL && rt_rq_is_runnable(&rq->rt)) {
		return max;
	}

	/*
	 * Early check to see if IRQ/steal time saturates the CPU, can be
	 * because of inaccuracies in how we track these -- see
	 * update_irq_load_avg().
	 */
	irq = cpu_util_irq(rq);
	if (unlikely(irq >= max))
		return max;

	/*
	 * Because the time spent on RT/DL tasks is visible as 'lost' time to
	 * CFS tasks and we use the same metric to track the effective
	 * utilization (PELT windows are synchronized) we can directly add them
	 * to obtain the CPU's actual utilization.
	 *
	 * CFS and RT utilization can be boosted or capped, depending on
	 * utilization clamp constraints requested by currently RUNNABLE
	 * tasks.
	 * When there are no CFS RUNNABLE tasks, clamps are released and
	 * frequency will be gracefully reduced with the utilization decay.
	 */
	util = util_cfs + cpu_util_rt(rq);
	if (type == FREQUENCY_UTIL)
		util = uclamp_rq_util_with(rq, util, p);

	dl_util = cpu_util_dl(rq);

	/*
	 * For frequency selection we do not make cpu_util_dl() a permanent part
	 * of this sum because we want to use cpu_bw_dl() later on, but we need
	 * to check if the CFS+RT+DL sum is saturated (ie. no idle time) such
	 * that we select f_max when there is no idle time.
	 *
	 * NOTE: numerical errors or stop class might cause us to not quite hit
	 * saturation when we should -- something for later.
	 */
	if (util + dl_util >= max)
		return max;

	/*
	 * OTOH, for energy computation we need the estimated running time, so
	 * include util_dl and ignore dl_bw.
	 */
	if (type == ENERGY_UTIL)
		util += dl_util;

	/*
	 * There is still idle time; further improve the number by using the
	 * irq metric. Because IRQ/steal time is hidden from the task clock we
	 * need to scale the task numbers:
	 *
	 *              max - irq
	 *   U' = irq + --------- * U
	 *                 max
	 */
	util = scale_irq_capacity(util, irq, max);
	util += irq;

	/*
	 * Bandwidth required by DEADLINE must always be granted while, for
	 * FAIR and RT, we use blocked utilization of IDLE CPUs as a mechanism
	 * to gracefully reduce the frequency when no tasks show up for longer
	 * periods of time.
	 *
	 * Ideally we would like to set bw_dl as min/guaranteed freq and util +
	 * bw_dl as requested freq. However, cpufreq is not yet ready for such
	 * an interface. So, we only do the latter for now.
	 */
	if (type == FREQUENCY_UTIL)
		util += cpu_bw_dl(rq);

	return min(max, util);
}

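/*
 * Worked example (illustrative, not from the original source) of the
 * U' = irq + (max - irq) / max * U scaling above: with max = 1024,
 * irq = 128 and a CFS+RT(+DL) sum of U = 512, the remaining non-IRQ
 * capacity is 896/1024, so U' = 128 + 512 * 896 / 1024 = 576.
 */
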
unsigned long sched_cpu_util(int cpu, unsigned long max)
{
	return effective_cpu_util(cpu, cpu_util_cfs(cpu_rq(cpu)), max,
				  ENERGY_UTIL, NULL);
}
#endif /* CONFIG_SMP */

/**
 * find_process_by_pid - find a process with a matching PID value.
 * @pid: the pid in question.
 *
 * The task of @pid, if found. %NULL otherwise.
 */
static struct task_struct *find_process_by_pid(pid_t pid)
{
	return pid ? find_task_by_vpid(pid) : current;
}

/*
 * sched_setparam() passes in -1 for its policy, to let the functions
 * it calls know not to change it.
 */
#define SETPARAM_POLICY	-1

static void __setscheduler_params(struct task_struct *p,
		const struct sched_attr *attr)
{
	int policy = attr->sched_policy;

	if (policy == SETPARAM_POLICY)
		policy = p->policy;

	p->policy = policy;

	if (dl_policy(policy))
		__setparam_dl(p, attr);
	else if (fair_policy(policy))
		p->static_prio = NICE_TO_PRIO(attr->sched_nice);

	/*
	 * __sched_setscheduler() ensures attr->sched_priority == 0 when
	 * !rt_policy. Always setting this ensures that things like
	 * getparam()/getattr() don't report silly values for !rt tasks.
	 */
	p->rt_priority = attr->sched_priority;
	p->normal_prio = normal_prio(p);
	set_load_weight(p, true);
}

/*
 * Check the target process has a UID that matches the current process's:
 */
static bool check_same_owner(struct task_struct *p)
{
	const struct cred *cred = current_cred(), *pcred;
	bool match;

	rcu_read_lock();
	pcred = __task_cred(p);
	match = (uid_eq(cred->euid, pcred->euid) ||
		 uid_eq(cred->euid, pcred->uid));
	rcu_read_unlock();
	return match;
}

static int __sched_setscheduler(struct task_struct *p,
				const struct sched_attr *attr,
				bool user, bool pi)
{
	int oldpolicy = -1, policy = attr->sched_policy;
	int retval, oldprio, newprio, queued, running;
	const struct sched_class *prev_class;
	struct callback_head *head;
	struct rq_flags rf;
	int reset_on_fork;
	int queue_flags = DEQUEUE_SAVE | DEQUEUE_MOVE | DEQUEUE_NOCLOCK;
	struct rq *rq;

	/* The pi code expects interrupts enabled */
	BUG_ON(pi && in_interrupt());
recheck:
	/* Double check policy once rq lock held: */
	if (policy < 0) {
		reset_on_fork = p->sched_reset_on_fork;
		policy = oldpolicy = p->policy;
	} else {
		reset_on_fork = !!(attr->sched_flags & SCHED_FLAG_RESET_ON_FORK);

		if (!valid_policy(policy))
			return -EINVAL;
	}

	if (attr->sched_flags & ~(SCHED_FLAG_ALL | SCHED_FLAG_SUGOV))
		return -EINVAL;

	/*
	 * Valid priorities for SCHED_FIFO and SCHED_RR are
	 * 1..MAX_RT_PRIO-1, valid priority for SCHED_NORMAL,
	 * SCHED_BATCH and SCHED_IDLE is 0.
	 */
	if (attr->sched_priority > MAX_RT_PRIO-1)
		return -EINVAL;
	if ((dl_policy(policy) && !__checkparam_dl(attr)) ||
	    (rt_policy(policy) != (attr->sched_priority != 0)))
		return -EINVAL;

	/*
	 * Allow unprivileged RT tasks to decrease priority:
	 */
	if (user && !capable(CAP_SYS_NICE)) {
		if (fair_policy(policy)) {
			if (attr->sched_nice < task_nice(p) &&
			    !can_nice(p, attr->sched_nice))
				return -EPERM;
		}

		if (rt_policy(policy)) {
			unsigned long rlim_rtprio =
					task_rlimit(p, RLIMIT_RTPRIO);

			/* Can't set/change the rt policy: */
			if (policy != p->policy && !rlim_rtprio)
				return -EPERM;

			/* Can't increase priority: */
			if (attr->sched_priority > p->rt_priority &&
			    attr->sched_priority > rlim_rtprio)
				return -EPERM;
		}

		/*
		 * Can't set/change SCHED_DEADLINE policy at all for now
		 * (safest behavior); in the future we would like to allow
		 * unprivileged DL tasks to increase their relative deadline
		 * or reduce their runtime (both ways reducing utilization)
		 */
		if (dl_policy(policy))
			return -EPERM;

		/*
		 * Treat SCHED_IDLE as nice 20. Only allow a switch to
		 * SCHED_NORMAL if the RLIMIT_NICE would normally permit it.
		 */
		if (task_has_idle_policy(p) && !idle_policy(policy)) {
			if (!can_nice(p, task_nice(p)))
				return -EPERM;
		}

		/* Can't change other user's priorities: */
		if (!check_same_owner(p))
			return -EPERM;

		/* Normal users shall not reset the sched_reset_on_fork flag: */
		if (p->sched_reset_on_fork && !reset_on_fork)
			return -EPERM;

		/* Can't change util-clamps */
		if (attr->sched_flags & SCHED_FLAG_UTIL_CLAMP)
			return -EPERM;
	}

	if (user) {
		if (attr->sched_flags & SCHED_FLAG_SUGOV)
			return -EINVAL;

		retval = security_task_setscheduler(p);
		if (retval)
			return retval;
	}

	/* Update task specific "requested" clamps */
	if (attr->sched_flags & SCHED_FLAG_UTIL_CLAMP) {
		retval = uclamp_validate(p, attr);
		if (retval)
			return retval;
	}

	/*
	 * Make sure no PI-waiters arrive (or leave) while we are
	 * changing the priority of the task:
	 *
	 * To be able to change p->policy safely, the appropriate
	 * runqueue lock must be held.
	 */
	rq = task_rq_lock(p, &rf);
	update_rq_clock(rq);

	/*
	 * Changing the policy of the stop threads is a very bad idea:
	 */
	if (p == rq->stop) {
		retval = -EINVAL;
		goto unlock;
	}

	/*
	 * If not changing anything there's no need to proceed further,
	 * but store a possible modification of reset_on_fork.
	 */
	if (unlikely(policy == p->policy)) {
		if (fair_policy(policy) && attr->sched_nice != task_nice(p))
			goto change;
		if (rt_policy(policy) && attr->sched_priority != p->rt_priority)
			goto change;
		if (dl_policy(policy) && dl_param_changed(p, attr))
			goto change;
		if (attr->sched_flags & SCHED_FLAG_UTIL_CLAMP)
			goto change;

		p->sched_reset_on_fork = reset_on_fork;
		retval = 0;
		goto unlock;
	}
change:

	if (user) {
#ifdef CONFIG_RT_GROUP_SCHED
		/*
		 * Do not allow realtime tasks into groups that have no runtime
		 * assigned.
		 */
		if (rt_bandwidth_enabled() && rt_policy(policy) &&
				task_group(p)->rt_bandwidth.rt_runtime == 0 &&
				!task_group_is_autogroup(task_group(p))) {
			retval = -EPERM;
			goto unlock;
		}
#endif
#ifdef CONFIG_SMP
		if (dl_bandwidth_enabled() && dl_policy(policy) &&
				!(attr->sched_flags & SCHED_FLAG_SUGOV)) {
			cpumask_t *span = rq->rd->span;

			/*
			 * Don't allow tasks with an affinity mask smaller than
			 * the entire root_domain to become SCHED_DEADLINE. We
			 * will also fail if there's no bandwidth available.
			 */
			if (!cpumask_subset(span, p->cpus_ptr) ||
			    rq->rd->dl_bw.bw == 0) {
				retval = -EPERM;
				goto unlock;
			}
		}
#endif
	}

	/* Re-check policy now with rq lock held: */
	if (unlikely(oldpolicy != -1 && oldpolicy != p->policy)) {
		policy = oldpolicy = -1;
		task_rq_unlock(rq, p, &rf);
		goto recheck;
	}

	/*
	 * If setscheduling to SCHED_DEADLINE (or changing the parameters
	 * of a SCHED_DEADLINE task) we need to check if enough bandwidth
	 * is available.
	 */
	if ((dl_policy(policy) || dl_task(p)) && sched_dl_overflow(p, policy, attr)) {
		retval = -EBUSY;
		goto unlock;
	}

	p->sched_reset_on_fork = reset_on_fork;
	oldprio = p->prio;

	newprio = __normal_prio(policy, attr->sched_priority, attr->sched_nice);
	if (pi) {
		/*
		 * Take priority boosted tasks into account. If the new
		 * effective priority is unchanged, we just store the new
		 * normal parameters and do not touch the scheduler class and
		 * the runqueue. This will be done when the task deboosts
		 * itself.
		 */
		newprio = rt_effective_prio(p, newprio);
		if (newprio == oldprio)
			queue_flags &= ~DEQUEUE_MOVE;
	}

	queued = task_on_rq_queued(p);
	running = task_current(rq, p);
	if (queued)
		dequeue_task(rq, p, queue_flags);
	if (running)
		put_prev_task(rq, p);

	prev_class = p->sched_class;

	if (!(attr->sched_flags & SCHED_FLAG_KEEP_PARAMS)) {
		__setscheduler_params(p, attr);
		__setscheduler_prio(p, newprio);
	}
	__setscheduler_uclamp(p, attr);

	if (queued) {
		/*
		 * We enqueue to tail when the priority of a task is
		 * increased (user space view).
		 */
		if (oldprio < p->prio)
			queue_flags |= ENQUEUE_HEAD;

		enqueue_task(rq, p, queue_flags);
	}
	if (running)
		set_next_task(rq, p);

	check_class_changed(rq, p, prev_class, oldprio);

	/* Avoid rq from going away on us: */
	preempt_disable();
	head = splice_balance_callbacks(rq);
	task_rq_unlock(rq, p, &rf);

	if (pi)
		rt_mutex_adjust_pi(p);

	/* Run balance callbacks after we've adjusted the PI chain: */
	balance_callbacks(rq, head);
	preempt_enable();

	return 0;

unlock:
	task_rq_unlock(rq, p, &rf);
	return retval;
}

static int _sched_setscheduler(struct task_struct *p, int policy,
			       const struct sched_param *param, bool check)
{
	struct sched_attr attr = {
		.sched_policy   = policy,
		.sched_priority = param->sched_priority,
		.sched_nice	= PRIO_TO_NICE(p->static_prio),
	};

	/* Fixup the legacy SCHED_RESET_ON_FORK hack. */
	if ((policy != SETPARAM_POLICY) && (policy & SCHED_RESET_ON_FORK)) {
		attr.sched_flags |= SCHED_FLAG_RESET_ON_FORK;
		policy &= ~SCHED_RESET_ON_FORK;
		attr.sched_policy = policy;
	}

	return __sched_setscheduler(p, &attr, check, true);
}
/**
 * sched_setscheduler - change the scheduling policy and/or RT priority of a thread.
 * @p: the task in question.
 * @policy: new policy.
 * @param: structure containing the new RT priority.
 *
 * Use sched_set_fifo(), read its comment.
 *
 * Return: 0 on success. An error code otherwise.
 *
 * NOTE that the task may be already dead.
 */
int sched_setscheduler(struct task_struct *p, int policy,
		       const struct sched_param *param)
{
	return _sched_setscheduler(p, policy, param, true);
}
EXPORT_SYMBOL_GPL(sched_setscheduler);

int sched_setattr(struct task_struct *p, const struct sched_attr *attr)
{
	return __sched_setscheduler(p, attr, true, true);
}
EXPORT_SYMBOL_GPL(sched_setattr);

int sched_setattr_nocheck(struct task_struct *p, const struct sched_attr *attr)
{
	return __sched_setscheduler(p, attr, false, true);
}
EXPORT_SYMBOL_GPL(sched_setattr_nocheck);

/**
 * sched_setscheduler_nocheck - change the scheduling policy and/or RT priority of a thread from kernelspace.
 * @p: the task in question.
 * @policy: new policy.
 * @param: structure containing the new RT priority.
 *
 * Just like sched_setscheduler, only don't bother checking if the
 * current context has permission. For example, this is needed in
 * stop_machine(): we create temporary high priority worker threads,
 * but our caller might not have that capability.
 *
 * Return: 0 on success. An error code otherwise.
 */
int sched_setscheduler_nocheck(struct task_struct *p, int policy,
			       const struct sched_param *param)
{
	return _sched_setscheduler(p, policy, param, false);
}
EXPORT_SYMBOL_GPL(sched_setscheduler_nocheck);

/*
 * SCHED_FIFO is a broken scheduler model; that is, it is fundamentally
 * incapable of resource management, which is the one thing an OS really should
 * be doing.
 *
 * This is of course the reason it is limited to privileged users only.
 *
 * Worse still; it is fundamentally impossible to compose static priority
 * workloads. You cannot take two correctly working static prio workloads
 * and smash them together and still expect them to work.
 *
 * For this reason 'all' FIFO tasks the kernel creates are basically at:
 *
 *   MAX_RT_PRIO / 2
 *
 * The administrator _MUST_ configure the system, the kernel simply doesn't
 * know enough information to make a sensible choice.
 */
void sched_set_fifo(struct task_struct *p)
{
	struct sched_param sp = { .sched_priority = MAX_RT_PRIO / 2 };
	WARN_ON_ONCE(sched_setscheduler_nocheck(p, SCHED_FIFO, &sp) != 0);
}
EXPORT_SYMBOL_GPL(sched_set_fifo);

/*
 * For when you don't much care about FIFO, but want to be above SCHED_NORMAL.
 */
void sched_set_fifo_low(struct task_struct *p)
{
	struct sched_param sp = { .sched_priority = 1 };
	WARN_ON_ONCE(sched_setscheduler_nocheck(p, SCHED_FIFO, &sp) != 0);
}
EXPORT_SYMBOL_GPL(sched_set_fifo_low);
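
/*
 * Illustrative usage sketch (not part of the original source); the thread
 * function and names below are hypothetical:
 *
 *	static int example_thread_fn(void *data);
 *
 *	struct task_struct *worker;
 *
 *	worker = kthread_run(example_thread_fn, NULL, "example-worker");
 *	if (!IS_ERR(worker))
 *		sched_set_fifo(worker);
 *
 * This keeps drivers from inventing their own "magic" FIFO priorities and
 * funnels them all to the MAX_RT_PRIO / 2 default described above.
 */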

void sched_set_normal(struct task_struct *p, int nice)
{
	struct sched_attr attr = {
		.sched_policy = SCHED_NORMAL,
		.sched_nice = nice,
	};
	WARN_ON_ONCE(sched_setattr_nocheck(p, &attr) != 0);
}
EXPORT_SYMBOL_GPL(sched_set_normal);

static int
do_sched_setscheduler(pid_t pid, int policy, struct sched_param __user *param)
{
	struct sched_param lparam;
	struct task_struct *p;
	int retval;

	if (!param || pid < 0)
		return -EINVAL;
	if (copy_from_user(&lparam, param, sizeof(struct sched_param)))
		return -EFAULT;

	rcu_read_lock();
	retval = -ESRCH;
	p = find_process_by_pid(pid);
	if (p != NULL)
		retval = sched_setscheduler(p, policy, &lparam);
	rcu_read_unlock();

	return retval;
}

/*
 * Mimics kernel/events/core.c perf_copy_attr().
 */
static int sched_copy_attr(struct sched_attr __user *uattr, struct sched_attr *attr)
{
	u32 size;
	int ret;

	/* Zero the full structure, so that a short copy will be nice: */
	memset(attr, 0, sizeof(*attr));

	ret = get_user(size, &uattr->size);
	if (ret)
		return ret;

	/* ABI compatibility quirk: */
	if (!size)
		size = SCHED_ATTR_SIZE_VER0;
	if (size < SCHED_ATTR_SIZE_VER0 || size > PAGE_SIZE)
		goto err_size;

	ret = copy_struct_from_user(attr, sizeof(*attr), uattr, size);
	if (ret) {
		if (ret == -E2BIG)
			goto err_size;
		return ret;
	}

	if ((attr->sched_flags & SCHED_FLAG_UTIL_CLAMP) &&
	    size < SCHED_ATTR_SIZE_VER1)
		return -EINVAL;

	/*
	 * XXX: Do we want to be lenient like existing syscalls; or do we want
	 * to be strict and return an error on out-of-bounds values?
	 */
	attr->sched_nice = clamp(attr->sched_nice, MIN_NICE, MAX_NICE);

	return 0;

err_size:
	put_user(sizeof(*attr), &uattr->size);
	return -E2BIG;
}

static void get_params(struct task_struct *p, struct sched_attr *attr)
{
	if (task_has_dl_policy(p))
		__getparam_dl(p, attr);
	else if (task_has_rt_policy(p))
		attr->sched_priority = p->rt_priority;
	else
		attr->sched_nice = task_nice(p);
}

/**
 * sys_sched_setscheduler - set/change the scheduler policy and RT priority
 * @pid: the pid in question.
 * @policy: new policy.
 * @param: structure containing the new RT priority.
 *
 * Return: 0 on success. An error code otherwise.
 */
SYSCALL_DEFINE3(sched_setscheduler, pid_t, pid, int, policy, struct sched_param __user *, param)
{
	if (policy < 0)
		return -EINVAL;

	return do_sched_setscheduler(pid, policy, param);
}
|
|
|
|
/**
|
|
* sys_sched_setparam - set/change the RT priority of a thread
|
|
* @pid: the pid in question.
|
|
* @param: structure containing the new RT priority.
|
|
*
|
|
* Return: 0 on success. An error code otherwise.
|
|
*/
|
|
SYSCALL_DEFINE2(sched_setparam, pid_t, pid, struct sched_param __user *, param)
|
|
{
|
|
return do_sched_setscheduler(pid, SETPARAM_POLICY, param);
|
|
}
|
|
|
|
/**
|
|
* sys_sched_setattr - same as above, but with extended sched_attr
|
|
* @pid: the pid in question.
|
|
* @uattr: structure containing the extended parameters.
|
|
* @flags: for future extension.
|
|
*/
|
|
SYSCALL_DEFINE3(sched_setattr, pid_t, pid, struct sched_attr __user *, uattr,
|
|
unsigned int, flags)
|
|
{
|
|
struct sched_attr attr;
|
|
struct task_struct *p;
|
|
int retval;
|
|
|
|
if (!uattr || pid < 0 || flags)
|
|
return -EINVAL;
|
|
|
|
retval = sched_copy_attr(uattr, &attr);
|
|
if (retval)
|
|
return retval;
|
|
|
|
if ((int)attr.sched_policy < 0)
|
|
return -EINVAL;
|
|
if (attr.sched_flags & SCHED_FLAG_KEEP_POLICY)
|
|
attr.sched_policy = SETPARAM_POLICY;
|
|
|
|
rcu_read_lock();
|
|
retval = -ESRCH;
|
|
p = find_process_by_pid(pid);
|
|
if (likely(p))
|
|
get_task_struct(p);
|
|
rcu_read_unlock();
|
|
|
|
if (likely(p)) {
|
|
if (attr.sched_flags & SCHED_FLAG_KEEP_PARAMS)
|
|
get_params(p, &attr);
|
|
retval = sched_setattr(p, &attr);
|
|
put_task_struct(p);
|
|
}
|
|
|
|
return retval;
|
|
}
|
|
|
|
/**
|
|
* sys_sched_getscheduler - get the policy (scheduling class) of a thread
|
|
* @pid: the pid in question.
|
|
*
|
|
* Return: On success, the policy of the thread. Otherwise, a negative error
|
|
* code.
|
|
*/
|
|
SYSCALL_DEFINE1(sched_getscheduler, pid_t, pid)
|
|
{
|
|
struct task_struct *p;
|
|
int retval;
|
|
|
|
if (pid < 0)
|
|
return -EINVAL;
|
|
|
|
retval = -ESRCH;
|
|
rcu_read_lock();
|
|
p = find_process_by_pid(pid);
|
|
if (p) {
|
|
retval = security_task_getscheduler(p);
|
|
if (!retval)
|
|
retval = p->policy
|
|
| (p->sched_reset_on_fork ? SCHED_RESET_ON_FORK : 0);
|
|
}
|
|
rcu_read_unlock();
|
|
return retval;
|
|
}
|
|
|
|
/**
|
|
* sys_sched_getparam - get the RT priority of a thread
|
|
* @pid: the pid in question.
|
|
* @param: structure containing the RT priority.
|
|
*
|
|
* Return: On success, 0 and the RT priority is in @param. Otherwise, an error
|
|
* code.
|
|
*/
|
|
SYSCALL_DEFINE2(sched_getparam, pid_t, pid, struct sched_param __user *, param)
|
|
{
|
|
struct sched_param lp = { .sched_priority = 0 };
|
|
struct task_struct *p;
|
|
int retval;
|
|
|
|
if (!param || pid < 0)
|
|
return -EINVAL;
|
|
|
|
rcu_read_lock();
|
|
p = find_process_by_pid(pid);
|
|
retval = -ESRCH;
|
|
if (!p)
|
|
goto out_unlock;
|
|
|
|
retval = security_task_getscheduler(p);
|
|
if (retval)
|
|
goto out_unlock;
|
|
|
|
if (task_has_rt_policy(p))
|
|
lp.sched_priority = p->rt_priority;
|
|
rcu_read_unlock();
|
|
|
|
/*
|
|
* This one might sleep, we cannot do it with a spinlock held ...
|
|
*/
|
|
retval = copy_to_user(param, &lp, sizeof(*param)) ? -EFAULT : 0;
|
|
|
|
return retval;
|
|
|
|
out_unlock:
|
|
rcu_read_unlock();
|
|
return retval;
|
|
}
|
|
|
|
/*
|
|
* Copy the kernel size attribute structure (which might be larger
|
|
* than what user-space knows about) to user-space.
|
|
*
|
|
* Note that all cases are valid: user-space buffer can be larger or
|
|
* smaller than the kernel-space buffer. The usual case is that both
|
|
* have the same size.
|
|
*/
|
|
static int
|
|
sched_attr_copy_to_user(struct sched_attr __user *uattr,
|
|
struct sched_attr *kattr,
|
|
unsigned int usize)
|
|
{
|
|
unsigned int ksize = sizeof(*kattr);
|
|
|
|
if (!access_ok(uattr, usize))
|
|
return -EFAULT;
|
|
|
|
/*
|
|
* sched_getattr() ABI forwards and backwards compatibility:
|
|
*
|
|
* If usize == ksize then we just copy everything to user-space and all is good.
|
|
*
|
|
* If usize < ksize then we only copy as much as user-space has space for,
|
|
* this keeps ABI compatibility as well. We skip the rest.
|
|
*
|
|
* If usize > ksize then user-space is using a newer version of the ABI,
|
|
* which part the kernel doesn't know about. Just ignore it - tooling can
|
|
* detect the kernel's knowledge of attributes from the attr->size value
|
|
* which is set to ksize in this case.
|
|
*/
|
|
kattr->size = min(usize, ksize);
|
|
|
|
if (copy_to_user(uattr, kattr, kattr->size))
|
|
return -EFAULT;
|
|
|
|
return 0;
|
|
}
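
/*
 * The usize/ksize dance above lets tooling probe how much of sched_attr a
 * running kernel understands (sketch, assuming the raw-syscall approach from
 * the sched_setattr example earlier):
 *
 *	struct sched_attr attr;
 *	if (syscall(SYS_sched_getattr, 0, &attr, sizeof(attr), 0) == 0)
 *		printf("kernel sched_attr size: %u\n", attr.size);
 *
 * attr.size comes back as min(usize, ksize): an old kernel reports its own
 * smaller ksize even when the caller offered a bigger buffer.
 */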

/**
 * sys_sched_getattr - similar to sched_getparam, but with sched_attr
 * @pid: the pid in question.
 * @uattr: structure containing the extended parameters.
 * @usize: sizeof(attr) for fwd/bwd comp.
 * @flags: for future extension.
 */
SYSCALL_DEFINE4(sched_getattr, pid_t, pid, struct sched_attr __user *, uattr,
		unsigned int, usize, unsigned int, flags)
{
	struct sched_attr kattr = { };
	struct task_struct *p;
	int retval;

	if (!uattr || pid < 0 || usize > PAGE_SIZE ||
	    usize < SCHED_ATTR_SIZE_VER0 || flags)
		return -EINVAL;

	rcu_read_lock();
	p = find_process_by_pid(pid);
	retval = -ESRCH;
	if (!p)
		goto out_unlock;

	retval = security_task_getscheduler(p);
	if (retval)
		goto out_unlock;

	kattr.sched_policy = p->policy;
	if (p->sched_reset_on_fork)
		kattr.sched_flags |= SCHED_FLAG_RESET_ON_FORK;
	get_params(p, &kattr);
	kattr.sched_flags &= SCHED_FLAG_ALL;

#ifdef CONFIG_UCLAMP_TASK
	/*
	 * This could race with another potential updater, but this is fine
	 * because it'll correctly read the old or the new value. We don't need
	 * to guarantee who wins the race as long as it doesn't return garbage.
	 */
	kattr.sched_util_min = p->uclamp_req[UCLAMP_MIN].value;
	kattr.sched_util_max = p->uclamp_req[UCLAMP_MAX].value;
#endif

	rcu_read_unlock();

	return sched_attr_copy_to_user(uattr, &kattr, usize);

out_unlock:
	rcu_read_unlock();
	return retval;
}

#ifdef CONFIG_SMP
int dl_task_check_affinity(struct task_struct *p, const struct cpumask *mask)
{
	int ret = 0;

	/*
	 * If the task isn't a deadline task or admission control is
	 * disabled then we don't care about affinity changes.
	 */
	if (!task_has_dl_policy(p) || !dl_bandwidth_enabled())
		return 0;

	/*
	 * Since bandwidth control happens on root_domain basis,
	 * if admission test is enabled, we only admit -deadline
	 * tasks allowed to run on all the CPUs in the task's
	 * root_domain.
	 */
	rcu_read_lock();
	if (!cpumask_subset(task_rq(p)->rd->span, mask))
		ret = -EBUSY;
	rcu_read_unlock();
	return ret;
}
#endif

static int
__sched_setaffinity(struct task_struct *p, const struct cpumask *mask)
{
	int retval;
	cpumask_var_t cpus_allowed, new_mask;

	if (!alloc_cpumask_var(&cpus_allowed, GFP_KERNEL))
		return -ENOMEM;

	if (!alloc_cpumask_var(&new_mask, GFP_KERNEL)) {
		retval = -ENOMEM;
		goto out_free_cpus_allowed;
	}

	cpuset_cpus_allowed(p, cpus_allowed);
	cpumask_and(new_mask, mask, cpus_allowed);

	retval = dl_task_check_affinity(p, new_mask);
	if (retval)
		goto out_free_new_mask;
again:
	retval = __set_cpus_allowed_ptr(p, new_mask, SCA_CHECK | SCA_USER);
	if (retval)
		goto out_free_new_mask;

	cpuset_cpus_allowed(p, cpus_allowed);
	if (!cpumask_subset(new_mask, cpus_allowed)) {
		/*
		 * We must have raced with a concurrent cpuset update.
		 * Just reset the cpumask to the cpuset's cpus_allowed.
		 */
		cpumask_copy(new_mask, cpus_allowed);
		goto again;
	}

out_free_new_mask:
	free_cpumask_var(new_mask);
out_free_cpus_allowed:
	free_cpumask_var(cpus_allowed);
	return retval;
}

long sched_setaffinity(pid_t pid, const struct cpumask *in_mask)
{
	struct task_struct *p;
	int retval = 0;
	int skip = 0;

	rcu_read_lock();

	p = find_process_by_pid(pid);
	if (!p) {
		rcu_read_unlock();
		return -ESRCH;
	}

	/* Prevent p going away */
	get_task_struct(p);
	rcu_read_unlock();

	if (p->flags & PF_NO_SETAFFINITY) {
		retval = -EINVAL;
		goto out_put_task;
	}

	if (!check_same_owner(p)) {
		rcu_read_lock();
		if (!ns_capable(__task_cred(p)->user_ns, CAP_SYS_NICE)) {
			rcu_read_unlock();
			retval = -EPERM;
			goto out_put_task;
		}
		rcu_read_unlock();
	}

	trace_android_vh_sched_setaffinity_early(p, in_mask, &skip);
	if (skip)
		goto out_put_task;
	retval = security_task_setscheduler(p);
	if (retval)
		goto out_put_task;

	retval = __sched_setaffinity(p, in_mask);
	trace_android_rvh_sched_setaffinity(p, in_mask, &retval);

out_put_task:
	put_task_struct(p);
	return retval;
}

static int get_user_cpu_mask(unsigned long __user *user_mask_ptr, unsigned len,
			     struct cpumask *new_mask)
{
	if (len < cpumask_size())
		cpumask_clear(new_mask);
	else if (len > cpumask_size())
		len = cpumask_size();

	return copy_from_user(new_mask, user_mask_ptr, len) ? -EFAULT : 0;
}

/**
 * sys_sched_setaffinity - set the CPU affinity of a process
 * @pid: pid of the process
 * @len: length in bytes of the bitmask pointed to by user_mask_ptr
 * @user_mask_ptr: user-space pointer to the new CPU mask
 *
 * Return: 0 on success. An error code otherwise.
 */
SYSCALL_DEFINE3(sched_setaffinity, pid_t, pid, unsigned int, len,
		unsigned long __user *, user_mask_ptr)
{
	cpumask_var_t new_mask;
	int retval;

	if (!alloc_cpumask_var(&new_mask, GFP_KERNEL))
		return -ENOMEM;

	retval = get_user_cpu_mask(user_mask_ptr, len, new_mask);
	if (retval == 0)
		retval = sched_setaffinity(pid, new_mask);
	free_cpumask_var(new_mask);
	return retval;
}

long sched_getaffinity(pid_t pid, struct cpumask *mask)
{
	struct task_struct *p;
	unsigned long flags;
	int retval;

	rcu_read_lock();

	retval = -ESRCH;
	p = find_process_by_pid(pid);
	if (!p)
		goto out_unlock;

	retval = security_task_getscheduler(p);
	if (retval)
		goto out_unlock;

	raw_spin_lock_irqsave(&p->pi_lock, flags);
	cpumask_and(mask, &p->cpus_mask, cpu_active_mask);
	trace_android_rvh_sched_getaffinity(p, mask);
	raw_spin_unlock_irqrestore(&p->pi_lock, flags);

out_unlock:
	rcu_read_unlock();

	return retval;
}

/**
 * sys_sched_getaffinity - get the CPU affinity of a process
 * @pid: pid of the process
 * @len: length in bytes of the bitmask pointed to by user_mask_ptr
 * @user_mask_ptr: user-space pointer to hold the current CPU mask
 *
 * Return: size of CPU mask copied to user_mask_ptr on success. An
 * error code otherwise.
 */
SYSCALL_DEFINE3(sched_getaffinity, pid_t, pid, unsigned int, len,
		unsigned long __user *, user_mask_ptr)
{
	int ret;
	cpumask_var_t mask;

	if ((len * BITS_PER_BYTE) < nr_cpu_ids)
		return -EINVAL;
	if (len & (sizeof(unsigned long)-1))
		return -EINVAL;

	if (!alloc_cpumask_var(&mask, GFP_KERNEL))
		return -ENOMEM;

	ret = sched_getaffinity(pid, mask);
	if (ret == 0) {
		unsigned int retlen = min(len, cpumask_size());

		if (copy_to_user(user_mask_ptr, mask, retlen))
			ret = -EFAULT;
		else
			ret = retlen;
	}
	free_cpumask_var(mask);

	return ret;
}
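
/*
 * The two affinity syscalls above pair up as follows from userspace
 * (illustrative sketch; the glibc wrappers hide the length handling):
 *
 *	#define _GNU_SOURCE
 *	#include <sched.h>
 *
 *	cpu_set_t set;
 *	CPU_ZERO(&set);
 *	CPU_SET(0, &set);
 *	CPU_SET(1, &set);
 *	sched_setaffinity(0, sizeof(set), &set);	// pin to CPUs 0-1
 *	sched_getaffinity(0, sizeof(set), &set);	// read back effective mask
 *
 * Note the raw sys_sched_getaffinity() returns the number of mask bytes
 * copied, not 0; the glibc wrapper normalizes that to 0/-1.
 */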

static void do_sched_yield(void)
{
	struct rq_flags rf;
	struct rq *rq;

	rq = this_rq_lock_irq(&rf);

	schedstat_inc(rq->yld_count);
	current->sched_class->yield_task(rq);

	trace_android_rvh_do_sched_yield(rq);

	preempt_disable();
	rq_unlock_irq(rq, &rf);
	sched_preempt_enable_no_resched();

	schedule();
}

/**
 * sys_sched_yield - yield the current processor to other threads.
 *
 * This function yields the current CPU to other tasks. If there are no
 * other threads running on this CPU then this function will return.
 *
 * Return: 0.
 */
SYSCALL_DEFINE0(sched_yield)
{
	do_sched_yield();
	return 0;
}

#if !defined(CONFIG_PREEMPTION) || defined(CONFIG_PREEMPT_DYNAMIC)
int __sched __cond_resched(void)
{
	if (should_resched(0)) {
		preempt_schedule_common();
		return 1;
	}
	/*
	 * In preemptible kernels, ->rcu_read_lock_nesting tells the tick
	 * whether the current CPU is in an RCU read-side critical section,
	 * so the tick can report quiescent states even for CPUs looping
	 * in kernel context. In contrast, in non-preemptible kernels,
	 * RCU readers leave no in-memory hints, which means that CPU-bound
	 * processes executing in kernel context might never report an
	 * RCU quiescent state. Therefore, the following code causes
	 * cond_resched() to report a quiescent state, but only when RCU
	 * is in urgent need of one.
	 */
#ifndef CONFIG_PREEMPT_RCU
	rcu_all_qs();
#endif
	return 0;
}
EXPORT_SYMBOL(__cond_resched);
#endif

#ifdef CONFIG_PREEMPT_DYNAMIC
DEFINE_STATIC_CALL_RET0(cond_resched, __cond_resched);
EXPORT_STATIC_CALL_TRAMP(cond_resched);

DEFINE_STATIC_CALL_RET0(might_resched, __cond_resched);
EXPORT_STATIC_CALL_TRAMP(might_resched);
#endif

/*
 * __cond_resched_lock() - if a reschedule is pending, drop the given lock,
 * call schedule, and on return reacquire the lock.
 *
 * This works OK both with and without CONFIG_PREEMPTION. We do strange low-level
 * operations here to prevent schedule() from being called twice (once via
 * spin_unlock(), once by hand).
 */
int __cond_resched_lock(spinlock_t *lock)
{
	int resched = should_resched(PREEMPT_LOCK_OFFSET);
	int ret = 0;

	lockdep_assert_held(lock);

	if (spin_needbreak(lock) || resched) {
		spin_unlock(lock);
		if (!_cond_resched())
			cpu_relax();
		ret = 1;
		spin_lock(lock);
	}
	return ret;
}
EXPORT_SYMBOL(__cond_resched_lock);
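
/*
 * Typical use of cond_resched_lock() (a minimal sketch; the cache, list and
 * helper names are hypothetical): a long scan under a spinlock periodically
 * gives the lock and the CPU away without open-coding unlock/schedule/lock.
 *
 *	spin_lock(&cache->lock);
 *	list_for_each_entry_safe(obj, tmp, &cache->lru, node) {
 *		evict_if_stale(obj);
 *		cond_resched_lock(&cache->lock);	// may drop & retake the lock
 *	}
 *	spin_unlock(&cache->lock);
 *
 * Because the lock can be dropped mid-walk, the iteration must tolerate
 * concurrent list changes (or restart from a stable anchor).
 */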

int __cond_resched_rwlock_read(rwlock_t *lock)
{
	int resched = should_resched(PREEMPT_LOCK_OFFSET);
	int ret = 0;

	lockdep_assert_held_read(lock);

	if (rwlock_needbreak(lock) || resched) {
		read_unlock(lock);
		if (!_cond_resched())
			cpu_relax();
		ret = 1;
		read_lock(lock);
	}
	return ret;
}
EXPORT_SYMBOL(__cond_resched_rwlock_read);

int __cond_resched_rwlock_write(rwlock_t *lock)
{
	int resched = should_resched(PREEMPT_LOCK_OFFSET);
	int ret = 0;

	lockdep_assert_held_write(lock);

	if (rwlock_needbreak(lock) || resched) {
		write_unlock(lock);
		if (!_cond_resched())
			cpu_relax();
		ret = 1;
		write_lock(lock);
	}
	return ret;
}
EXPORT_SYMBOL(__cond_resched_rwlock_write);

/**
 * yield - yield the current processor to other threads.
 *
 * Do not ever use this function, there's a 99% chance you're doing it wrong.
 *
 * The scheduler is at all times free to pick the calling task as the most
 * eligible task to run, if removing the yield() call from your code breaks
 * it, it's already broken.
 *
 * Typical broken usage is:
 *
 *	while (!event)
 *		yield();
 *
 * where one assumes that yield() will let 'the other' process run that will
 * make event true. If the current task is a SCHED_FIFO task that will never
 * happen. Never use yield() as a progress guarantee!!
 *
 * If you want to use yield() to wait for something, use wait_event().
 * If you want to use yield() to be 'nice' for others, use cond_resched().
 * If you still want to use yield(), do not!
 */
void __sched yield(void)
{
	set_current_state(TASK_RUNNING);
	do_sched_yield();
}
EXPORT_SYMBOL(yield);
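
/*
 * What the "typical broken usage" above should look like instead (a minimal
 * sketch; 'wq' is a hypothetical wait_queue_head_t, 'event' a shared flag):
 *
 *	// waiter
 *	wait_event(wq, event);
 *
 *	// producer
 *	event = true;
 *	wake_up(&wq);
 *
 * wait_event() actually sleeps the task, so even a SCHED_FIFO waiter cannot
 * starve the producer, which is exactly the failure mode of the yield() loop.
 */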

/**
 * yield_to - yield the current processor to another thread in
 * your thread group, or accelerate that thread toward the
 * processor it's on.
 * @p: target task
 * @preempt: whether task preemption is allowed or not
 *
 * It's the caller's job to ensure that the target task struct
 * can't go away on us before we can do any checks.
 *
 * Return:
 *	true (>0) if we indeed boosted the target task.
 *	false (0) if we failed to boost the target.
 *	-ESRCH if there's no task to yield to.
 */
int __sched yield_to(struct task_struct *p, bool preempt)
{
	struct task_struct *curr = current;
	struct rq *rq, *p_rq;
	unsigned long flags;
	int yielded = 0;

	local_irq_save(flags);
	rq = this_rq();

again:
	p_rq = task_rq(p);
	/*
	 * If we're the only runnable task on the rq and target rq also
	 * has only one task, there's absolutely no point in yielding.
	 */
	if (rq->nr_running == 1 && p_rq->nr_running == 1) {
		yielded = -ESRCH;
		goto out_irq;
	}

	double_rq_lock(rq, p_rq);
	if (task_rq(p) != p_rq) {
		double_rq_unlock(rq, p_rq);
		goto again;
	}

	if (!curr->sched_class->yield_to_task)
		goto out_unlock;

	if (curr->sched_class != p->sched_class)
		goto out_unlock;

	if (task_running(p_rq, p) || !task_is_running(p))
		goto out_unlock;

	yielded = curr->sched_class->yield_to_task(rq, p);
	if (yielded) {
		schedstat_inc(rq->yld_count);
		/*
		 * Make p's CPU reschedule; pick_next_entity takes care of
		 * fairness.
		 */
		if (preempt && rq != p_rq)
			resched_curr(p_rq);
	}

out_unlock:
	double_rq_unlock(rq, p_rq);
out_irq:
	local_irq_restore(flags);

	if (yielded > 0)
		schedule();

	return yielded;
}
EXPORT_SYMBOL_GPL(yield_to);

int io_schedule_prepare(void)
{
	int old_iowait = current->in_iowait;

	current->in_iowait = 1;
	blk_schedule_flush_plug(current);

	return old_iowait;
}

void io_schedule_finish(int token)
{
	current->in_iowait = token;
}

/*
 * This task is about to go to sleep on IO. Increment rq->nr_iowait so
 * that process accounting knows that this is a task in IO wait state.
 */
long __sched io_schedule_timeout(long timeout)
{
	int token;
	long ret;

	token = io_schedule_prepare();
	ret = schedule_timeout(timeout);
	io_schedule_finish(token);

	return ret;
}
EXPORT_SYMBOL(io_schedule_timeout);

void __sched io_schedule(void)
{
	int token;

	token = io_schedule_prepare();
	schedule();
	io_schedule_finish(token);
}
EXPORT_SYMBOL(io_schedule);
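
/*
 * Callers sleeping on I/O completion use these helpers so the time is
 * accounted as iowait rather than idle (illustrative sketch; the completion
 * name is hypothetical):
 *
 *	set_current_state(TASK_UNINTERRUPTIBLE);
 *	long left = io_schedule_timeout(HZ);	// at most ~1s in D state
 *
 * or, open-coded around an arbitrary blocking primitive:
 *
 *	int tok = io_schedule_prepare();
 *	wait_for_completion(&my_bio_done);
 *	io_schedule_finish(tok);
 */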

/**
 * sys_sched_get_priority_max - return maximum RT priority.
 * @policy: scheduling class.
 *
 * Return: On success, this syscall returns the maximum
 * rt_priority that can be used by a given scheduling class.
 * On failure, a negative error code is returned.
 */
SYSCALL_DEFINE1(sched_get_priority_max, int, policy)
{
	int ret = -EINVAL;

	switch (policy) {
	case SCHED_FIFO:
	case SCHED_RR:
		ret = MAX_RT_PRIO-1;
		break;
	case SCHED_DEADLINE:
	case SCHED_NORMAL:
	case SCHED_BATCH:
	case SCHED_IDLE:
		ret = 0;
		break;
	}
	return ret;
}

/**
 * sys_sched_get_priority_min - return minimum RT priority.
 * @policy: scheduling class.
 *
 * Return: On success, this syscall returns the minimum
 * rt_priority that can be used by a given scheduling class.
 * On failure, a negative error code is returned.
 */
SYSCALL_DEFINE1(sched_get_priority_min, int, policy)
{
	int ret = -EINVAL;

	switch (policy) {
	case SCHED_FIFO:
	case SCHED_RR:
		ret = 1;
		break;
	case SCHED_DEADLINE:
	case SCHED_NORMAL:
	case SCHED_BATCH:
	case SCHED_IDLE:
		ret = 0;
	}
	return ret;
}
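
/*
 * Userspace commonly brackets its RT priorities with the pair of syscalls
 * above instead of hard-coding 1..99 (sketch, via the glibc wrappers):
 *
 *	#include <sched.h>
 *
 *	int lo = sched_get_priority_min(SCHED_FIFO);	// 1 on Linux
 *	int hi = sched_get_priority_max(SCHED_FIFO);	// MAX_RT_PRIO-1 == 99
 *	struct sched_param param = { .sched_priority = (lo + hi) / 2 };
 */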

static int sched_rr_get_interval(pid_t pid, struct timespec64 *t)
{
	struct task_struct *p;
	unsigned int time_slice;
	struct rq_flags rf;
	struct rq *rq;
	int retval;

	if (pid < 0)
		return -EINVAL;

	retval = -ESRCH;
	rcu_read_lock();
	p = find_process_by_pid(pid);
	if (!p)
		goto out_unlock;

	retval = security_task_getscheduler(p);
	if (retval)
		goto out_unlock;

	rq = task_rq_lock(p, &rf);
	time_slice = 0;
	if (p->sched_class->get_rr_interval)
		time_slice = p->sched_class->get_rr_interval(rq, p);
	task_rq_unlock(rq, p, &rf);

	rcu_read_unlock();
	jiffies_to_timespec64(time_slice, t);
	return 0;

out_unlock:
	rcu_read_unlock();
	return retval;
}

/**
 * sys_sched_rr_get_interval - return the default timeslice of a process.
 * @pid: pid of the process.
 * @interval: userspace pointer to the timeslice value.
 *
 * This syscall writes the default timeslice value of a given process
 * into the user-space timespec buffer. A value of '0' means infinity.
 *
 * Return: On success, 0 and the timeslice is in @interval. Otherwise,
 * an error code.
 */
SYSCALL_DEFINE2(sched_rr_get_interval, pid_t, pid,
		struct __kernel_timespec __user *, interval)
{
	struct timespec64 t;
	int retval = sched_rr_get_interval(pid, &t);

	if (retval == 0)
		retval = put_timespec64(&t, interval);

	return retval;
}
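
/*
 * Example of reading the timeslice from userspace (sketch): tasks whose
 * scheduling class does not implement get_rr_interval() report 0, i.e.
 * "infinity", per the fallback above.
 *
 *	#include <sched.h>
 *	#include <stdio.h>
 *
 *	struct timespec ts;
 *	if (sched_rr_get_interval(0, &ts) == 0)
 *		printf("timeslice: %ld.%09ld s\n", ts.tv_sec, ts.tv_nsec);
 */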

#ifdef CONFIG_COMPAT_32BIT_TIME
SYSCALL_DEFINE2(sched_rr_get_interval_time32, pid_t, pid,
		struct old_timespec32 __user *, interval)
{
	struct timespec64 t;
	int retval = sched_rr_get_interval(pid, &t);

	if (retval == 0)
		retval = put_old_timespec32(&t, interval);
	return retval;
}
#endif

void sched_show_task(struct task_struct *p)
{
	unsigned long free = 0;
	int ppid;

	if (!try_get_task_stack(p))
		return;

	pr_info("task:%-15.15s state:%c", p->comm, task_state_to_char(p));

	if (task_is_running(p))
		pr_cont("  running task    ");
#ifdef CONFIG_DEBUG_STACK_USAGE
	free = stack_not_used(p);
#endif
	ppid = 0;
	rcu_read_lock();
	if (pid_alive(p))
		ppid = task_pid_nr(rcu_dereference(p->real_parent));
	rcu_read_unlock();
	pr_cont(" stack:%5lu pid:%5d ppid:%6d flags:0x%08lx\n",
		free, task_pid_nr(p), ppid,
		(unsigned long)task_thread_info(p)->flags);

	print_worker_info(KERN_INFO, p);
	print_stop_info(KERN_INFO, p);
	trace_android_vh_sched_show_task(p);
	show_stack(p, NULL, KERN_INFO);
	put_task_stack(p);
}
EXPORT_SYMBOL_GPL(sched_show_task);

static inline bool
state_filter_match(unsigned long state_filter, struct task_struct *p)
{
	unsigned int state = READ_ONCE(p->__state);

	/* no filter, everything matches */
	if (!state_filter)
		return true;

	/* filter, but doesn't match */
	if (!(state & state_filter))
		return false;

	/*
	 * When looking for TASK_UNINTERRUPTIBLE skip TASK_IDLE (allows
	 * TASK_KILLABLE).
	 */
	if (state_filter == TASK_UNINTERRUPTIBLE && state == TASK_IDLE)
		return false;

	return true;
}


void show_state_filter(unsigned int state_filter)
{
	struct task_struct *g, *p;

	rcu_read_lock();
	for_each_process_thread(g, p) {
		/*
		 * reset the NMI-timeout, listing all files on a slow
		 * console might take a lot of time:
		 * Also, reset softlockup watchdogs on all CPUs, because
		 * another CPU might be blocked waiting for us to process
		 * an IPI.
		 */
		touch_nmi_watchdog();
		touch_all_softlockup_watchdogs();
		if (state_filter_match(state_filter, p))
			sched_show_task(p);
	}

#ifdef CONFIG_SCHED_DEBUG
	if (!state_filter)
		sysrq_sched_debug_show();
#endif
	rcu_read_unlock();
	/*
	 * Only show locks if all tasks are dumped:
	 */
	if (!state_filter)
		debug_show_all_locks();
}

/**
 * init_idle - set up an idle thread for a given CPU
 * @idle: task in question
 * @cpu: CPU the idle task belongs to
 *
 * NOTE: this function does not set the idle thread's NEED_RESCHED
 * flag, to make booting more robust.
 */
void __init init_idle(struct task_struct *idle, int cpu)
{
	struct rq *rq = cpu_rq(cpu);
	unsigned long flags;

	__sched_fork(0, idle);

	/*
	 * The idle task doesn't need the kthread struct to function, but it
	 * is dressed up as a per-CPU kthread and thus needs to play the part
	 * if we want to avoid special-casing it in code that deals with per-CPU
	 * kthreads.
	 */
	set_kthread_struct(idle);

	raw_spin_lock_irqsave(&idle->pi_lock, flags);
	raw_spin_rq_lock(rq);

	idle->__state = TASK_RUNNING;
	idle->se.exec_start = sched_clock();
	/*
	 * PF_KTHREAD should already be set at this point; regardless, make it
	 * look like a proper per-CPU kthread.
	 */
	idle->flags |= PF_IDLE | PF_KTHREAD | PF_NO_SETAFFINITY;
	kthread_set_per_cpu(idle, cpu);

#ifdef CONFIG_SMP
	/*
	 * It's possible that init_idle() gets called multiple times on a task,
	 * in that case do_set_cpus_allowed() will not do the right thing.
	 *
	 * And since this is boot we can forgo the serialization.
	 */
	set_cpus_allowed_common(idle, cpumask_of(cpu), 0);
#endif
	/*
	 * We're having a chicken and egg problem, even though we are
	 * holding rq->lock, the CPU isn't yet set to this CPU so the
	 * lockdep check in task_group() will fail.
	 *
	 * Similar case to sched_fork(). / Alternatively we could
	 * use task_rq_lock() here and obtain the other rq->lock.
	 *
	 * Silence PROVE_RCU
	 */
	rcu_read_lock();
	__set_task_cpu(idle, cpu);
	rcu_read_unlock();

	rq->idle = idle;
	rcu_assign_pointer(rq->curr, idle);
	idle->on_rq = TASK_ON_RQ_QUEUED;
#ifdef CONFIG_SMP
	idle->on_cpu = 1;
#endif
	raw_spin_rq_unlock(rq);
	raw_spin_unlock_irqrestore(&idle->pi_lock, flags);

	/* Set the preempt count _outside_ the spinlocks! */
	init_idle_preempt_count(idle, cpu);

	/*
	 * The idle tasks have their own, simple scheduling class:
	 */
	idle->sched_class = &idle_sched_class;
	ftrace_graph_init_idle_task(idle, cpu);
	vtime_init_idle(idle, cpu);
#ifdef CONFIG_SMP
	sprintf(idle->comm, "%s/%d", INIT_TASK_COMM, cpu);
#endif
}

#ifdef CONFIG_SMP

int cpuset_cpumask_can_shrink(const struct cpumask *cur,
			      const struct cpumask *trial)
{
	int ret = 1;

	if (!cpumask_weight(cur))
		return ret;

	ret = dl_cpuset_cpumask_can_shrink(cur, trial);

	return ret;
}

int task_can_attach(struct task_struct *p,
		    const struct cpumask *cs_effective_cpus)
{
	int ret = 0;

	/*
	 * Kthreads which disallow setaffinity shouldn't be moved
	 * to a new cpuset; we don't want to change their CPU
	 * affinity and isolating such threads by their set of
	 * allowed nodes is unnecessary. Thus, cpusets are not
	 * applicable for such threads. This prevents checking for
	 * success of set_cpus_allowed_ptr() on all attached tasks
	 * before cpus_mask may be changed.
	 */
	if (p->flags & PF_NO_SETAFFINITY) {
		ret = -EINVAL;
		goto out;
	}

	if (dl_task(p) && !cpumask_intersects(task_rq(p)->rd->span,
					      cs_effective_cpus)) {
		int cpu = cpumask_any_and(cpu_active_mask, cs_effective_cpus);

		if (unlikely(cpu >= nr_cpu_ids))
			return -EINVAL;
		ret = dl_cpu_busy(cpu, p);
	}

out:
	return ret;
}

bool sched_smp_initialized __read_mostly;

#ifdef CONFIG_NUMA_BALANCING
/* Migrate current task p to target_cpu */
int migrate_task_to(struct task_struct *p, int target_cpu)
{
	struct migration_arg arg = { p, target_cpu };
	int curr_cpu = task_cpu(p);

	if (curr_cpu == target_cpu)
		return 0;

	if (!cpumask_test_cpu(target_cpu, p->cpus_ptr))
		return -EINVAL;

	/* TODO: This is not properly updating schedstats */

	trace_sched_move_numa(p, curr_cpu, target_cpu);
	return stop_one_cpu(curr_cpu, migration_cpu_stop, &arg);
}

/*
 * Requeue a task on a given node and accurately track the number of NUMA
 * tasks on the runqueues
 */
void sched_setnuma(struct task_struct *p, int nid)
{
	bool queued, running;
	struct rq_flags rf;
	struct rq *rq;

	rq = task_rq_lock(p, &rf);
	queued = task_on_rq_queued(p);
	running = task_current(rq, p);

	if (queued)
		dequeue_task(rq, p, DEQUEUE_SAVE);
	if (running)
		put_prev_task(rq, p);

	p->numa_preferred_nid = nid;

	if (queued)
		enqueue_task(rq, p, ENQUEUE_RESTORE | ENQUEUE_NOCLOCK);
	if (running)
		set_next_task(rq, p);
	task_rq_unlock(rq, p, &rf);
}
#endif /* CONFIG_NUMA_BALANCING */

#ifdef CONFIG_HOTPLUG_CPU
/*
 * Ensure that the idle task is using init_mm right before its CPU goes
 * offline.
 */
void idle_task_exit(void)
{
	struct mm_struct *mm = current->active_mm;

	BUG_ON(cpu_online(smp_processor_id()));
	BUG_ON(current != this_rq()->idle);

	if (mm != &init_mm) {
		switch_mm(mm, &init_mm, current);
		finish_arch_post_lock_switch();
	}

	/* finish_cpu(), as ran on the BP, will clean up the active_mm state */
}

struct task_struct *pick_migrate_task(struct rq *rq)
{
	const struct sched_class *class;
	struct task_struct *next;

	for_each_class(class) {
		next = class->pick_next_task(rq);
		if (next) {
			next->sched_class->put_prev_task(rq, next);
			return next;
		}
	}

	/* The idle class should always have a runnable task */
	BUG();
}
EXPORT_SYMBOL_GPL(pick_migrate_task);

static int __balance_push_cpu_stop(void *arg)
{
	struct task_struct *p = arg;
	struct rq *rq = this_rq();
	struct rq_flags rf;
	int cpu;

	raw_spin_lock_irq(&p->pi_lock);
	rq_lock(rq, &rf);

	update_rq_clock(rq);

	if (task_rq(p) == rq && task_on_rq_queued(p)) {
		cpu = select_fallback_rq(rq->cpu, p);
		rq = __migrate_task(rq, &rf, p, cpu);
	}

	rq_unlock(rq, &rf);
	raw_spin_unlock_irq(&p->pi_lock);

	put_task_struct(p);

	return 0;
}

static DEFINE_PER_CPU(struct cpu_stop_work, push_work);

/*
 * Ensure we only run per-cpu kthreads once the CPU goes !active.
 *
 * This is enabled below SCHED_AP_ACTIVE; when !cpu_active(), but only
 * effective when the hotplug motion is down.
 */
static void balance_push(struct rq *rq)
{
	struct task_struct *push_task = rq->curr;

	lockdep_assert_rq_held(rq);

	/*
	 * Ensure the thing is persistent until balance_push_set(.on = false);
	 */
	rq->balance_callback = &balance_push_callback;

	/*
	 * Only active while going offline and when invoked on the outgoing
	 * CPU.
	 */
	if (!cpu_dying(rq->cpu) || rq != this_rq())
		return;

	/*
	 * Both the cpu-hotplug and stop task are in this case and are
	 * required to complete the hotplug process.
	 */
	if (kthread_is_per_cpu(push_task) ||
	    is_migration_disabled(push_task)) {

		/*
		 * If this is the idle task on the outgoing CPU try to wake
		 * up the hotplug control thread which might wait for the
		 * last task to vanish. The rcuwait_active() check is
		 * accurate here because the waiter is pinned on this CPU
		 * and can't obviously be running in parallel.
		 *
		 * On RT kernels this also has to check whether there are
		 * pinned and scheduled out tasks on the runqueue. They
		 * need to leave the migrate disabled section first.
		 */
		if (!rq->nr_running && !rq_has_pinned_tasks(rq) &&
		    rcuwait_active(&rq->hotplug_wait)) {
			raw_spin_rq_unlock(rq);
			rcuwait_wake_up(&rq->hotplug_wait);
			raw_spin_rq_lock(rq);
		}
		return;
	}

	get_task_struct(push_task);
	/*
	 * Temporarily drop rq->lock such that we can wake-up the stop task.
	 * Both preemption and IRQs are still disabled.
	 */
	raw_spin_rq_unlock(rq);
	stop_one_cpu_nowait(rq->cpu, __balance_push_cpu_stop, push_task,
			    this_cpu_ptr(&push_work));
	/*
	 * At this point need_resched() is true and we'll take the loop in
	 * schedule(). The next pick is obviously going to be the stop task
	 * which kthread_is_per_cpu() and will push this task away.
	 */
	raw_spin_rq_lock(rq);
}

static void balance_push_set(int cpu, bool on)
{
	struct rq *rq = cpu_rq(cpu);
	struct rq_flags rf;

	rq_lock_irqsave(rq, &rf);
	if (on) {
		WARN_ON_ONCE(rq->balance_callback);
		rq->balance_callback = &balance_push_callback;
	} else if (rq->balance_callback == &balance_push_callback) {
		rq->balance_callback = NULL;
	}
	rq_unlock_irqrestore(rq, &rf);
}

/*
 * Invoked from a CPUs hotplug control thread after the CPU has been marked
 * inactive. All tasks which are not per CPU kernel threads are either
 * pushed off this CPU now via balance_push() or placed on a different CPU
 * during wakeup. Wait until the CPU is quiescent.
 */
static void balance_hotplug_wait(void)
{
	struct rq *rq = this_rq();

	rcuwait_wait_event(&rq->hotplug_wait,
			   rq->nr_running == 1 && !rq_has_pinned_tasks(rq),
			   TASK_UNINTERRUPTIBLE);
}

#else

static inline void balance_push(struct rq *rq)
{
}

static inline void balance_push_set(int cpu, bool on)
{
}

static inline void balance_hotplug_wait(void)
{
}

#endif /* CONFIG_HOTPLUG_CPU */

void set_rq_online(struct rq *rq)
{
	if (!rq->online) {
		const struct sched_class *class;

		cpumask_set_cpu(rq->cpu, rq->rd->online);
		rq->online = 1;

		for_each_class(class) {
			if (class->rq_online)
				class->rq_online(rq);
		}
	}
}

void set_rq_offline(struct rq *rq)
{
	if (rq->online) {
		const struct sched_class *class;

		for_each_class(class) {
			if (class->rq_offline)
				class->rq_offline(rq);
		}

		cpumask_clear_cpu(rq->cpu, rq->rd->online);
		rq->online = 0;
	}
}

/*
 * used to mark begin/end of suspend/resume:
 */
static int num_cpus_frozen;

/*
 * Update cpusets according to cpu_active mask. If cpusets are
 * disabled, cpuset_update_active_cpus() becomes a simple wrapper
 * around partition_sched_domains().
 *
 * If we come here as part of a suspend/resume, don't touch cpusets because we
 * want to restore it back to its original state upon resume anyway.
 */
static void cpuset_cpu_active(void)
{
	if (cpuhp_tasks_frozen) {
		/*
		 * num_cpus_frozen tracks how many CPUs are involved in suspend
		 * resume sequence. As long as this is not the last online
		 * operation in the resume sequence, just build a single sched
		 * domain, ignoring cpusets.
		 */
		partition_sched_domains(1, NULL, NULL);
		if (--num_cpus_frozen)
			return;
		/*
		 * This is the last CPU online operation. So fall through and
		 * restore the original sched domains by considering the
		 * cpuset configurations.
		 */
		cpuset_force_rebuild();
	}
	cpuset_update_active_cpus();
}

static int cpuset_cpu_inactive(unsigned int cpu)
{
	if (!cpuhp_tasks_frozen) {
		int ret = dl_cpu_busy(cpu, NULL);

		if (ret)
			return ret;
		cpuset_update_active_cpus();
	} else {
		num_cpus_frozen++;
		partition_sched_domains(1, NULL, NULL);
	}
	return 0;
}

int sched_cpu_activate(unsigned int cpu)
{
	struct rq *rq = cpu_rq(cpu);
	struct rq_flags rf;

	/*
	 * Clear the balance_push callback and prepare to schedule
	 * regular tasks.
	 */
	balance_push_set(cpu, false);

#ifdef CONFIG_SCHED_SMT
	/*
	 * When going up, increment the number of cores with SMT present.
	 */
	if (cpumask_weight(cpu_smt_mask(cpu)) == 2)
		static_branch_inc_cpuslocked(&sched_smt_present);
#endif
	set_cpu_active(cpu, true);

	if (sched_smp_initialized) {
		sched_domains_numa_masks_set(cpu);
		cpuset_cpu_active();
	}

	/*
	 * Put the rq online, if not already. This happens:
	 *
	 * 1) In the early boot process, because we build the real domains
	 *    after all CPUs have been brought up.
	 *
	 * 2) At runtime, if cpuset_cpu_active() fails to rebuild the
	 *    domains.
	 */
	rq_lock_irqsave(rq, &rf);
	if (rq->rd) {
		BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span));
		set_rq_online(rq);
	}
	rq_unlock_irqrestore(rq, &rf);

	return 0;
}

int sched_cpu_deactivate(unsigned int cpu)
{
	struct rq *rq = cpu_rq(cpu);
	struct rq_flags rf;
	int ret;

	/*
	 * Remove CPU from nohz.idle_cpus_mask to prevent participating in
	 * load balancing when not active
	 */
	nohz_balance_exit_idle(rq);

	set_cpu_active(cpu, false);

	/*
	 * From this point forward, this CPU will refuse to run any task that
	 * is not: migrate_disable() or KTHREAD_IS_PER_CPU, and will actively
	 * push those tasks away until this gets cleared, see
	 * sched_cpu_dying().
	 */
	balance_push_set(cpu, true);

	/*
	 * We've cleared cpu_active_mask / set balance_push, wait for all
	 * preempt-disabled and RCU users of this state to go away such that
	 * all new such users will observe it.
	 *
	 * Specifically, we rely on ttwu to no longer target this CPU, see
	 * ttwu_queue_cond() and is_cpu_allowed().
	 *
	 * Do sync before park smpboot threads to take care the rcu boost case.
	 */
	synchronize_rcu();

	rq_lock_irqsave(rq, &rf);
	if (rq->rd) {
		update_rq_clock(rq);
		BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span));
		set_rq_offline(rq);
	}
	rq_unlock_irqrestore(rq, &rf);

#ifdef CONFIG_SCHED_SMT
	/*
	 * When going down, decrement the number of cores with SMT present.
	 */
	if (cpumask_weight(cpu_smt_mask(cpu)) == 2)
		static_branch_dec_cpuslocked(&sched_smt_present);

	sched_core_cpu_deactivate(cpu);
#endif

	if (!sched_smp_initialized)
		return 0;

	ret = cpuset_cpu_inactive(cpu);
	if (ret) {
		balance_push_set(cpu, false);
		set_cpu_active(cpu, true);
		return ret;
	}
	sched_domains_numa_masks_clear(cpu);
	return 0;
}

static void sched_rq_cpu_starting(unsigned int cpu)
{
	struct rq *rq = cpu_rq(cpu);

	rq->calc_load_update = calc_load_update;
	update_max_interval();
}

int sched_cpu_starting(unsigned int cpu)
{
	sched_core_cpu_starting(cpu);
	sched_rq_cpu_starting(cpu);
	sched_tick_start(cpu);
	trace_android_rvh_sched_cpu_starting(cpu);
	return 0;
}

#ifdef CONFIG_HOTPLUG_CPU

/*
 * Invoked immediately before the stopper thread is invoked to bring the
 * CPU down completely. At this point all per CPU kthreads except the
 * hotplug thread (current) and the stopper thread (inactive) have been
 * either parked or have been unbound from the outgoing CPU. Ensure that
 * any of those which might be on the way out are gone.
 *
 * If after this point a bound task is being woken on this CPU then the
 * responsible hotplug callback has failed to do its job.
 * sched_cpu_dying() will catch it with the appropriate fireworks.
 */
int sched_cpu_wait_empty(unsigned int cpu)
{
	balance_hotplug_wait();
	return 0;
}

/*
 * Since this CPU is going 'away' for a while, fold any nr_active delta we
 * might have. Called from the CPU stopper task after ensuring that the
 * stopper is the last running task on the CPU, so nr_active count is
 * stable. We need to take the teardown thread which is calling this into
 * account, so we hand in adjust = 1 to the load calculation.
 *
 * Also see the comment "Global load-average calculations".
 */
static void calc_load_migrate(struct rq *rq)
{
	long delta = calc_load_fold_active(rq, 1);

	if (delta)
		atomic_long_add(delta, &calc_load_tasks);
}

static void dump_rq_tasks(struct rq *rq, const char *loglvl)
{
	struct task_struct *g, *p;
	int cpu = cpu_of(rq);

	lockdep_assert_rq_held(rq);

	printk("%sCPU%d enqueued tasks (%u total):\n", loglvl, cpu, rq->nr_running);
	for_each_process_thread(g, p) {
		if (task_cpu(p) != cpu)
			continue;

		if (!task_on_rq_queued(p))
			continue;

		printk("%s\tpid: %d, name: %s\n", loglvl, p->pid, p->comm);
	}
}

int sched_cpu_dying(unsigned int cpu)
{
	struct rq *rq = cpu_rq(cpu);
	struct rq_flags rf;

	/* Handle pending wakeups and then migrate everything off */
	sched_tick_stop(cpu);

	rq_lock_irqsave(rq, &rf);
	if (rq->nr_running != 1 || rq_has_pinned_tasks(rq)) {
		WARN(true, "Dying CPU not properly vacated!");
		dump_rq_tasks(rq, KERN_WARNING);
	}
	rq_unlock_irqrestore(rq, &rf);

	trace_android_rvh_sched_cpu_dying(cpu);

	calc_load_migrate(rq);
	update_max_interval();
	hrtick_clear(rq);
	sched_core_cpu_dying(cpu);
	return 0;
}
#endif

void __init sched_init_smp(void)
{
	sched_init_numa();

	/*
	 * There's no userspace yet to cause hotplug operations; hence all the
	 * CPU masks are stable and all blatant races in the below code cannot
	 * happen.
	 */
	mutex_lock(&sched_domains_mutex);
	sched_init_domains(cpu_active_mask);
	mutex_unlock(&sched_domains_mutex);

	/* Move init over to a non-isolated CPU */
	if (set_cpus_allowed_ptr(current, housekeeping_cpumask(HK_FLAG_DOMAIN)) < 0)
		BUG();
	current->flags &= ~PF_NO_SETAFFINITY;
	sched_init_granularity();

	init_sched_rt_class();
	init_sched_dl_class();

	sched_smp_initialized = true;
}

static int __init migration_init(void)
{
	sched_cpu_starting(smp_processor_id());
	return 0;
}
early_initcall(migration_init);

#else
void __init sched_init_smp(void)
{
	sched_init_granularity();
}
#endif /* CONFIG_SMP */

int in_sched_functions(unsigned long addr)
{
	return in_lock_functions(addr) ||
		(addr >= (unsigned long)__sched_text_start
		&& addr < (unsigned long)__sched_text_end);
}

#ifdef CONFIG_CGROUP_SCHED
/*
 * Default task group.
 * Every task in system belongs to this group at bootup.
 */
struct task_group root_task_group;
EXPORT_SYMBOL_GPL(root_task_group);
LIST_HEAD(task_groups);
EXPORT_SYMBOL_GPL(task_groups);

/* Cacheline aligned slab cache for task_group */
static struct kmem_cache *task_group_cache __read_mostly;
#endif

DECLARE_PER_CPU(cpumask_var_t, load_balance_mask);
DECLARE_PER_CPU(cpumask_var_t, select_idle_mask);

void __init sched_init(void)
{
	unsigned long ptr = 0;
	int i;

	/* Make sure the linker didn't screw up */
	BUG_ON(&idle_sched_class + 1 != &fair_sched_class ||
	       &fair_sched_class + 1 != &rt_sched_class ||
	       &rt_sched_class + 1   != &dl_sched_class);
#ifdef CONFIG_SMP
	BUG_ON(&dl_sched_class + 1 != &stop_sched_class);
#endif

	wait_bit_init();

#ifdef CONFIG_FAIR_GROUP_SCHED
	ptr += 2 * nr_cpu_ids * sizeof(void **);
#endif
#ifdef CONFIG_RT_GROUP_SCHED
	ptr += 2 * nr_cpu_ids * sizeof(void **);
#endif
	if (ptr) {
		ptr = (unsigned long)kzalloc(ptr, GFP_NOWAIT);

#ifdef CONFIG_FAIR_GROUP_SCHED
		root_task_group.se = (struct sched_entity **)ptr;
		ptr += nr_cpu_ids * sizeof(void **);

		root_task_group.cfs_rq = (struct cfs_rq **)ptr;
		ptr += nr_cpu_ids * sizeof(void **);

		root_task_group.shares = ROOT_TASK_GROUP_LOAD;
		init_cfs_bandwidth(&root_task_group.cfs_bandwidth);
#endif /* CONFIG_FAIR_GROUP_SCHED */
#ifdef CONFIG_RT_GROUP_SCHED
		root_task_group.rt_se = (struct sched_rt_entity **)ptr;
		ptr += nr_cpu_ids * sizeof(void **);

		root_task_group.rt_rq = (struct rt_rq **)ptr;
		ptr += nr_cpu_ids * sizeof(void **);

#endif /* CONFIG_RT_GROUP_SCHED */
	}
#ifdef CONFIG_CPUMASK_OFFSTACK
	for_each_possible_cpu(i) {
		per_cpu(load_balance_mask, i) = (cpumask_var_t)kzalloc_node(
			cpumask_size(), GFP_KERNEL, cpu_to_node(i));
		per_cpu(select_idle_mask, i) = (cpumask_var_t)kzalloc_node(
			cpumask_size(), GFP_KERNEL, cpu_to_node(i));
	}
#endif /* CONFIG_CPUMASK_OFFSTACK */

	init_rt_bandwidth(&def_rt_bandwidth, global_rt_period(), global_rt_runtime());
	init_dl_bandwidth(&def_dl_bandwidth, global_rt_period(), global_rt_runtime());

#ifdef CONFIG_SMP
	init_defrootdomain();
#endif

#ifdef CONFIG_RT_GROUP_SCHED
	init_rt_bandwidth(&root_task_group.rt_bandwidth,
			global_rt_period(), global_rt_runtime());
#endif /* CONFIG_RT_GROUP_SCHED */

#ifdef CONFIG_CGROUP_SCHED
	task_group_cache = KMEM_CACHE(task_group, 0);

	list_add(&root_task_group.list, &task_groups);
	INIT_LIST_HEAD(&root_task_group.children);
	INIT_LIST_HEAD(&root_task_group.siblings);
	autogroup_init(&init_task);
#endif /* CONFIG_CGROUP_SCHED */

	for_each_possible_cpu(i) {
		struct rq *rq;

		rq = cpu_rq(i);
		raw_spin_lock_init(&rq->__lock);
		rq->nr_running = 0;
		rq->calc_load_active = 0;
		rq->calc_load_update = jiffies + LOAD_FREQ;
		init_cfs_rq(&rq->cfs);
		init_rt_rq(&rq->rt);
		init_dl_rq(&rq->dl);
#ifdef CONFIG_FAIR_GROUP_SCHED
		INIT_LIST_HEAD(&rq->leaf_cfs_rq_list);
		rq->tmp_alone_branch = &rq->leaf_cfs_rq_list;
		/*
		 * How much CPU bandwidth does root_task_group get?
		 *
		 * In case of task-groups formed thr' the cgroup filesystem, it
		 * gets 100% of the CPU resources in the system. This overall
		 * system CPU resource is divided among the tasks of
		 * root_task_group and its child task-groups in a fair manner,
		 * based on each entity's (task or task-group's) weight
		 * (se->load.weight).
		 *
		 * In other words, if root_task_group has 10 tasks of weight
		 * 1024 and two child groups A0 and A1 (of weight 1024 each),
		 * then A0's share of the CPU resource is:
		 *
		 *	A0's bandwidth = 1024 / (10*1024 + 1024 + 1024) = 8.33%
		 *
		 * We achieve this by letting root_task_group's tasks sit
		 * directly in rq->cfs (i.e root_task_group->se[] = NULL).
		 */
		init_tg_cfs_entry(&root_task_group, &rq->cfs, NULL, i, NULL);
#endif /* CONFIG_FAIR_GROUP_SCHED */

		rq->rt.rt_runtime = def_rt_bandwidth.rt_runtime;
#ifdef CONFIG_RT_GROUP_SCHED
		init_tg_rt_entry(&root_task_group, &rq->rt, NULL, i, NULL);
#endif
#ifdef CONFIG_SMP
		rq->sd = NULL;
		rq->rd = NULL;
		rq->cpu_capacity = rq->cpu_capacity_orig = SCHED_CAPACITY_SCALE;
		rq->balance_callback = &balance_push_callback;
		rq->active_balance = 0;
		rq->next_balance = jiffies;
		rq->push_cpu = 0;
		rq->cpu = i;
		rq->online = 0;
		rq->idle_stamp = 0;
		rq->avg_idle = 2*sysctl_sched_migration_cost;
		rq->wake_stamp = jiffies;
		rq->wake_avg_idle = rq->avg_idle;
		rq->max_idle_balance_cost = sysctl_sched_migration_cost;

		INIT_LIST_HEAD(&rq->cfs_tasks);

		rq_attach_root(rq, &def_root_domain);
#ifdef CONFIG_NO_HZ_COMMON
		rq->last_blocked_load_update_tick = jiffies;
		atomic_set(&rq->nohz_flags, 0);

		INIT_CSD(&rq->nohz_csd, nohz_csd_func, rq);
#endif
#ifdef CONFIG_HOTPLUG_CPU
		rcuwait_init(&rq->hotplug_wait);
#endif
#endif /* CONFIG_SMP */
		hrtick_rq_init(rq);
		atomic_set(&rq->nr_iowait, 0);

#ifdef CONFIG_SCHED_CORE
		rq->core = rq;
		rq->core_pick = NULL;
		rq->core_enabled = 0;
		rq->core_tree = RB_ROOT;
		rq->core_forceidle = false;

		rq->core_cookie = 0UL;
#endif
	}

	set_load_weight(&init_task, false);

	/*
	 * The boot idle thread does lazy MMU switching as well:
	 */
	mmgrab(&init_mm);
	enter_lazy_tlb(&init_mm, current);

	/*
	 * Make us the idle thread. Technically, schedule() should not be
	 * called from this thread, however somewhere below it might be,
	 * but because we are the idle thread, we just pick up running again
	 * when this runqueue becomes "idle".
	 */
	init_idle(current, smp_processor_id());

	calc_load_update = jiffies + LOAD_FREQ;

#ifdef CONFIG_SMP
	idle_thread_set_boot_cpu();
	balance_push_set(smp_processor_id(), false);
#endif
	init_sched_fair_class();

	psi_init();

	init_uclamp();

	scheduler_running = 1;
}

#ifdef CONFIG_DEBUG_ATOMIC_SLEEP
static inline int preempt_count_equals(int preempt_offset)
{
	int nested = preempt_count() + rcu_preempt_depth();

	return (nested == preempt_offset);
}

void __might_sleep(const char *file, int line, int preempt_offset)
{
	unsigned int state = get_current_state();
	/*
	 * Blocking primitives will set (and therefore destroy) current->state,
	 * since we will exit with TASK_RUNNING make sure we enter with it,
	 * otherwise we will destroy state.
	 */
	WARN_ONCE(state != TASK_RUNNING && current->task_state_change,
			"do not call blocking ops when !TASK_RUNNING; "
			"state=%x set at [<%p>] %pS\n", state,
			(void *)current->task_state_change,
			(void *)current->task_state_change);

	___might_sleep(file, line, preempt_offset);
}
EXPORT_SYMBOL(__might_sleep);

void ___might_sleep(const char *file, int line, int preempt_offset)
{
	/* Ratelimiting timestamp: */
	static unsigned long prev_jiffy;

	unsigned long preempt_disable_ip;

	/* WARN_ON_ONCE() by default, no rate limit required: */
	rcu_sleep_check();

	if ((preempt_count_equals(preempt_offset) && !irqs_disabled() &&
	     !is_idle_task(current) && !current->non_block_count) ||
	    system_state == SYSTEM_BOOTING || system_state > SYSTEM_RUNNING ||
	    oops_in_progress)
		return;

	if (time_before(jiffies, prev_jiffy + HZ) && prev_jiffy)
		return;
	prev_jiffy = jiffies;

	/* Save this before calling printk(), since that will clobber it: */
	preempt_disable_ip = get_preempt_disable_ip(current);

	printk(KERN_ERR
		"BUG: sleeping function called from invalid context at %s:%d\n",
			file, line);
	printk(KERN_ERR
		"in_atomic(): %d, irqs_disabled(): %d, non_block: %d, pid: %d, name: %s\n",
			in_atomic(), irqs_disabled(), current->non_block_count,
			current->pid, current->comm);

	if (task_stack_end_corrupted(current))
		printk(KERN_EMERG "Thread overran stack, or stack corrupted\n");

	debug_show_held_locks(current);
	if (irqs_disabled())
		print_irqtrace_events(current);
	if (IS_ENABLED(CONFIG_DEBUG_PREEMPT)
	    && !preempt_count_equals(preempt_offset)) {
		pr_err("Preemption disabled at:");
		print_ip_sym(KERN_ERR, preempt_disable_ip);
	}

	trace_android_rvh_schedule_bug(NULL);

	dump_stack();
	add_taint(TAINT_WARN, LOCKDEP_STILL_OK);
}
EXPORT_SYMBOL(___might_sleep);
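
/*
 * A minimal example of what trips the check above (illustrative only;
 * 'lock' and 'sz' are hypothetical):
 *
 *	spin_lock(&lock);
 *	p = kmalloc(sz, GFP_KERNEL);	// may sleep -> might_sleep() fires
 *	spin_unlock(&lock);
 *
 * kmalloc(GFP_KERNEL) calls might_sleep(); with CONFIG_DEBUG_ATOMIC_SLEEP
 * the elevated preempt_count() from spin_lock() makes ___might_sleep()
 * print the "BUG: sleeping function called from invalid context" report.
 * The fix is GFP_ATOMIC or moving the allocation outside the lock.
 */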

void __cant_sleep(const char *file, int line, int preempt_offset)
{
	static unsigned long prev_jiffy;

	if (irqs_disabled())
		return;

	if (!IS_ENABLED(CONFIG_PREEMPT_COUNT))
		return;

	if (preempt_count() > preempt_offset)
		return;

	if (time_before(jiffies, prev_jiffy + HZ) && prev_jiffy)
		return;
	prev_jiffy = jiffies;

	printk(KERN_ERR "BUG: assuming atomic context at %s:%d\n", file, line);
	printk(KERN_ERR "in_atomic(): %d, irqs_disabled(): %d, pid: %d, name: %s\n",
			in_atomic(), irqs_disabled(),
			current->pid, current->comm);

	debug_show_held_locks(current);
	dump_stack();
	add_taint(TAINT_WARN, LOCKDEP_STILL_OK);
}
EXPORT_SYMBOL_GPL(__cant_sleep);

#ifdef CONFIG_SMP
void __cant_migrate(const char *file, int line)
{
	static unsigned long prev_jiffy;

	if (irqs_disabled())
		return;

	if (is_migration_disabled(current))
		return;

	if (!IS_ENABLED(CONFIG_PREEMPT_COUNT))
		return;

	if (preempt_count() > 0)
		return;

	if (time_before(jiffies, prev_jiffy + HZ) && prev_jiffy)
		return;
	prev_jiffy = jiffies;

	pr_err("BUG: assuming non migratable context at %s:%d\n", file, line);
	pr_err("in_atomic(): %d, irqs_disabled(): %d, migration_disabled() %u pid: %d, name: %s\n",
	       in_atomic(), irqs_disabled(), is_migration_disabled(current),
	       current->pid, current->comm);

	debug_show_held_locks(current);
	dump_stack();
	add_taint(TAINT_WARN, LOCKDEP_STILL_OK);
}
EXPORT_SYMBOL_GPL(__cant_migrate);
#endif
#endif

#ifdef CONFIG_MAGIC_SYSRQ
void normalize_rt_tasks(void)
{
	struct task_struct *g, *p;
	struct sched_attr attr = {
		.sched_policy = SCHED_NORMAL,
	};

	read_lock(&tasklist_lock);
	for_each_process_thread(g, p) {
		/*
		 * Only normalize user tasks:
		 */
		if (p->flags & PF_KTHREAD)
			continue;

		p->se.exec_start = 0;
		schedstat_set(p->se.statistics.wait_start,  0);
		schedstat_set(p->se.statistics.sleep_start, 0);
		schedstat_set(p->se.statistics.block_start, 0);

		if (!dl_task(p) && !rt_task(p)) {
			/*
			 * Renice negative nice level userspace
			 * tasks back to 0:
			 */
			if (task_nice(p) < 0)
				set_user_nice(p, 0);
			continue;
		}

		__sched_setscheduler(p, &attr, false, false);
	}
	read_unlock(&tasklist_lock);
}

#endif /* CONFIG_MAGIC_SYSRQ */

#if defined(CONFIG_IA64) || defined(CONFIG_KGDB_KDB)
/*
 * These functions are only useful for the IA64 MCA handling, or kdb.
 *
 * They can only be called when the whole system has been
 * stopped - every CPU needs to be quiescent, and no scheduling
 * activity can take place. Using them for anything else would
 * be a serious bug, and as a result, they aren't even visible
 * under any other configuration.
 */

/**
 * curr_task - return the current task for a given CPU.
 * @cpu: the processor in question.
 *
 * ONLY VALID WHEN THE WHOLE SYSTEM IS STOPPED!
 *
 * Return: The current task for @cpu.
 */
struct task_struct *curr_task(int cpu)
{
	return cpu_curr(cpu);
}

#endif /* defined(CONFIG_IA64) || defined(CONFIG_KGDB_KDB) */

#ifdef CONFIG_IA64
/**
 * ia64_set_curr_task - set the current task for a given CPU.
 * @cpu: the processor in question.
 * @p: the task pointer to set.
 *
 * Description: This function must only be used when non-maskable interrupts
 * are serviced on a separate stack. It allows the architecture to switch the
 * notion of the current task on a CPU in a non-blocking manner. This function
 * must be called with all CPU's synchronized, and interrupts disabled, and
 * the caller must save the original value of the current task (see
 * curr_task() above) and restore that value before reenabling interrupts and
 * re-starting the system.
 *
 * ONLY VALID WHEN THE WHOLE SYSTEM IS STOPPED!
 */
void ia64_set_curr_task(int cpu, struct task_struct *p)
{
	cpu_curr(cpu) = p;
}

#endif
|
|
|
|
#ifdef CONFIG_CGROUP_SCHED
|
|
/* task_group_lock serializes the addition/removal of task groups */
|
|
static DEFINE_SPINLOCK(task_group_lock);
|
|
|
|
static inline void alloc_uclamp_sched_group(struct task_group *tg,
|
|
struct task_group *parent)
|
|
{
|
|
#ifdef CONFIG_UCLAMP_TASK_GROUP
|
|
enum uclamp_id clamp_id;
|
|
|
|
for_each_clamp_id(clamp_id) {
|
|
uclamp_se_set(&tg->uclamp_req[clamp_id],
|
|
uclamp_none(clamp_id), false);
|
|
tg->uclamp[clamp_id] = parent->uclamp[clamp_id];
|
|
}
|
|
#endif
|
|
}

static void sched_free_group(struct task_group *tg)
{
	free_fair_sched_group(tg);
	free_rt_sched_group(tg);
	autogroup_free(tg);
	kmem_cache_free(task_group_cache, tg);
}

static void sched_free_group_rcu(struct rcu_head *rcu)
{
	sched_free_group(container_of(rcu, struct task_group, rcu));
}

static void sched_unregister_group(struct task_group *tg)
{
	unregister_fair_sched_group(tg);
	unregister_rt_sched_group(tg);
	/*
	 * We have to wait for yet another RCU grace period to expire, as
	 * print_cfs_stats() might run concurrently.
	 */
	call_rcu(&tg->rcu, sched_free_group_rcu);
}

/* allocate runqueue etc for a new task group */
struct task_group *sched_create_group(struct task_group *parent)
{
	struct task_group *tg;

	tg = kmem_cache_alloc(task_group_cache, GFP_KERNEL | __GFP_ZERO);
	if (!tg)
		return ERR_PTR(-ENOMEM);

	if (!alloc_fair_sched_group(tg, parent))
		goto err;

	if (!alloc_rt_sched_group(tg, parent))
		goto err;

	alloc_uclamp_sched_group(tg, parent);

	return tg;

err:
	sched_free_group(tg);
	return ERR_PTR(-ENOMEM);
}

void sched_online_group(struct task_group *tg, struct task_group *parent)
{
	unsigned long flags;

	spin_lock_irqsave(&task_group_lock, flags);
	list_add_rcu(&tg->list, &task_groups);

	/* Root should already exist: */
	WARN_ON(!parent);

	tg->parent = parent;
	INIT_LIST_HEAD(&tg->children);
	list_add_rcu(&tg->siblings, &parent->children);
	spin_unlock_irqrestore(&task_group_lock, flags);

	online_fair_sched_group(tg);
}

/* rcu callback to free various structures associated with a task group */
static void sched_unregister_group_rcu(struct rcu_head *rhp)
{
	/* Now it should be safe to free those cfs_rqs: */
	sched_unregister_group(container_of(rhp, struct task_group, rcu));
}

void sched_destroy_group(struct task_group *tg)
{
	/* Wait for possible concurrent references to cfs_rqs to complete: */
	call_rcu(&tg->rcu, sched_unregister_group_rcu);
}

void sched_release_group(struct task_group *tg)
{
	unsigned long flags;

	/*
	 * Unlink first, to avoid walk_tg_tree_from() from finding us (via
	 * sched_cfs_period_timer()).
	 *
	 * For this to be effective, we have to wait for all pending users of
	 * this task group to leave their RCU critical section to ensure no new
	 * user will see our dying task group any more. Specifically ensure
	 * that tg_unthrottle_up() won't add decayed cfs_rq's to it.
	 *
	 * We therefore defer calling unregister_fair_sched_group() to
	 * sched_unregister_group(), which is guaranteed to get called only
	 * after the current RCU grace period has expired.
	 */
	spin_lock_irqsave(&task_group_lock, flags);
	list_del_rcu(&tg->list);
	list_del_rcu(&tg->siblings);
	spin_unlock_irqrestore(&task_group_lock, flags);
}

static void sched_change_group(struct task_struct *tsk, int type)
{
	struct task_group *tg;

	/*
	 * All callers are synchronized by task_rq_lock(); we do not use RCU
	 * which is pointless here. Thus, we pass "true" to task_css_check()
	 * to prevent lockdep warnings.
	 */
	tg = container_of(task_css_check(tsk, cpu_cgrp_id, true),
			  struct task_group, css);
	tg = autogroup_task_group(tsk, tg);
	tsk->sched_task_group = tg;

#ifdef CONFIG_FAIR_GROUP_SCHED
	if (tsk->sched_class->task_change_group)
		tsk->sched_class->task_change_group(tsk, type);
	else
#endif
		set_task_rq(tsk, task_cpu(tsk));
}

/*
 * Change task's runqueue when it moves between groups.
 *
 * The caller of this function should have put the task in its new group by
 * now. This function just updates tsk->se.cfs_rq and tsk->se.parent to reflect
 * its new group.
 */
void sched_move_task(struct task_struct *tsk)
{
	int queued, running, queue_flags =
		DEQUEUE_SAVE | DEQUEUE_MOVE | DEQUEUE_NOCLOCK;
	struct rq_flags rf;
	struct rq *rq;

	rq = task_rq_lock(tsk, &rf);
	update_rq_clock(rq);

	running = task_current(rq, tsk);
	queued = task_on_rq_queued(tsk);

	if (queued)
		dequeue_task(rq, tsk, queue_flags);
	if (running)
		put_prev_task(rq, tsk);

	sched_change_group(tsk, TASK_MOVE_GROUP);

	if (queued)
		enqueue_task(rq, tsk, queue_flags);
	if (running) {
		set_next_task(rq, tsk);
		/*
		 * After changing group, the running task may have joined a
		 * throttled one but it's still the running task. Trigger a
		 * resched to make sure that task can still run.
		 */
		resched_curr(rq);
	}

	task_rq_unlock(rq, tsk, &rf);
}

static inline struct task_group *css_tg(struct cgroup_subsys_state *css)
{
	return css ? container_of(css, struct task_group, css) : NULL;
}

static struct cgroup_subsys_state *
cpu_cgroup_css_alloc(struct cgroup_subsys_state *parent_css)
{
	struct task_group *parent = css_tg(parent_css);
	struct task_group *tg;

	if (!parent) {
		/* This is early initialization for the top cgroup */
		return &root_task_group.css;
	}

	tg = sched_create_group(parent);
	if (IS_ERR(tg))
		return ERR_PTR(-ENOMEM);

	return &tg->css;
}

/* Expose task group only after completing cgroup initialization */
static int cpu_cgroup_css_online(struct cgroup_subsys_state *css)
{
	struct task_group *tg = css_tg(css);
	struct task_group *parent = css_tg(css->parent);

	if (parent)
		sched_online_group(tg, parent);

#ifdef CONFIG_UCLAMP_TASK_GROUP
	/* Propagate the effective uclamp value for the new group */
	mutex_lock(&uclamp_mutex);
	rcu_read_lock();
	cpu_util_update_eff(css);
	rcu_read_unlock();
	mutex_unlock(&uclamp_mutex);
#endif

	trace_android_rvh_cpu_cgroup_online(css);
	return 0;
}

static void cpu_cgroup_css_released(struct cgroup_subsys_state *css)
{
	struct task_group *tg = css_tg(css);

	sched_release_group(tg);
}

static void cpu_cgroup_css_free(struct cgroup_subsys_state *css)
{
	struct task_group *tg = css_tg(css);

	/*
	 * Relies on the RCU grace period between css_released() and this.
	 */
	sched_unregister_group(tg);
}
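
/*
 * Note on teardown ordering (a descriptive summary of the callbacks above):
 * css_released() runs first and calls sched_release_group(), which only
 * unlinks the group from the task_groups list. After the RCU grace period
 * that cgroup core guarantees between css_released() and css_free(),
 * sched_unregister_group() unregisters the group and defers the final
 * freeing through yet another call_rcu(), so concurrent walkers such as
 * print_cfs_stats() never see freed cfs_rq structures.
 */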

/*
 * This is called before wake_up_new_task(), therefore we really only
 * have to set its group bits, all the other stuff does not apply.
 */
static void cpu_cgroup_fork(struct task_struct *task)
{
	struct rq_flags rf;
	struct rq *rq;

	rq = task_rq_lock(task, &rf);

	update_rq_clock(rq);
	sched_change_group(task, TASK_SET_GROUP);

	task_rq_unlock(rq, task, &rf);
}

static int cpu_cgroup_can_attach(struct cgroup_taskset *tset)
{
	struct task_struct *task;
	struct cgroup_subsys_state *css;
	int ret = 0;

	cgroup_taskset_for_each(task, css, tset) {
#ifdef CONFIG_RT_GROUP_SCHED
		if (!sched_rt_can_attach(css_tg(css), task))
			return -EINVAL;
#endif
		/*
		 * Serialize against wake_up_new_task() such that if it's
		 * running, we're sure to observe its full state.
		 */
		raw_spin_lock_irq(&task->pi_lock);
		/*
		 * Avoid calling sched_move_task() before wake_up_new_task()
		 * has happened. This would lead to problems with PELT, due to
		 * move wanting to detach+attach while we're not attached yet.
		 */
		if (READ_ONCE(task->__state) == TASK_NEW)
			ret = -EINVAL;
		raw_spin_unlock_irq(&task->pi_lock);

		if (ret)
			break;
	}

	trace_android_rvh_cpu_cgroup_can_attach(tset, &ret);

	return ret;
}

static void cpu_cgroup_attach(struct cgroup_taskset *tset)
{
	struct task_struct *task;
	struct cgroup_subsys_state *css;

	cgroup_taskset_for_each(task, css, tset)
		sched_move_task(task);

	trace_android_rvh_cpu_cgroup_attach(tset);
}

#ifdef CONFIG_UCLAMP_TASK_GROUP
static void cpu_util_update_eff(struct cgroup_subsys_state *css)
{
	struct cgroup_subsys_state *top_css = css;
	struct uclamp_se *uc_parent = NULL;
	struct uclamp_se *uc_se = NULL;
	unsigned int eff[UCLAMP_CNT];
	enum uclamp_id clamp_id;
	unsigned int clamps;

	lockdep_assert_held(&uclamp_mutex);
	SCHED_WARN_ON(!rcu_read_lock_held());

	css_for_each_descendant_pre(css, top_css) {
		uc_parent = css_tg(css)->parent
			? css_tg(css)->parent->uclamp : NULL;

		for_each_clamp_id(clamp_id) {
			/* Assume effective clamps match requested clamps */
			eff[clamp_id] = css_tg(css)->uclamp_req[clamp_id].value;
			/* Cap effective clamps with parent's effective clamps */
			if (uc_parent &&
			    eff[clamp_id] > uc_parent[clamp_id].value) {
				eff[clamp_id] = uc_parent[clamp_id].value;
			}
		}
		/* Ensure protection is always capped by limit */
		eff[UCLAMP_MIN] = min(eff[UCLAMP_MIN], eff[UCLAMP_MAX]);

		/* Propagate most restrictive effective clamps */
		clamps = 0x0;
		uc_se = css_tg(css)->uclamp;
		for_each_clamp_id(clamp_id) {
			if (eff[clamp_id] == uc_se[clamp_id].value)
				continue;
			uc_se[clamp_id].value = eff[clamp_id];
			uc_se[clamp_id].bucket_id = uclamp_bucket_id(eff[clamp_id]);
			clamps |= (0x1 << clamp_id);
		}
		if (!clamps) {
			css = css_rightmost_descendant(css);
			continue;
		}

		/* Immediately update descendants RUNNABLE tasks */
		uclamp_update_active_tasks(css);
	}
}
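
/*
 * Illustrative example of the propagation above: if a parent group has an
 * effective uclamp.max of 512 and a child requests 768, the child's
 * effective value is capped to 512. When none of a group's effective
 * clamps change, css_rightmost_descendant() skips its whole subtree
 * instead of visiting every descendant.
 */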

/*
 * Integer 10^N with a given N exponent by casting to integer the literal "1eN"
 * C expression. Since there is no way to convert a macro argument (N) into a
 * character constant, use two levels of macros.
 */
#define _POW10(exp) ((unsigned int)1e##exp)
#define POW10(exp) _POW10(exp)

struct uclamp_request {
#define UCLAMP_PERCENT_SHIFT 2
#define UCLAMP_PERCENT_SCALE (100 * POW10(UCLAMP_PERCENT_SHIFT))
	s64 percent;
	u64 util;
	int ret;
};
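
/*
 * Worked expansion of the macros above: with UCLAMP_PERCENT_SHIFT == 2,
 * POW10(2) becomes ((unsigned int)1e2) == 100, so UCLAMP_PERCENT_SCALE is
 * 100 * 100 == 10000. Percentages are thus stored with two fractional
 * decimal digits: 100.00% <-> 10000, 12.34% <-> 1234.
 */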

static inline struct uclamp_request
capacity_from_percent(char *buf)
{
	struct uclamp_request req = {
		.percent = UCLAMP_PERCENT_SCALE,
		.util = SCHED_CAPACITY_SCALE,
		.ret = 0,
	};

	buf = strim(buf);
	if (strcmp(buf, "max")) {
		req.ret = cgroup_parse_float(buf, UCLAMP_PERCENT_SHIFT,
					     &req.percent);
		if (req.ret)
			return req;
		if ((u64)req.percent > UCLAMP_PERCENT_SCALE) {
			req.ret = -ERANGE;
			return req;
		}

		req.util = req.percent << SCHED_CAPACITY_SHIFT;
		req.util = DIV_ROUND_CLOSEST_ULL(req.util, UCLAMP_PERCENT_SCALE);
	}

	return req;
}
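
/*
 * Worked example for capacity_from_percent(): writing "50" yields
 * req.percent == 5000 (50.00%), and then req.util == (5000 << 10) / 10000
 * == 512, i.e. half of SCHED_CAPACITY_SCALE (1024). The special token
 * "max" leaves the defaults of 100% and full capacity in place.
 */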

static ssize_t cpu_uclamp_write(struct kernfs_open_file *of, char *buf,
				size_t nbytes, loff_t off,
				enum uclamp_id clamp_id)
{
	struct uclamp_request req;
	struct task_group *tg;

	req = capacity_from_percent(buf);
	if (req.ret)
		return req.ret;

	static_branch_enable(&sched_uclamp_used);

	mutex_lock(&uclamp_mutex);
	rcu_read_lock();

	tg = css_tg(of_css(of));
	if (tg->uclamp_req[clamp_id].value != req.util)
		uclamp_se_set(&tg->uclamp_req[clamp_id], req.util, false);

	/*
	 * Because the conversion rounding is not recoverable, we keep track
	 * of the exact requested value.
	 */
	tg->uclamp_pct[clamp_id] = req.percent;

	/* Update effective clamps to track the most restrictive value */
	cpu_util_update_eff(of_css(of));

	rcu_read_unlock();
	mutex_unlock(&uclamp_mutex);

	return nbytes;
}

static ssize_t cpu_uclamp_min_write(struct kernfs_open_file *of,
				    char *buf, size_t nbytes,
				    loff_t off)
{
	return cpu_uclamp_write(of, buf, nbytes, off, UCLAMP_MIN);
}

static ssize_t cpu_uclamp_max_write(struct kernfs_open_file *of,
				    char *buf, size_t nbytes,
				    loff_t off)
{
	return cpu_uclamp_write(of, buf, nbytes, off, UCLAMP_MAX);
}
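
/*
 * From userspace these handlers back the cgroup v2 files cpu.uclamp.min
 * and cpu.uclamp.max; for example:
 *
 *   echo "75.5" > /sys/fs/cgroup/<group>/cpu.uclamp.min
 *   echo "max"  > /sys/fs/cgroup/<group>/cpu.uclamp.max
 *
 * The exact mount path of the cgroup hierarchy depends on the system.
 */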

static inline void cpu_uclamp_print(struct seq_file *sf,
				    enum uclamp_id clamp_id)
{
	struct task_group *tg;
	u64 util_clamp;
	u64 percent;
	u32 rem;

	rcu_read_lock();
	tg = css_tg(seq_css(sf));
	util_clamp = tg->uclamp_req[clamp_id].value;
	rcu_read_unlock();

	if (util_clamp == SCHED_CAPACITY_SCALE) {
		seq_puts(sf, "max\n");
		return;
	}

	percent = tg->uclamp_pct[clamp_id];
	percent = div_u64_rem(percent, POW10(UCLAMP_PERCENT_SHIFT), &rem);
	seq_printf(sf, "%llu.%0*u\n", percent, UCLAMP_PERCENT_SHIFT, rem);
}

static int cpu_uclamp_min_show(struct seq_file *sf, void *v)
{
	cpu_uclamp_print(sf, UCLAMP_MIN);
	return 0;
}

static int cpu_uclamp_max_show(struct seq_file *sf, void *v)
{
	cpu_uclamp_print(sf, UCLAMP_MAX);
	return 0;
}

static int cpu_uclamp_ls_write_u64(struct cgroup_subsys_state *css,
				   struct cftype *cftype, u64 ls)
{
	struct task_group *tg;

	if (ls > 1)
		return -EINVAL;
	tg = css_tg(css);
	tg->latency_sensitive = (unsigned int) ls;

	return 0;
}

static u64 cpu_uclamp_ls_read_u64(struct cgroup_subsys_state *css,
				  struct cftype *cft)
{
	struct task_group *tg = css_tg(css);

	return (u64) tg->latency_sensitive;
}
#endif /* CONFIG_UCLAMP_TASK_GROUP */

#ifdef CONFIG_FAIR_GROUP_SCHED
static int cpu_shares_write_u64(struct cgroup_subsys_state *css,
				struct cftype *cftype, u64 shareval)
{
	if (shareval > scale_load_down(ULONG_MAX))
		shareval = MAX_SHARES;
	return sched_group_set_shares(css_tg(css), scale_load(shareval));
}

static u64 cpu_shares_read_u64(struct cgroup_subsys_state *css,
			       struct cftype *cft)
{
	struct task_group *tg = css_tg(css);

	return (u64) scale_load_down(tg->shares);
}

#ifdef CONFIG_CFS_BANDWIDTH
static DEFINE_MUTEX(cfs_constraints_mutex);

const u64 max_cfs_quota_period = 1 * NSEC_PER_SEC; /* 1s */
static const u64 min_cfs_quota_period = 1 * NSEC_PER_MSEC; /* 1ms */
/* More than 203 days if BW_SHIFT equals 20. */
static const u64 max_cfs_runtime = MAX_BW * NSEC_PER_USEC;

static int __cfs_schedulable(struct task_group *tg, u64 period, u64 runtime);

static int tg_set_cfs_bandwidth(struct task_group *tg, u64 period, u64 quota,
				u64 burst)
{
	int i, ret = 0, runtime_enabled, runtime_was_enabled;
	struct cfs_bandwidth *cfs_b = &tg->cfs_bandwidth;

	if (tg == &root_task_group)
		return -EINVAL;

	/*
	 * Ensure we have at least some amount of bandwidth every period. This
	 * is to prevent reaching a state of large arrears when throttled via
	 * entity_tick() resulting in prolonged exit starvation.
	 */
	if (quota < min_cfs_quota_period || period < min_cfs_quota_period)
		return -EINVAL;

	/*
	 * Likewise, bound things on the other side by preventing insane quota
	 * periods. This also allows us to normalize in computing quota
	 * feasibility.
	 */
	if (period > max_cfs_quota_period)
		return -EINVAL;

	/*
	 * Bound quota to defend quota against overflow during bandwidth shift.
	 */
	if (quota != RUNTIME_INF && quota > max_cfs_runtime)
		return -EINVAL;

	if (quota != RUNTIME_INF && (burst > quota ||
				     burst + quota > max_cfs_runtime))
		return -EINVAL;

	/*
	 * Prevent race between setting of cfs_rq->runtime_enabled and
	 * unthrottle_offline_cfs_rqs().
	 */
	cpus_read_lock();
	mutex_lock(&cfs_constraints_mutex);
	ret = __cfs_schedulable(tg, period, quota);
	if (ret)
		goto out_unlock;

	runtime_enabled = quota != RUNTIME_INF;
	runtime_was_enabled = cfs_b->quota != RUNTIME_INF;
	/*
	 * If we need to toggle cfs_bandwidth_used, off->on must occur
	 * before making related changes, and on->off must occur afterwards
	 */
	if (runtime_enabled && !runtime_was_enabled)
		cfs_bandwidth_usage_inc();
	raw_spin_lock_irq(&cfs_b->lock);
	cfs_b->period = ns_to_ktime(period);
	cfs_b->quota = quota;
	cfs_b->burst = burst;

	__refill_cfs_bandwidth_runtime(cfs_b);

	/* Restart the period timer (if active) to handle new period expiry: */
	if (runtime_enabled)
		start_cfs_bandwidth(cfs_b);

	raw_spin_unlock_irq(&cfs_b->lock);

	for_each_online_cpu(i) {
		struct cfs_rq *cfs_rq = tg->cfs_rq[i];
		struct rq *rq = cfs_rq->rq;
		struct rq_flags rf;

		rq_lock_irq(rq, &rf);
		cfs_rq->runtime_enabled = runtime_enabled;
		cfs_rq->runtime_remaining = 0;

		if (cfs_rq->throttled)
			unthrottle_cfs_rq(cfs_rq);
		rq_unlock_irq(rq, &rf);
	}
	if (runtime_was_enabled && !runtime_enabled)
		cfs_bandwidth_usage_dec();
out_unlock:
	mutex_unlock(&cfs_constraints_mutex);
	cpus_read_unlock();

	return ret;
}
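
/*
 * Worked example of the constraints enforced above: quota = 50000us with
 * period = 100000us allows the group 50% of one CPU each period, while
 * quota = 200000us with the same period allows up to two CPUs' worth of
 * runtime. quota == RUNTIME_INF (cfs_quota_us == -1) disables throttling.
 */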

static int tg_set_cfs_quota(struct task_group *tg, long cfs_quota_us)
{
	u64 quota, period, burst;

	period = ktime_to_ns(tg->cfs_bandwidth.period);
	burst = tg->cfs_bandwidth.burst;
	if (cfs_quota_us < 0)
		quota = RUNTIME_INF;
	else if ((u64)cfs_quota_us <= U64_MAX / NSEC_PER_USEC)
		quota = (u64)cfs_quota_us * NSEC_PER_USEC;
	else
		return -EINVAL;

	return tg_set_cfs_bandwidth(tg, period, quota, burst);
}

static long tg_get_cfs_quota(struct task_group *tg)
{
	u64 quota_us;

	if (tg->cfs_bandwidth.quota == RUNTIME_INF)
		return -1;

	quota_us = tg->cfs_bandwidth.quota;
	do_div(quota_us, NSEC_PER_USEC);

	return quota_us;
}

static int tg_set_cfs_period(struct task_group *tg, long cfs_period_us)
{
	u64 quota, period, burst;

	if ((u64)cfs_period_us > U64_MAX / NSEC_PER_USEC)
		return -EINVAL;

	period = (u64)cfs_period_us * NSEC_PER_USEC;
	quota = tg->cfs_bandwidth.quota;
	burst = tg->cfs_bandwidth.burst;

	return tg_set_cfs_bandwidth(tg, period, quota, burst);
}

static long tg_get_cfs_period(struct task_group *tg)
{
	u64 cfs_period_us;

	cfs_period_us = ktime_to_ns(tg->cfs_bandwidth.period);
	do_div(cfs_period_us, NSEC_PER_USEC);

	return cfs_period_us;
}

static int tg_set_cfs_burst(struct task_group *tg, long cfs_burst_us)
{
	u64 quota, period, burst;

	if ((u64)cfs_burst_us > U64_MAX / NSEC_PER_USEC)
		return -EINVAL;

	burst = (u64)cfs_burst_us * NSEC_PER_USEC;
	period = ktime_to_ns(tg->cfs_bandwidth.period);
	quota = tg->cfs_bandwidth.quota;

	return tg_set_cfs_bandwidth(tg, period, quota, burst);
}

static long tg_get_cfs_burst(struct task_group *tg)
{
	u64 burst_us;

	burst_us = tg->cfs_bandwidth.burst;
	do_div(burst_us, NSEC_PER_USEC);

	return burst_us;
}

static s64 cpu_cfs_quota_read_s64(struct cgroup_subsys_state *css,
				  struct cftype *cft)
{
	return tg_get_cfs_quota(css_tg(css));
}

static int cpu_cfs_quota_write_s64(struct cgroup_subsys_state *css,
				   struct cftype *cftype, s64 cfs_quota_us)
{
	return tg_set_cfs_quota(css_tg(css), cfs_quota_us);
}

static u64 cpu_cfs_period_read_u64(struct cgroup_subsys_state *css,
				   struct cftype *cft)
{
	return tg_get_cfs_period(css_tg(css));
}

static int cpu_cfs_period_write_u64(struct cgroup_subsys_state *css,
				    struct cftype *cftype, u64 cfs_period_us)
{
	return tg_set_cfs_period(css_tg(css), cfs_period_us);
}

static u64 cpu_cfs_burst_read_u64(struct cgroup_subsys_state *css,
				  struct cftype *cft)
{
	return tg_get_cfs_burst(css_tg(css));
}

static int cpu_cfs_burst_write_u64(struct cgroup_subsys_state *css,
				   struct cftype *cftype, u64 cfs_burst_us)
{
	return tg_set_cfs_burst(css_tg(css), cfs_burst_us);
}

struct cfs_schedulable_data {
	struct task_group *tg;
	u64 period, quota;
};

/*
 * normalize group quota/period to be quota/max_period
 * note: units are usecs
 */
static u64 normalize_cfs_quota(struct task_group *tg,
			       struct cfs_schedulable_data *d)
{
	u64 quota, period;

	if (tg == d->tg) {
		period = d->period;
		quota = d->quota;
	} else {
		period = tg_get_cfs_period(tg);
		quota = tg_get_cfs_quota(tg);
	}

	/* note: these should typically be equivalent */
	if (quota == RUNTIME_INF || quota == -1)
		return RUNTIME_INF;

	return to_ratio(period, quota);
}

static int tg_cfs_schedulable_down(struct task_group *tg, void *data)
{
	struct cfs_schedulable_data *d = data;
	struct cfs_bandwidth *cfs_b = &tg->cfs_bandwidth;
	s64 quota = 0, parent_quota = -1;

	if (!tg->parent) {
		quota = RUNTIME_INF;
	} else {
		struct cfs_bandwidth *parent_b = &tg->parent->cfs_bandwidth;

		quota = normalize_cfs_quota(tg, d);
		parent_quota = parent_b->hierarchical_quota;

		/*
		 * Ensure max(child_quota) <= parent_quota. On cgroup2,
		 * always take the min. On cgroup1, only inherit when no
		 * limit is set:
		 */
		if (cgroup_subsys_on_dfl(cpu_cgrp_subsys)) {
			quota = min(quota, parent_quota);
		} else {
			if (quota == RUNTIME_INF)
				quota = parent_quota;
			else if (parent_quota != RUNTIME_INF && quota > parent_quota)
				return -EINVAL;
		}
	}
	cfs_b->hierarchical_quota = quota;

	return 0;
}
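
/*
 * Example of the hierarchy rule above: on cgroup2 a child's effective
 * quota is simply min(child, parent), so configuring a child above its
 * parent is accepted but clamped. On cgroup1 the same configuration is
 * rejected with -EINVAL, unless the child has no limit of its own and
 * therefore inherits the parent's quota.
 */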

static int __cfs_schedulable(struct task_group *tg, u64 period, u64 quota)
{
	int ret;
	struct cfs_schedulable_data data = {
		.tg = tg,
		.period = period,
		.quota = quota,
	};

	if (quota != RUNTIME_INF) {
		do_div(data.period, NSEC_PER_USEC);
		do_div(data.quota, NSEC_PER_USEC);
	}

	rcu_read_lock();
	ret = walk_tg_tree(tg_cfs_schedulable_down, tg_nop, &data);
	rcu_read_unlock();

	return ret;
}

static int cpu_cfs_stat_show(struct seq_file *sf, void *v)
{
	struct task_group *tg = css_tg(seq_css(sf));
	struct cfs_bandwidth *cfs_b = &tg->cfs_bandwidth;

	seq_printf(sf, "nr_periods %d\n", cfs_b->nr_periods);
	seq_printf(sf, "nr_throttled %d\n", cfs_b->nr_throttled);
	seq_printf(sf, "throttled_time %llu\n", cfs_b->throttled_time);

	if (schedstat_enabled() && tg != &root_task_group) {
		u64 ws = 0;
		int i;

		for_each_possible_cpu(i)
			ws += schedstat_val(tg->se[i]->statistics.wait_sum);

		seq_printf(sf, "wait_sum %llu\n", ws);
	}

	return 0;
}
#endif /* CONFIG_CFS_BANDWIDTH */
#endif /* CONFIG_FAIR_GROUP_SCHED */

#ifdef CONFIG_RT_GROUP_SCHED
static int cpu_rt_runtime_write(struct cgroup_subsys_state *css,
				struct cftype *cft, s64 val)
{
	return sched_group_set_rt_runtime(css_tg(css), val);
}

static s64 cpu_rt_runtime_read(struct cgroup_subsys_state *css,
			       struct cftype *cft)
{
	return sched_group_rt_runtime(css_tg(css));
}

static int cpu_rt_period_write_uint(struct cgroup_subsys_state *css,
				    struct cftype *cftype, u64 rt_period_us)
{
	return sched_group_set_rt_period(css_tg(css), rt_period_us);
}

static u64 cpu_rt_period_read_uint(struct cgroup_subsys_state *css,
				   struct cftype *cft)
{
	return sched_group_rt_period(css_tg(css));
}
#endif /* CONFIG_RT_GROUP_SCHED */

#ifdef CONFIG_FAIR_GROUP_SCHED
static s64 cpu_idle_read_s64(struct cgroup_subsys_state *css,
			     struct cftype *cft)
{
	return css_tg(css)->idle;
}

static int cpu_idle_write_s64(struct cgroup_subsys_state *css,
			      struct cftype *cft, s64 idle)
{
	return sched_group_set_idle(css_tg(css), idle);
}
#endif

static struct cftype cpu_legacy_files[] = {
#ifdef CONFIG_FAIR_GROUP_SCHED
	{
		.name = "shares",
		.read_u64 = cpu_shares_read_u64,
		.write_u64 = cpu_shares_write_u64,
	},
	{
		.name = "idle",
		.read_s64 = cpu_idle_read_s64,
		.write_s64 = cpu_idle_write_s64,
	},
#endif
#ifdef CONFIG_CFS_BANDWIDTH
	{
		.name = "cfs_quota_us",
		.read_s64 = cpu_cfs_quota_read_s64,
		.write_s64 = cpu_cfs_quota_write_s64,
	},
	{
		.name = "cfs_period_us",
		.read_u64 = cpu_cfs_period_read_u64,
		.write_u64 = cpu_cfs_period_write_u64,
	},
	{
		.name = "cfs_burst_us",
		.read_u64 = cpu_cfs_burst_read_u64,
		.write_u64 = cpu_cfs_burst_write_u64,
	},
	{
		.name = "stat",
		.seq_show = cpu_cfs_stat_show,
	},
#endif
#ifdef CONFIG_RT_GROUP_SCHED
	{
		.name = "rt_runtime_us",
		.read_s64 = cpu_rt_runtime_read,
		.write_s64 = cpu_rt_runtime_write,
	},
	{
		.name = "rt_period_us",
		.read_u64 = cpu_rt_period_read_uint,
		.write_u64 = cpu_rt_period_write_uint,
	},
#endif
#ifdef CONFIG_UCLAMP_TASK_GROUP
	{
		.name = "uclamp.min",
		.flags = CFTYPE_NOT_ON_ROOT,
		.seq_show = cpu_uclamp_min_show,
		.write = cpu_uclamp_min_write,
	},
	{
		.name = "uclamp.max",
		.flags = CFTYPE_NOT_ON_ROOT,
		.seq_show = cpu_uclamp_max_show,
		.write = cpu_uclamp_max_write,
	},
	{
		.name = "uclamp.latency_sensitive",
		.flags = CFTYPE_NOT_ON_ROOT,
		.read_u64 = cpu_uclamp_ls_read_u64,
		.write_u64 = cpu_uclamp_ls_write_u64,
	},
#endif
	{ }	/* Terminate */
};

static int cpu_extra_stat_show(struct seq_file *sf,
			       struct cgroup_subsys_state *css)
{
#ifdef CONFIG_CFS_BANDWIDTH
	{
		struct task_group *tg = css_tg(css);
		struct cfs_bandwidth *cfs_b = &tg->cfs_bandwidth;
		u64 throttled_usec;

		throttled_usec = cfs_b->throttled_time;
		do_div(throttled_usec, NSEC_PER_USEC);

		seq_printf(sf, "nr_periods %d\n"
			   "nr_throttled %d\n"
			   "throttled_usec %llu\n",
			   cfs_b->nr_periods, cfs_b->nr_throttled,
			   throttled_usec);
	}
#endif
	return 0;
}

#ifdef CONFIG_FAIR_GROUP_SCHED
static u64 cpu_weight_read_u64(struct cgroup_subsys_state *css,
			       struct cftype *cft)
{
	struct task_group *tg = css_tg(css);
	u64 weight = scale_load_down(tg->shares);

	return DIV_ROUND_CLOSEST_ULL(weight * CGROUP_WEIGHT_DFL, 1024);
}

static int cpu_weight_write_u64(struct cgroup_subsys_state *css,
				struct cftype *cft, u64 weight)
{
	/*
	 * cgroup weight knobs should use the common MIN, DFL and MAX
	 * values which are 1, 100 and 10000 respectively. While it loses
	 * a bit of range on both ends, it maps pretty well onto the shares
	 * value used by scheduler and the round-trip conversions preserve
	 * the original value over the entire range.
	 */
	if (weight < CGROUP_WEIGHT_MIN || weight > CGROUP_WEIGHT_MAX)
		return -ERANGE;

	weight = DIV_ROUND_CLOSEST_ULL(weight * 1024, CGROUP_WEIGHT_DFL);

	return sched_group_set_shares(css_tg(css), scale_load(weight));
}

static s64 cpu_weight_nice_read_s64(struct cgroup_subsys_state *css,
				    struct cftype *cft)
{
	unsigned long weight = scale_load_down(css_tg(css)->shares);
	int last_delta = INT_MAX;
	int prio, delta;

	/* find the closest nice value to the current weight */
	for (prio = 0; prio < ARRAY_SIZE(sched_prio_to_weight); prio++) {
		delta = abs(sched_prio_to_weight[prio] - weight);
		if (delta >= last_delta)
			break;
		last_delta = delta;
	}

	return PRIO_TO_NICE(prio - 1 + MAX_RT_PRIO);
}

static int cpu_weight_nice_write_s64(struct cgroup_subsys_state *css,
				     struct cftype *cft, s64 nice)
{
	unsigned long weight;
	int idx;

	if (nice < MIN_NICE || nice > MAX_NICE)
		return -ERANGE;

	idx = NICE_TO_PRIO(nice) - MAX_RT_PRIO;
	idx = array_index_nospec(idx, 40);
	weight = sched_prio_to_weight[idx];

	return sched_group_set_shares(css_tg(css), scale_load(weight));
}
#endif
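
/*
 * Worked example of the weight mappings above: the cgroup v2 default
 * cpu.weight of 100 (CGROUP_WEIGHT_DFL) maps to 100 * 1024 / 100 == 1024
 * shares, i.e. the nice-0 weight. Writing cpu.weight.nice = 5 selects
 * sched_prio_to_weight[25] == 335 (see the table at the end of this file).
 */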

static void __maybe_unused cpu_period_quota_print(struct seq_file *sf,
						  long period, long quota)
{
	if (quota < 0)
		seq_puts(sf, "max");
	else
		seq_printf(sf, "%ld", quota);

	seq_printf(sf, " %ld\n", period);
}

/* caller should put the current value in *@periodp before calling */
static int __maybe_unused cpu_period_quota_parse(char *buf,
						 u64 *periodp, u64 *quotap)
{
	char tok[21];	/* U64_MAX */

	if (sscanf(buf, "%20s %llu", tok, periodp) < 1)
		return -EINVAL;

	*periodp *= NSEC_PER_USEC;

	if (sscanf(tok, "%llu", quotap))
		*quotap *= NSEC_PER_USEC;
	else if (!strcmp(tok, "max"))
		*quotap = RUNTIME_INF;
	else
		return -EINVAL;

	return 0;
}
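
/*
 * Accepted inputs for cpu_period_quota_parse(), given the sscanf formats
 * above: "max 100000" (no quota, explicit period), "50000 100000" (quota
 * and period in usec), or just "50000" (quota only, keeping the period
 * the caller preloaded into *periodp).
 */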

#ifdef CONFIG_CFS_BANDWIDTH
static int cpu_max_show(struct seq_file *sf, void *v)
{
	struct task_group *tg = css_tg(seq_css(sf));

	cpu_period_quota_print(sf, tg_get_cfs_period(tg), tg_get_cfs_quota(tg));
	return 0;
}

static ssize_t cpu_max_write(struct kernfs_open_file *of,
			     char *buf, size_t nbytes, loff_t off)
{
	struct task_group *tg = css_tg(of_css(of));
	u64 period = tg_get_cfs_period(tg);
	u64 burst = tg_get_cfs_burst(tg);
	u64 quota;
	int ret;

	ret = cpu_period_quota_parse(buf, &period, &quota);
	if (!ret)
		ret = tg_set_cfs_bandwidth(tg, period, quota, burst);
	return ret ?: nbytes;
}
#endif
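
/*
 * These handlers back the cgroup v2 cpu.max interface; for example,
 * limiting a group to half a CPU:
 *
 *   echo "50000 100000" > /sys/fs/cgroup/<group>/cpu.max
 *
 * Reading the file prints "max <period>" for an unlimited group. As with
 * the uclamp files above, the exact mount path depends on the system.
 */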

static struct cftype cpu_files[] = {
#ifdef CONFIG_FAIR_GROUP_SCHED
	{
		.name = "weight",
		.flags = CFTYPE_NOT_ON_ROOT,
		.read_u64 = cpu_weight_read_u64,
		.write_u64 = cpu_weight_write_u64,
	},
	{
		.name = "weight.nice",
		.flags = CFTYPE_NOT_ON_ROOT,
		.read_s64 = cpu_weight_nice_read_s64,
		.write_s64 = cpu_weight_nice_write_s64,
	},
	{
		.name = "idle",
		.flags = CFTYPE_NOT_ON_ROOT,
		.read_s64 = cpu_idle_read_s64,
		.write_s64 = cpu_idle_write_s64,
	},
#endif
#ifdef CONFIG_CFS_BANDWIDTH
	{
		.name = "max",
		.flags = CFTYPE_NOT_ON_ROOT,
		.seq_show = cpu_max_show,
		.write = cpu_max_write,
	},
	{
		.name = "max.burst",
		.flags = CFTYPE_NOT_ON_ROOT,
		.read_u64 = cpu_cfs_burst_read_u64,
		.write_u64 = cpu_cfs_burst_write_u64,
	},
#endif
#ifdef CONFIG_UCLAMP_TASK_GROUP
	{
		.name = "uclamp.min",
		.flags = CFTYPE_NOT_ON_ROOT,
		.seq_show = cpu_uclamp_min_show,
		.write = cpu_uclamp_min_write,
	},
	{
		.name = "uclamp.max",
		.flags = CFTYPE_NOT_ON_ROOT,
		.seq_show = cpu_uclamp_max_show,
		.write = cpu_uclamp_max_write,
	},
	{
		.name = "uclamp.latency_sensitive",
		.flags = CFTYPE_NOT_ON_ROOT,
		.read_u64 = cpu_uclamp_ls_read_u64,
		.write_u64 = cpu_uclamp_ls_write_u64,
	},
#endif
	{ }	/* terminate */
};

struct cgroup_subsys cpu_cgrp_subsys = {
	.css_alloc	= cpu_cgroup_css_alloc,
	.css_online	= cpu_cgroup_css_online,
	.css_released	= cpu_cgroup_css_released,
	.css_free	= cpu_cgroup_css_free,
	.css_extra_stat_show = cpu_extra_stat_show,
	.fork		= cpu_cgroup_fork,
	.can_attach	= cpu_cgroup_can_attach,
	.attach		= cpu_cgroup_attach,
	.legacy_cftypes	= cpu_legacy_files,
	.dfl_cftypes	= cpu_files,
	.early_init	= true,
	.threaded	= true,
};

#endif /* CONFIG_CGROUP_SCHED */

void dump_cpu_task(int cpu)
{
	pr_info("Task dump for CPU %d:\n", cpu);
	sched_show_task(cpu_curr(cpu));
}

/*
 * Nice levels are multiplicative, with a gentle 10% change for every
 * nice level changed. I.e. when a CPU-bound task goes from nice 0 to
 * nice 1, it will get ~10% less CPU time than another CPU-bound task
 * that remained on nice 0.
 *
 * The "10% effect" is relative and cumulative: from _any_ nice level,
 * if you go up 1 level, it's -10% CPU usage, if you go down 1 level
 * it's +10% CPU usage. (to achieve that we use a multiplier of 1.25.
 * If a task goes up by ~10% and another task goes down by ~10% then
 * the relative distance between them is ~25%.)
 */
const int sched_prio_to_weight[40] = {
 /* -20 */     88761,     71755,     56483,     46273,     36291,
 /* -15 */     29154,     23254,     18705,     14949,     11916,
 /* -10 */      9548,      7620,      6100,      4904,      3906,
 /*  -5 */      3121,      2501,      1991,      1586,      1277,
 /*   0 */      1024,       820,       655,       526,       423,
 /*   5 */       335,       272,       215,       172,       137,
 /*  10 */       110,        87,        70,        56,        45,
 /*  15 */        36,        29,        23,        18,        15,
};
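
/*
 * Worked example of the ~10% effect: a nice-0 task (weight 1024) sharing
 * a CPU with a nice-1 task (weight 820) receives 1024 / (1024 + 820),
 * about 55.5% of the CPU, leaving ~44.5% for the nice-1 task - roughly
 * the 10% relative shift described above.
 */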

/*
 * Inverse (2^32/x) values of the sched_prio_to_weight[] array, precalculated.
 *
 * In cases where the weight does not change often, we can use the
 * precalculated inverse to speed up arithmetics by turning divisions
 * into multiplications:
 */
const u32 sched_prio_to_wmult[40] = {
 /* -20 */     48388,     59856,     76040,     92818,    118348,
 /* -15 */    147320,    184698,    229616,    287308,    360437,
 /* -10 */    449829,    563644,    704093,    875809,   1099582,
 /*  -5 */   1376151,   1717300,   2157191,   2708050,   3363326,
 /*   0 */   4194304,   5237765,   6557202,   8165337,  10153587,
 /*   5 */  12820798,  15790321,  19976592,  24970740,  31350126,
 /*  10 */  39045157,  49367440,  61356676,  76695844,  95443717,
 /*  15 */ 119304647, 148102320, 186737708, 238609294, 286331153,
};
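
/*
 * Sanity check for the inverse table: the nice-0 entry is
 * 2^32 / 1024 == 4194304, matching sched_prio_to_weight[20] == 1024.
 * This lets a division by the weight be computed as a multiplication
 * by the precalculated inverse followed by a 32-bit right shift.
 */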

void call_trace_sched_update_nr_running(struct rq *rq, int count)
{
	trace_sched_update_nr_running_tp(rq, count);
}