This is the merge of the upstream LTS release of 5.15.78 into the android13-5.15 branch. It contains the following commits:c73b4619adANDROID: abi preservation for fscrypt change in 5.15.781960b1e610Merge 5.15.78 into android13-5.15-lts7048384c98Revert "net: macb: Specify PHY PM management done by MAC"845a2cc2e8Revert "perf: Fix missing SIGTRAPs"6f88ababa4Merge 5.15.77 into android13-5.15-lts92f701cae0Revert "net: phylink: add mac_managed_pm in phylink_config structure"8d9f3b2315Revert "arm64: errata: Remove AES hwcap for COMPAT tasks"eaa46dd972Merge 5.15.76 into android13-5.15-ltsd9d889009bANDROID: properly copy the scm_io_uring field in struct sk_buff16d4484281ANDROID: fix up struct sk_buf ABI breakage2d19e77e73Revert "bpf: Fix reference state management for synchronous callbacks"c18696c060Revert "tracing: Wake up ring buffer waiters on closing of the file"d122aaf804Revert "tracing: Add ioctl() to force ring buffer waiters to wake up"dfd3aa1729ANDROID: rename struct tcm_sock.cwnd_usage_seq to fix ABIc51f6b79ceANDROID: rename struct task_struct.in_eventfd to fix ABI85725fbe42Revert "ALSA: usb-audio: Register card at the last interface"8f38fb79b5Revert "ALSA: usb-audio: Fix last interface check for registration"3ebc180a96Revert "serial: 8250: Let drivers request full 16550A feature probing"f6d7d7caaaRevert "serial: 8250: Request full 16550A feature probing for OxSemi PCIe devices"d1096112e5Revert "usb: dwc3: core: Enable GUCTL1 bit 10 for fixing termination error after resume bug"3447743ef1Revert "serial: 8250: Toggle IER bits on only after irq has been set up"b049ff121cMerge 5.15.75 into android13-5.15-lts4ec71a9ec7ANDROID: cpu/hotplug: call perf event through function pointer509a32764eLinux 5.15.787038af4ce9wifi: brcmfmac: Fix potential buffer overflow in brcmf_fweh_event_worker()b66617cc3cdrm/i915/sdvo: Setup DDC fully before output init73d52322c4drm/i915/sdvo: Filter out invalid outputs more sensibly2219b6aad3drm/rockchip: dsi: Force synchronous 
probedd955eb4e6drm/rockchip: dsi: Clean up 'usage_mode' when failing to attachcfa8a89af9cifs: fix regression in very old smb1 mounts3189de0ac3ext4,f2fs: fix readahead of verity dataa663e6ab17tee: Fix tee_shm_register() for kernel TEE driversd46db722a0KVM: x86: emulator: update the emulation mode after CR0 write942aec252bKVM: x86: emulator: update the emulation mode after rsm9df4bb7b38KVM: x86: emulator: introduce emulator_recalc_and_set_mode311f1e51a2KVM: x86: emulator: em_sysexit should update ctxt->mode37a03de2d0KVM: arm64: Fix bad dereference on MTE-enabled systems167dca5e21KVM: VMX: fully disable SGX if SECONDARY_EXEC_ENCLS_EXITING unavailable19c2b2ffbeKVM: x86: Mask off reserved bits in CPUID.8000001FH553fd40d3bKVM: x86: Mask off reserved bits in CPUID.80000001H006366b96cKVM: x86: Mask off reserved bits in CPUID.80000008Hfc796fd861KVM: x86: Mask off reserved bits in CPUID.8000001AHef7716398aKVM: x86: Mask off reserved bits in CPUID.80000006Ha88998446bx86/syscall: Include asm/ptrace.h in syscall_wrapper header999cff2b6cext4: fix BUG_ON() when directory entry has invalid rec_len0a43c015e9ext4: fix warning in 'ext4_da_release_space'ada82803a7parisc: Avoid printing the hardware path twice081ff43a77parisc: Export iosapic_serial_irq() symbol for serial port driver5daf985dd0parisc: Make 8250_gsc driver dependend on CONFIG_PARISC425fe99771perf/x86/intel: Fix pebs event constraints for SPR4613a45017perf/x86/intel: Add Cooper Lake stepping to isolation_ucodes[]7de3fe6a13perf/x86/intel: Fix pebs event constraints for ICL71d6c33fe2arm64: entry: avoid kprobe recursion52be536155efi: random: Use 'ACPI reclaim' memory for random seed83b5ec7ee8efi: random: reduce seed size to 32 bytes0417f70b85fuse: add file_modified() to fallocate2de8eec8afcapabilities: fix potential memleak on error path from vfs_getxattr_alloc()bd07f8067btracing/histogram: Update document for KEYS_MAX size27b4406f9ctools/nolibc/string: Fix memcmp() implementationb5074df412ring-buffer: Check for NULL 
cpu_buffer in ring_buffer_wake_waiters()85f3caa955kprobe: reverse kp->flags when arm_kprobe failedd1b6a8e341tracing: kprobe: Fix memory leak in test_gen_kprobe/kretprobe_cmd()828577e0batcp/udp: Make early_demux back namespacified.88561a6677ftrace: Fix use-after-free for dynamic ftrace_ops450d748070btrfs: fix type of parameter generation in btrfs_get_dentry007058eb82btrfs: fix tree mod log mishandling of reallocated nodes336fdd295cbtrfs: fix lost file sync on direct IO write with nowait and dsync iocbcff805b151fscrypt: fix keyring memory leak on mount failuree6f4fd85effscrypt: stop using keyrings subsystem for fscrypt_master_key3975affcf5af_unix: Fix memory leaks of the whole sk due to OOB skb.4302806dbfblock, bfq: protect 'bfqd->queued' by 'bfqd->lock'3e4697ffdfBluetooth: L2CAP: Fix attempting to access uninitialized memory81035e1201Bluetooth: L2CAP: Fix accepting connection request for invalid SPSMd78ccdce66i2c: piix4: Fix adapter not be removed in piix4_remove()c76ff8ae11arm64: dts: juno: Add thermal critical trip points7398435e61firmware: arm_scmi: Fix devres allocation device in virtio transport3653cdc21bfirmware: arm_scmi: Make Rx chan_setup fail on memory errorse514d67b23firmware: arm_scmi: Suppress the driver's bind attributes4e68c5da60block: Fix possible memory leak for rq_wb on add_disk failurebf822b6980arm64: dts: ls208xa: specify clock frequencies for the MDIO controllersf2329886e5arm64: dts: ls1088a: specify clock frequencies for the MDIO controllers33fcc55dbcarm64: dts: lx2160a: specify clock frequencies for the MDIO controllersf3429a1e49arm64: dts: imx8: correct clock orderde2a83186aARM: dts: imx6qdl-gw59{10,13}: fix user pushbutton GPIO offsetcb9ce8910aclk: qcom: Update the force mem core bit for GPU clocksbdc1182496efi/tpm: Pass correct address to memblock_reserve3a4d6f165ei2c: xiic: Add platform module alias62eea4014adrm/amdgpu: set vm_update_mode=0 as default for Sienna Cichlid in SRIOV case7a2547cac2HID: saitek: add madcatz variant of MMO7 mouse 
device ID931c97a54cscsi: core: Restrict legal sdev_state transitions via sysfsc50ec15725ACPI: APEI: Fix integer overflow in ghes_estatus_pool_init()8ecd1db58bmedia: v4l: subdev: Fail graciously when getting try data for NULL statef96ad391d0media: meson: vdec: fix possible refcount leak in vdec_probe()8b785cdcd3media: dvb-frontends/drxk: initialize err to 073dfb64213media: cros-ec-cec: limit msg.len to CEC_MAX_MSG_SIZEcbfa26936fmedia: s5p_cec: limit msg.len to CEC_MAX_MSG_SIZE647c12c47emedia: rkisp1: Zero v4l2_subdev_format fields in when validating linksabbeb8f727media: rkisp1: Use correct macro for gradient registers03b30e5a36media: rkisp1: Initialize color space on resizer sink and source padsd58b6b665cmedia: rkisp1: Don't pass the quantization to rkisp1_csm_config()0e501fd0f3s390/cio: fix out-of-bounds access on cio_ignore freec65cc56937s390/cio: derive cdev information only for IO-subchannelsc64be93f1es390/boot: add secure boot trailer1cdaca8f00s390/uaccess: add missing EX_TABLE entries to __clear_user()509cbbdec9mtd: parsers: bcm47xxpart: Fix halfblock reads5b8797e9dbmtd: parsers: bcm47xxpart: print correct offset on read error2f07635876fbdev: stifb: Fall back to cfb_fillrect() on 32-bit HCRX cards154934c74fvideo/fbdev/stifb: Implement the stifb_fillrect() functionb524b41806drm/msm/hdmi: fix IRQ lifetimec55dd62001drm/msm/hdmi: Remove spurious IRQF_ONESHOT flagd153d468c4vsock: fix possible infinite sleep in vsock_connectible_wait_data()0ed71af4d0ipv6: fix WARNING in ip6_route_net_exit_late()2b45d6d0c4net, neigh: Fix null-ptr-deref in neigh_table_clear()61defd6450net/smc: Fix possible leaked pernet namespace in smc_init()de88977427stmmac: dwmac-loongson: fix invalid mdio_node535b78739aibmvnic: Free rwi on reset success985a88bf0bnet: mdio: fix undefined behavior in bit shift for __mdiobus_registeraa16cac06bBluetooth: L2CAP: Fix memory leak in vhci_writea3a7b2ac64Bluetooth: L2CAP: fix use-after-free in l2cap_conn_del()cf2719a21fBluetooth: virtio_bt: Use skb_put to 
set length8278a87bb1Bluetooth: L2CAP: Fix use-after-free caused by l2cap_reassemble_sdu42d20d5e24netfilter: ipset: enforce documented limit to prevent allocating huge memoryf46ea5fa33btrfs: fix ulist leaks in error paths of qgroup self tests222a3d5330btrfs: fix inode list leak during backref walking at find_parent_nodes()6ba3479f9ebtrfs: fix inode list leak during backref walking at resolve_indirect_refs()a80634f392isdn: mISDN: netjet: fix wrong check of device registration029d5b7688mISDN: fix possible memory leak in mISDN_register_device()3e2129c67drose: Fix NULL pointer dereference in rose_send_frame()06d7596d18ipvs: fix WARNING in ip_vs_app_net_cleanup()5ee2d6b726ipvs: fix WARNING in __ip_vs_cleanup_batch()33e7783bc0ipvs: use explicitly signed chars6044791b7bnetfilter: nf_tables: release flow rule object from commit path1ffe710041netfilter: nf_tables: netlink notifier might race to release objectsdcc79cf735net: tun: fix bugs for oversize packet when napi frags enabledfc4b50adb4net: sched: Fix use after free in red_enqueue()ab80025ea7ata: pata_legacy: fix pdc20230_set_piomode()dede9ba027net: fec: fix improper use of NETDEV_TX_BUSY5dfdac5e3fnfc: nfcmrvl: Fix potential memory leak in nfcmrvl_i2c_nci_send()7486f5c900nfc: s3fwrn5: Fix potential memory leak in s3fwrn5_nci_send()3cba1f061bnfc: nxp-nci: Fix potential memory leak in nxp_nci_send()44bc1868a4nfc: fdp: Fix potential memory leak in fdp_nci_send()4bef9a89f2net: dsa: fall back to default tagger if we can't load the one from DT06f9e0b37fRDMA/qedr: clean up work queue on failure in qedr_alloc_resources()6b3d5dcb12RDMA/core: Fix null-ptr-deref in ib_core_cleanup()9f555b1584net: dsa: Fix possible memory leaks in dsa_loop_init()24641993a7nfs4: Fix kmemleak when allocate slot failed0797c85433NFSv4.2: Fixup CLONE dest file size for zero-length countd59722d088SUNRPC: Fix null-ptr-deref when xps sysfs alloc faileddea7ef05deNFSv4.1: We must always send RECLAIM_COMPLETE after a reboot7b1c2458deNFSv4.1: Handle 
RECLAIM_COMPLETE trunking errors4ec017e300NFSv4: Fix a potential state reclaim deadlocke3e53c5af5RDMA/hns: Disable local invalidate operation85ab79ac94RDMA/hns: Use hr_reg_xxx() instead of remaining roce_set_xxx()be16cc7abdRDMA/hns: Remove magic numberba95409d6bIB/hfi1: Correctly move list in sc_disable()484d969037RDMA/cma: Use output interface for net_dev checkf7d9de8a0dKVM: x86: Add compat handler for KVM_X86_SET_MSR_FILTERb7b66f13acKVM: x86: Copy filter arg outside kvm_vm_ioctl_set_msr_filter()0c60fa7f55KVM: x86: Protect the unused bits in MSR exiting flagsad8e4868ddHID: playstation: add initial DualSense Edge controller support3a44ae4afamm/hugetlb: fix races when looking up a CONT-PTE/PMD size hugetlb page8576d7edeadrm/amd/display: explicitly disable psr_feature_enable appropriately058b3a11f7KVM: x86: Treat #DBs from the emulator as fault-like (code and DR7.GD=1)9ee32892c7KVM: x86: Trace re-injected exceptions0c9c1306d6serial: ar933x: Deassert Transmit Enable on ->rs485_config()21d65b3516scsi: lpfc: Rework MIB Rx Monitor debug info logicd70705e131scsi: lpfc: Adjust CMF total bytes and rxmonitor9ebc6e8ad1scsi: lpfc: Adjust bytes received vales during cmf timer interval793d8378b7Linux 5.15.771401e9336btcp/udp: Fix memory leak in ipv6_renew_options().b079d37752serial: Deassert Transmit Enable on probe in driver-specific way63f75fea3aserial: core: move RS485 configuration tasks from drivers into core0753069d44can: rcar_canfd: rcar_canfd_handle_global_receive(): fix IRQ storm on global FIFO receive17ff99e224can: rcar_canfd: fix channel specific IRQ handling for RZ/G2Laad798a0b3scsi: sd: Revert "scsi: sd: Remove a local variable"52c2329147arm64: Add AMPERE1 to the Spectre-BHB affected list5397ea6a08net: enetc: survive memory pressure without crashing885a454e97kcm: do not sense pfmemalloc status in kcm_sendpage()92b4c5c3fanet: do not sense pfmemalloc status in skb_append_pagefrags()ae1b08592enet/mlx5: Fix crash during sync firmware reset37ada47d01net/mlx5: Update fw 
fatal reporter state on PCI handlers successful recover9e6523d06anet/mlx5: Print more info on pci error handlersab3de780c1net/mlx5: Fix possible use-after-free in async command interface8bbff203e3net/mlx5e: Extend SKB room check to include PTP-SQee1c0ca1afnet/mlx5e: Do not increment ESN when updating IPsec ESN stateeefa97a7a0netdevsim: remove dir in nsim_dev_debugfs_init() when creating ports dir failedc9589e18a6net: broadcom: bcm4908_enet: update TX stats after actual transmission9711616a49net: broadcom: bcm4908enet: remove redundant variable bytesb317d53680nh: fix scope used to find saddr when adding non gw nh2ad284ac88net: bcmsysport: Indicate MAC is in charge of PHY PMd1cfa71d5bnet: ehea: fix possible memory leak in ehea_register_port()588bdd7ee4openvswitch: switch from WARN to pr_warn9a1c1df925ALSA: aoa: Fix I2S device accountinge81d7826b8ALSA: aoa: i2sbus: fix possible memory leak in i2sbus_add_dev()77a754fcfenet: ethernet: ave: Fix MAC to be in charge of PHY PMbc2518ec71net: fec: limit register access on i.MX6ULf710deeea7perf vendor events arm64: Fix incorrect Hisi hip08 L3 metricseb59cb2fabPM: domains: Fix handling of unavailable/disabled idle statesbde7c2acefnet: ksz884x: fix missing pci_disable_device() on error in pcidev_init()8927d90d56i40e: Fix flow-type by setting GL_HASH_INSET registersc39de3ae50i40e: Fix VF hang when reset is triggered on another VF250bf8ab78i40e: Fix ethtool rx-flow-hash setting for X722ad3f1d9bf1ipv6: ensure sane device mtu in tunnelse2ec5bb78cperf vendor events power10: Fix hv-24x7 metric eventsf9df388ed6media: vivid: set num_in/outputs to 0 if not supported4cc7d8d420media: videodev2.h: V4L2_DV_BT_BLANKING_HEIGHT should check 'interlaced'491c0959f0media: v4l2-dv-timings: add sanity checks for blanking values0f83edbe4fmedia: vivid: dev->bitmap_cap wasn't freed in all cases5b1fb2a28dmedia: vivid: s_fbuf: add more sanity checks3436e56337PM: hibernate: Allow hybrid sleep to work with s2idle3cc8c4088fcan: mcp251x: mcp251x_can_probe(): 
add missing unregister_candev() in error patha3e09eff32can: mscan: mpc5xxx: mpc5xxx_can_probe(): add missing put_clock() in error path304a101616drm/amdkfd: Fix memory leak in kfd_mem_dmamap_userptr()2fe6b24ce2net-memcg: avoid stalls when under memory pressure9b171fdcbftcp: fix indefinite deferral of RTO with SACK reneginga85d39f14atcp: fix a signed-integer-overflow bug in tcp_add_backlog()2437f3c5c6tcp: minor optimization in tcp_add_backlog()ef27df7591net: lantiq_etop: don't free skb when returning NETDEV_TX_BUSYa1e18acb02net: fix UAF issue in nfqnl_nf_hook_drop() when ops_init() failed62086d1c46kcm: annotate data-races around kcm->rx_wait342d918cf9kcm: annotate data-races around kcm->rx_psock6bb23225bbatlantic: fix deadlock at aq_nic_stop4e2cbc1f0edrm/i915/dp: Reset frl trained flag before restarting FRL training3d92ab0865amd-xgbe: add the bit rate quirk for Molex cables75a6d1ebf8amd-xgbe: fix the SFP compliance codes check for DAC cables98bada8fa0x86/unwind/orc: Fix unreliable stack dump with gcov88e879c9f5nfc: virtual_ncidev: Fix memory leak in virtual_nci_send()18c60b383dnet: macb: Specify PHY PM management done by MAC95c22fc1e8net: hinic: fix the issue of double release MBOX callback of VF6016d96a6anet: hinic: fix the issue of CMDQ memory leakse6765fe8denet: hinic: fix memory leak when reading function table62aa78a0c3net: hinic: fix incorrect assignment issue in hinic_set_interrupt_cfg()1e0bee973enet: netsec: fix error handling in netsec_register_mdio()7a939503fctipc: fix a null-ptr-deref in tipc_topsrv_acceptc638b520baperf/x86/intel/lbr: Use setup_clear_cpu_cap() instead of clear_cpu_cap()4fdf6f978cALSA: ac97: fix possible memory leak in snd_ac97_dev_register()b688736903ASoC: qcom: lpass-cpu: Mark HDMI TX parity register as volatileeca851572dmtd: rawnand: intel: Add missing of_node_put() in ebu_nand_probe()08c246c7dfarc: iounmap() arg is volatile739eac37ffsched/core: Fix comparison in sched_group_cookie_match()ca7b0a1028perf: Fix missing 
SIGTRAPseb77474a2aASoC: qcom: lpass-cpu: mark HDMI TX registers as volatile9b6841ab70KVM: selftests: Fix number of pages for memory slot in memslot_modification_stress_test59de8738eddrm/msm: Fix return type of mdp4_lvds_connector_mode_valida560aeac2fmedia: atomisp: prevent integer overflow in sh_css_set_black_frame()32f93e4608media: v4l2: Fix v4l2_i2c_subdev_set_name function documentation5a93a8288cnet: ieee802154: fix error return code in dgram_bind()138a13d8f5ethtool: eeprom: fix null-deref on genl_info in dump1c2b1d3bbammc: block: Remove error check of hw_reset on reset0b0d169723Revert "scsi: lpfc: SLI path split: Refactor lpfc_iocbq"7a0fce24deRevert "scsi: lpfc: SLI path split: Refactor fast and slow paths to native SLI4"7a36c9de43Revert "scsi: lpfc: SLI path split: Refactor SCSI paths"eb8be2dbfbRevert "scsi: lpfc: Fix locking for lpfc_sli_iocbq_lookup()"065bf71a8aRevert "scsi: lpfc: Fix element offset in __lpfc_sli_release_iocbq_s4()"97dc9076eaRevert "scsi: lpfc: Resolve some cleanup issues following SLI path refactoring"b32b766be4s390/pci: add missing EX_TABLE entries to __pcistg_mio_inuser()/__pcilg_mio_inuser()1ad7213fcfs390/futex: add missing EX_TABLE entry to __futex_atomic_op()ae9398e837perf auxtrace: Fix address filter symbol name match for modules14009ada57ARC: mm: fix leakage of memory allocated for PTEeb9ed3343cpinctrl: Ingenic: JZ4755 bug fixes94d2643df1kernfs: fix use-after-free in __kernfs_removef1204dfc4ccounter: microchip-tcb-capture: Handle Signal1 read and Synapse6fb0106c64mmc: sdhci-esdhc-imx: Propagate ESDHC_FLAG_HS400* only on 8bit bus73e3901e70mmc: sdhci-pci-core: Disable ES for ASUS BIOS on Jasper Lake1e8cd93ae5mmc: core: Fix kernel panic when remove non-standard SDIO card02e51e7cd1mmc: sdhci_am654: 'select', not 'depends' REGMAP_MMIO4c365a0c21coresight: cti: Fix hang in cti_disable_hw()b32775e039drm/msm/dp: fix IRQ lifetimeb48949ab45drm/msm/hdmi: fix memory corruption with too many bridges9f035d1fb3drm/msm/dsi: fix memory corruption with 
too many bridges986a89b371drm/amdgpu: disallow gfxoff until GC IP blocks complete s2idle resumea2f0934e6bscsi: qla2xxx: Use transport-defined speed mask for supported_speeds2b1a3172eemac802154: Fix LQI recording46b4b1e11eexec: Copy oldsighand->action under spin-lock265b6fb780fs/binfmt_elf: Fix memory leak in load_elf_binary()24030742a7cpufreq: intel_pstate: hybrid: Use known scaling factor for P-cores3423a3417fcpufreq: intel_pstate: Read all MSRs on the target CPUcc6a724984fbdev: smscufx: Fix several use-after-free bugs1a8b22e3f3iio: adxl372: Fix unsafe buffer attributes2f08cad213iio: temperature: ltc2983: allocate iio channels once1bfe97f497iio: light: tsl2583: Fix module unloading569709540etools: iio: iio_utils: fix digit calculationc892a81c74xhci: Remove device endpoints from bandwidth list when freeing the devicedfacb5c7f0xhci-pci: Set runtime PM as default policy on all xHC 1.2 or later devices64058af657xhci: Add quirk to reset host back to default state at shutdown022f21e850mtd: rawnand: marvell: Use correct logic for nand-keep-configf90897c0f6usb: xhci: add XHCI_SPURIOUS_SUCCESS to ASM1042 despite being a V0.96 controllera0c54d5152usb: bdc: change state when port disconnectede0fd70ab48usb: dwc3: gadget: Don't set IMI for no_interruptad538aea64usb: dwc3: gadget: Stop processing more requests on IMIf2f53be617usb: gadget: uvc: fix sg handling during video encode80ff4ef777usb: gadget: uvc: fix sg handling in error case555011f6b2USB: add RESET_RESUME quirk for NVIDIA Jetson devices in RCM311428871bALSA: rme9652: use explicitly signed charfa8b39c7edALSA: au88x0: use explicitly signed char8af82d330dALSA: usb-audio: Add quirks for M-Audio Fast Track C400/600259cb4dee1ALSA: Use del_timer_sync() before freeing timer33ddee2b95can: kvaser_usb: Fix possible completions during init_completion86da269c75can: j1939: transport: j1939_session_skb_drop_old(): spin_unlock_irqrestore() before kfree_skb()ead0495627NFSv4: Add an fattr allocation to 
_nfs4_discover_trunking()eb1fe9600bNFSv4: Fix free of uninitialized nfs4_label on referral lookup.4f5365f770Linux 5.15.7633fc9e26b7mm: /proc/pid/smaps_rollup: fix no vma's null-derefb9d8cbe90ammc: core: Add SD card quirk for broken discard0ee2f0567aMakefile.debug: re-enable debug info for .S files117825e9bbx86/Kconfig: Drop check for -mabi=ms for CONFIG_EFI_STUB0983205085ACPI: video: Force backlight native for more TongFang devices289b56715bperf: Skip and warn on unknown format 'configN' attrs9d912a3853mmc: sdhci-tegra: Use actual clock rate for SW tuning correction7aeda81191tracing: Do not free snapshot if tracer is on cmdline57252e7bd4tracing: Simplify conditional compilation code in tracing_set_tracer()20bc6d23f7ksmbd: fix incorrect handling of iterate_dir3c8cfcaa2dksmbd: handle smb2 query dir request for OutputBufferLength that is too small8754fa5dbcarm64: mte: move register initialization to Cea7be82fd7fs: dlm: fix invalid derefence of sb_lvbptr0365d6af75iommu/vt-d: Clean up si_domain in the init_dmars() error path5c95d0c9d0iommu/vt-d: Allow NVS regions in arch_rmrr_sanity_check()209740fd13net: phy: dp83822: disable MDI crossover status change interruptce1234573dnet: sched: fix race condition in qdisc_graft()91f8f5342bnet: hns: fix possible memory leak in hnae_ae_register()50c31fa952wwan_hwsim: fix possible memory leak in wwan_hwsim_dev_new()d2fc83a6b5sfc: include vport_id in filter spec hash and equal()c2e1e59d59net: sched: sfb: fix null pointer access issue when sfb_init() fails34f2a4eedcnet: sched: delete duplicate cleanup of backlog and qlen154f4c06d9net: sched: cake: fix null pointer access issue when cake_init() fails5efed7578dnvmet: fix workqueue MEM_RECLAIM flushing dependency2f2b84b020nvme-hwmon: kmalloc the NVME SMART log buffer66c56b2328nvme-hwmon: consistently ignore errors from nvme_hwmon_initd77f6908f9netfilter: nf_tables: relax NFTA_SET_ELEM_KEY_END set flags requirementsefa9dd7e67ionic: catch NULL pointer issue on reconfig35ece85866net: hsr: 
avoid possible NULL deref in skb_clone()e326df21dadm: remove unnecessary assignment statement in alloc_dev()847301f0eecifs: Fix xid leak in cifs_ses_add_channel()8905d13b9ecifs: Fix xid leak in cifs_flock()27cfd3afaacifs: Fix xid leak in cifs_copy_file_range()593d877c39cifs: Fix xid leak in cifs_create()a8df9d0428udp: Update reuse->has_conns under reuseport_lock.9749595febscsi: lpfc: Fix memory leak in lpfc_create_port()b9122e0e0enet: phylink: add mac_managed_pm in phylink_config structure412db9b06dnet: phy: dp83867: Extend RX strap quirk for SGMII mode5ce6130519net/atm: fix proc_mpc_write incorrect return value0eb17faedcsfc: Change VF mac via PF as first preference if available.0f58940ca3HID: magicmouse: Do not set BTN_MOUSE on double report94a171c982i40e: Fix DMA mappings leakdbc01c0a4etipc: fix an information leak in tipc_topsrv_kern_subscrb294cad6f0tipc: Fix recognition of trial period6161c364e3ACPI: extlog: Handle multiple records40e5fceddfdrm/vc4: Add module dependency on hdmi-codec6c5041a103btrfs: fix processing of delayed tree block refs during backref walkingaf67578d56btrfs: fix processing of delayed data refs during backref walkingc439cafce8x86/topology: Fix duplicated core ID within a packaged31f4bc225x86/topology: Fix multiple packages shown on a single-package systemfcc96e89b3media: venus: dec: Handle the case where find_format failsb22b4823a0media: mceusb: set timeout to at least timeout provided5265cc1202media: ipu3-imgu: Fix NULL pointer dereference in active selection access1e4e71f9e1KVM: arm64: vgic: Fix exit condition in scan_its_table()5bf2fda26akvm: Add support for arch compat vm ioctls112a005d1dmm,hugetlb: take hugetlb_lock before decrementing h->resv_huge_pages2d508b4e65drm/amdgpu: fix sdma doorbell init ordering on APUsb5606e3ab1cpufreq: qcom: fix memory leak in error pathd866f5982cx86/resctrl: Fix min_cbm_bits for AMD8fbe13de1cata: ahci: Match EM_MAX_SLOTS with SATA_PMP_MAX_PORTS5d6a037b3aata: ahci-imx: Fix 
MODULE_ALIAS30cf0dee37hwmon/coretemp: Handle large core ID value2f7171465fx86/microcode/AMD: Apply the patch early on every logical thread93d7e2b47ai2c: qcom-cci: Fix ordering of pm_runtime_xx and i2c_add_adapter14d260f94fcpufreq: qcom: fix writes in read-only memory region3006766d24selinux: enable use of both GFP_KERNEL and GFP_ATOMIC in convert_context()1b31cb0065ocfs2: fix BUG when iput after ocfs2_mknod failse469db818eocfs2: clear dinode links count in case of errorded9d535bebtrfs: enhance unsupported compat RO flags handling537412c547perf/x86/intel/pt: Relax address filter validation8ddc58e0e3arm64: errata: Remove AES hwcap for COMPAT tasks738515cf8busb: gadget: uvc: improve sg exit conditiondb11d8c72ausb: gadget: uvc: giveback vb2 buffer on req completeaee340dccfusb: gadget: uvc: rework uvcg_queue_next_buffer to uvcg_complete_buffer2f54ce7392usb: gadget: uvc: use on returned header len in video_encode_isoc_sgd80db2f145usb: gadget: uvc: consistently use define for headerlenf9681a6750arm64/mm: Consolidate TCR_EL1 fields5b20aacff7r8152: add PID for the Lenovo OneLink+ Dockbd8a595958Linux 5.15.75b6e2c54be3io-wq: Fix memory leak in worker creation7c359e2849gcov: support GCC 12.1 and newer compilers8418c1672cthermal: intel_powerclamp: Use first online CPU as control_cpu55c824b620ext4: continue to expand file system when the target size doesn't reach0e63de6d7elib/Kconfig.debug: Add check for non-constant .{s,u}leb128 support to DWARF584cd0b20faKconfig.debug: add toolchain checks for DEBUG_INFO_DWARF_TOOLCHAIN_DEFAULT371aaf6b48Kconfig.debug: simplify the dependency of DEBUG_INFO_DWARF4/5e1591557e3drm/amd/display: Fix build breakage with CONFIG_DEBUG_FS=n34f31a2b66net/ieee802154: don't warn zero-sized raw_sendmsg()de904d0fe1Revert "net/ieee802154: reject zero-sized raw_sendmsg()"9c65eef9d6net: ethernet: ti: davinci_mdio: fix build for mdio bitbang usesd7eadffce0blk-wbt: fix that 'rwb->wc' is always set to 1 in wbt_init()28787ff9fbALSA: usb-audio: Fix last interface 
check for registrationb8989e95d7net: ieee802154: return -EINVAL for unknown addr type0db2efb3bfmm: hugetlb: fix UAF in hugetlb_handle_userfault98aada6e22io_uring/rw: fix unexpected link breakaged6b7efc722io_uring/rw: fix error'ed retry return valuese857457c6fio_uring/rw: fix short rw error handlingcd148d4e31io_uring: correct pinned_vm accounting813d8fe5d3io_uring/af_unix: defer registered files gc to io_uring releasec69a2324fcperf intel-pt: Fix segfault in intel_pt_print_info() with uClibce81bf40b28clk: bcm2835: Round UART input clock upda17cbb229clk: bcm2835: Make peripheral PLLC critical20b8c456dfusb: idmouse: fix an uninit-value in idmouse_openec8adf767envmet-tcp: add bounds check on Transfer Tag1c64328840nvme: copy firmware_rev on each initb9b5560b34ext2: Use kvmalloc() for group descriptor array8c067a3051scsi: tracing: Fix compile error in trace_array calls when TRACING is disabled39bef9c6a9staging: rtl8723bs: fix a potential memory leak in rtw_init_cmd_priv()b4573a2badstaging: rtl8723bs: fix potential memory leak in rtw_init_drv_sw()eb24d93e3eRevert "usb: storage: Add quirk for Samsung Fit flash"3a38985d8busb: dwc3: core: Enable GUCTL1 bit 10 for fixing termination error after resume bug9d4f84a15farm64: dts: imx8mp: Add snps,gfladj-refclk-lpm-sel quirk to USB nodes3c84c7f592usb: musb: Fix musb_gadget.c rxstate overflow bugfcd594da0busb: host: xhci: Fix potential memory leak in xhci_alloc_stream_info()9e86dffd0bmd/raid5: Wait for MD_SB_CHANGE_PENDING in raid5df8e80792c1eventfd: guard wake_up in eventfd fs calls as wellc61786dc72HID: roccat: Fix use-after-free in roccat_read()f7f425d61dsoundwire: intel: fix error handling on dai registration issues093a5463aesoundwire: cadence: Don't overwrite msg->buf during write commands1b4ed920b2bcache: fix set_at_max_writeback_rate() for multiple attached deviceseecb5ccc84ata: libahci_platform: Sanity check the DT child nodes number70b2adb1d6blk-throttle: prevent overflow while calculating wait timeff8551d411staging: 
vt6655: fix potential memory leak7c8bc37465power: supply: adp5061: fix out-of-bounds read in adp5061_get_chg_type()3d69461807iommu/arm-smmu-v3: Make default domain type of HiSilicon PTT device to identityc0d73be0afnbd: Fix hung when signal interrupts nbd_start_device_ioctl()9d54de8660scsi: 3w-9xxx: Avoid disabling device if failing to enable itd68da10b0cdmaengine: ti: k3-udma: Reset UDMA_CHAN_RT byte counters to prevent overflow518a2a1cc3usb: host: xhci-plat: suspend/resume clks for brcmf002aa7c0ausb: host: xhci-plat: suspend and resume clocks6bcd745c87clk: zynqmp: pll: rectify rate rounding in zynqmp_pll_round_rate5c32cbf6ccmedia: platform: fix some double free in meson-ge2d and mtk-jpeg and s5p-mfc6f21976095media: cx88: Fix a null-ptr-deref bug in buffer_prepare()0a07b13af0clk: zynqmp: Fix stack-out-of-bounds in strncpy`3680442cbaARM: 9242/1: kasan: Only map modules if CONFIG_KASAN_VMALLOC=n4a89c0befcbtrfs: don't print information about space cache or tree every remount39a07058c7btrfs: scrub: try to fix super block errorsf3857dd7c0btrfs: dump extra info if one free space cache has more bitmaps than it shouldd3c6d5be46arm64: dts: imx8mq-librem5: Add bq25895 as max17055's power supply82046b6a84kselftest/arm64: Fix validatation termination record after EXTRA_CONTEXT3536541733ARM: dts: imx6sx: add missing properties for sram602813650cARM: dts: imx6sll: add missing properties for sram6a12e1e23cARM: dts: imx6sl: add missing properties for sram8c24dc621bARM: dts: imx6qp: add missing properties for sram47666b9a11ARM: dts: imx6dl: add missing properties for sram19fe40c518ARM: dts: imx6q: add missing properties for sram9361ba7791ARM: dts: imx7d-sdb: config the max pressure for tsc20460f90671ff9drm/amd/display: Remove interface for periodic interrupt 188fd067406drm/dp: Don't rewrite link config when setting phy test pattern668806a826mmc: sdhci-msm: add compatible string check for sdm670587c7da877drm/meson: explicitly remove aggregate driver at module unload 
timed76ff04a72drm/meson: reorder driver deinit sequence to fix use-after-free bugd894db3561drm/amdgpu: fix initial connector audio valuee3675f688dASoC: SOF: pci: Change DMI match info to support all Chrome platformsf16e1b7b39platform/x86: msi-laptop: Change DMI match / alias strings to fix module autoloading39da49ffa2platform/chrome: cros_ec: Notify the PM of wake events during resume7463604784drm: panel-orientation-quirks: Add quirk for Anbernic Win6002810061452drm/vc4: vec: Fix timings for VEC modes0506c4eae9ALSA: usb-audio: Register card at the last interface39d7a81bbbdrm: bridge: dw_hdmi: only trigger hotplug event on link changedfbed8c92eudmabuf: Set ubuf->sg = NULL if the creation of sg table failsa47d92c74bdrm/amd/display: fix overflow on MIN_I64 definitiona29f742704gpu: lontium-lt9611: Fix NULL pointer dereference in lt9611_connector_init()5ff7bec678drm/komeda: Fix handling of atomic commits in the atomic_commit_tail hookca163e389fdrm: Prevent drm_copy_field() to attempt copying a NULL pointerdf5ac93926drm: Use size_t type for len variable in drm_copy_field()5ab84b1596drm/nouveau/nouveau_bo: fix potential memory leak in nouveau_bo_alloc()b3179865cfr8152: Rate limit overflow messagesd1e894f950Bluetooth: L2CAP: Fix user-after-free124b7c7732net: If sock is dead don't access sock's sk_wq in sk_stream_wait_memory5b94d48898hwmon: (sht4x) do not overflow clamping operation on 32-bit platformsa269c3e390wifi: rt2x00: correctly set BBP register 86 for MT7620b5e6ada5a5wifi: rt2x00: set SoC wmac clock register357c89074awifi: rt2x00: set VGC gain for both chains of MT762092e2e04da5wifi: rt2x00: set correct TX_SW_CFG1 MAC register for MT76204304b8e075wifi: rt2x00: don't run Rt5592 IQ calibration on MT76204a5eab200ecan: bcm: check the result of can_send() in bcm_can_tx()3423a50fa0Bluetooth: hci_sysfs: Fix attempting to call device_add multiple times3ac837cef1Bluetooth: L2CAP: initialize delayed works at l2cap_chan_create()af46b2b9b0wifi: mt76: mt7921: reset 
msta->airtime_ac while clearing up hw valuee33da263e9regulator: core: Prevent integer underflowd58c8781c0Bluetooth: btintel: Mark Intel controller to support LE_STATES quirk232d59eca0wifi: brcmfmac: fix use-after-free bug in brcmf_netdev_start_xmit()37f15edba2iavf: Fix race between iavf_close and iavf_reset_task0315568019xfrm: Update ipcomp_scratches with NULL when freed716c526d66thunderbolt: Add back Intel Falcon Ridge end-to-end flow control workaroundb1b4144508wifi: ath9k: avoid uninit memory read in ath9k_htc_rx_msg()839f563c5dx86/mce: Retrieve poison range from hardware1663629bc3tcp: annotate data-race around tcp_md5sig_pool_populated7b03296b4fopenvswitch: Fix overreporting of drops in dropwatchffd7a1dcaeopenvswitch: Fix double reporting of drops in dropwatchd449d00a8dnet: ethernet: ti: davinci_mdio: Add workaround for errata i2329624f03a027ice: set tx_tstamps when creating new Tx rings via ethtool2e52d858debpftool: Clear errno after libcap's checks75995ce1c9wifi: brcmfmac: fix invalid address access when enabling SCAN log level83b9496975NFSD: fix use-after-free on source server when doing inter-server copy118dc74b2bNFSD: Return nfserr_serverfault if splice_ok but buf->pages have data066b1302f2x86/entry: Work around Clang __bdos() bug06c56c9d5dACPI: x86: Add a quirk for Dell Inspiron 14 2-in-1 for StorageD3Enable6733222f2cARM: decompressor: Include .data.rel.ro.local5614908434thermal: intel_powerclamp: Use get_cpu() instead of smp_processor_id() to avoid crash139bbbd011powercap: intel_rapl: fix UBSAN shift-out-of-bounds issuea1387ae83eMIPS: BCM47XX: Cast memcmp() of function to (void *)c2790fede9cpufreq: intel_pstate: Add Tigerlake support in no-HWP mode30eca146c8ACPI: tables: FPDT: Don't call acpi_os_map_memory() on invalid phys address5374638222ACPI: video: Add Toshiba Satellite/Portege Z830 quirk7ed95b0803rcu-tasks: Convert RCU_LOCKDEP_WARN() to WARN_ONCE()cf38a05eb1rcu: Back off upon fill_page_cache_func() allocation failure3e2d8b89f0rcu: Avoid triggering 
strict-GP irq-work when RCU is idle27d3e646ddfs: dlm: fix race in lowcommsb6b87460f4selftest: tpm2: Add Client.__del__() to close /dev/tpm* handle497d736784f2fs: fix to account FS_CP_DATA_IO correctlyfb1dcc2a9ef2fs: fix race condition on setting FI_NO_EXTENT flag6ddbd411a0ACPI: APEI: do not add task_work to kernel thread to avoid memory leak21f1ba52b8thermal/drivers/qcom/tsens-v0_1: Fix MSM8939 fourth sensor hw_id172c8a24fccrypto: cavium - prevent integer overflow loading firmware12acfa1059crypto: marvell/octeontx - prevent integer overflowsc963ce2fa0kbuild: rpm-pkg: fix breakage when V=1 is used059ce6b68bkbuild: remove the target in signal traps when interrupted1e9c23db31tracing/osnoise: Fix possible recursive locking in stop_per_cpu_kthreads84795de93etracing: kprobe: Make gen test module work in arm and riscv867fce09aatracing: kprobe: Fix kprobe event gen test module on exita9990f24adiommu/iova: Fix module config properlyf0cac6cc02cifs: return correct error in ->calc_signature()1f1ab76e25crypto: qat - fix DMA transfer direction393307b99acrypto: inside-secure - Change swab to swab3293538944abcrypto: ccp - Release dma channels before dmaengine unrgister779a9930f3crypto: akcipher - default implementation for setting a private key0c7043a5b5iommu/omap: Fix buffer overflow in debugfs046803b74dcgroup/cpuset: Enable update_tasks_cpumask() on top_cpuset771d8aa02dcrypto: hisilicon/qm - fix missing put dfx access9bf3ec61a2crypto: qat - fix default value of WDT timer3bfc220e5chwrng: imx-rngc - Moving IRQ handler registering after imx_rngc_irq_mask_clear()507128a0e3cgroup: Honor caller's cgroup NS when resolving path8ffe511b7dhwrng: arm-smccc-trng - fix NO_ENTROPY handling2720934713crypto: hisilicon/zip - fix mismatch in get/set sgl_sge_nre0b4ebf598crypto: sahara - don't sleep when in softirq8484023b57powerpc/pseries/vas: Pass hw_cpu_id to node associativity HCALL7f536a8cb6powerpc/kprobes: Fix null pointer reference in arch_prepare_kprobe()1f98f8f435powerpc: Fix SPE Power ISA 
properties for e500v1 platforms72c5b7110fpowerpc/64s: Fix GENERIC_CPU build flags for PPC970 / G5399afe92f6x86/hyperv: Fix 'struct hv_enlightened_vmcs' definition592b302d8bpowerpc: Fix fallocate and fadvise64_64 compat parameter combination61af84b3dbpowerpc/powernv: add missing of_node_put() in opal_export_attrs()5be9cb6c06powerpc/pci_dn: Add missing of_node_put()5a13d3f1afpowerpc/sysdev/fsl_msi: Add missing of_node_put()b0c0490b3cpowerpc/math_emu/efp: Include module.h93379dc92dpowerpc/configs: Properly enable PAPR_SCM in pseries_defconfig25a4fb0e1amailbox: bcm-ferxrm-mailbox: Fix error check for dma_map_sgb8fcd9ab0fmailbox: mpfs: account for mbox offsets while sendingba22643595mailbox: mpfs: fix handling of the reg propertyfad007a315clk: ast2600: BCLK comes from EPLL3441076f83clk: ti: dra7-atl: Fix reference leak in of_dra7_atl_clk_probe9209e6bab7clk: imx: scu: fix memleak on platform_device_add() failsbdf72f2d64clk: bcm2835: fix bcm2835_clock_rate_from_divisor declaratione338131e98clk: baikal-t1: Add SATA internal ref clock buffer35b7660275clk: baikal-t1: Add shared xGMAC ref/ptp clocks internal parentb2db8b2c53clk: baikal-t1: Fix invalid xGMAC PTP clock divider435a8a39c6clk: vc5: Fix 5P49V6901 outputs disabling when enabling FODb0bc75fe67spmi: pmic-arb: correct duplicate APID to PPID mapping logicfaabbb103dusb: mtu3: fix failed runtime suspend in host only mode57f66534a4dmaengine: ioat: stop mod_timer from resurrecting deleted timer in __cleanup()8aa96c5bc3clk: mediatek: mt8183: mfgcfg: Propagate rate changes to parent2dafc5afd9mfd: sm501: Add check for platform_driver_register()d43d93dbd8mfd: fsl-imx25: Fix check for platform_get_irq() errorsb940bb3c81mfd: lp8788: Fix an error handling path in lp8788_irq_init() and lp8788_irq_init()0715005c48mfd: lp8788: Fix an error handling path in lp8788_probe()aec1f073f9mfd: fsl-imx25: Fix an error handling path in mx25_tsadc_setup_irq()53bfc1c3c7mfd: intel_soc_pmic: Fix an error handling path in 
intel_soc_pmic_i2c_probe()2f921d62c2fsi: core: Check error number after calling ida_simple_get041c79f6aeRDMA/rxe: Fix resize_finish() in rxe_queue.c959d4ee095clk: qcom: gcc-sm6115: Override default Alpha PLL regs8e556f5573clk: qcom: apss-ipq6018: mark apcs_alias0_core_clk as criticala26b065875scsi: iscsi: iscsi_tcp: Fix null-ptr-deref while calling getpeername()e87fb1fcf8scsi: iscsi: Run recv path from workqueuec2af03a7c1scsi: iscsi: Add recv workqueue helpersd6aafc21bescsi: iscsi: Rename iscsi_conn_queue_work()e45a1516d2scsi: libsas: Fix use-after-free bug in smp_execute_task_sg()6a54f76974serial: 8250: Fix restoring termios speed after suspenda5dba09338firmware: google: Test spinlock on panic path to avoid lockups60d14575d0slimbus: qcom-ngd-ctrl: allow compile testing without QCOM_RPROC_COMMONf19e5b7df5staging: vt6655: fix some erroneous memory clean-up loops433c33c554phy: qualcomm: call clk_disable_unprepare in the error handlingc4293def88tty: serial: fsl_lpuart: disable dma rx/tx use flags in lpuart_dma_shutdowna91a3c2d8dserial: 8250: Toggle IER bits on only after irq has been set up6be8e565a4drivers: serial: jsm: fix some leaks in probe1d05df7757usb: gadget: function: fix dangling pnp_string in f_printer.ced2c66b752xhci: Don't show warning for reinit on known broken suspend4d7d8f5cb2IB: Set IOVA/LENGTH on IB_MR in core/uverbs layerse221b4f16eRDMA/cm: Use SLID in the work completion as the DLID in responder side7a37c58ee7md/raid5: Remove unnecessary bio_put() in raid5_read_one_chunk()b467d9460emd/raid5: Ensure stripe_fill happens on non-read IO with journal5d8259c9d1md: Replace snprintf with scnprintf9e92d5ca54mtd: rawnand: meson: fix bit map use in meson_nfc_ecc_correct()058833dbebata: fix ata_id_has_dipm()dad910a6d4ata: fix ata_id_has_ncq_autosense()21faddeff7ata: fix ata_id_has_devslp()204cc767dcata: fix ata_id_sense_reporting_enabled() and ata_id_has_sense_reporting()5c75d608faRDMA/siw: Fix QP destroy to wait for all references dropped.308cd50f17RDMA/siw: 
Always consume all skbuf data in sk_data_ready() upcall.e58a0b9100RDMA/srp: Fix srp_abort()dc9e4ef6b0RDMA/irdma: Align AE id codes to correct flush code and event84ce1a8e36mtd: rawnand: fsl_elbc: Fix none ECC modebe424a7d53mtd: rawnand: intel: Remove undocumented compatible string445395900bmtd: rawnand: intel: Read the chip-select line from the correct OF nodecbbf9cca47phy: phy-mtk-tphy: fix the phy type setting issuee4be7c9495phy: amlogic: phy-meson-axg-mipi-pcie-analog: Hold reference returned by of_get_parent()88263152ffmtd: devices: docg3: check the return value of devm_ioremap() in the probea0e4ac6988clk: qcom: sm6115: Select QCOM_GDSCaecb632674dyndbg: drop EXPORTed dynamic_debug_exec_queries0d4421f2cbdyndbg: let query-modname override actual module name0c0d9f38b0dyndbg: fix module.dyndbg handling49d85932f7dyndbg: fix static_branch manipulation7cb9b20941dmaengine: hisilicon: Add multi-thread support for a DMA channelb88630d9aadmaengine: hisilicon: Fix CQ head updatee84aeeafe8dmaengine: hisilicon: Disable channels when unregister hisi_dmab94605f5cbfpga: prevent integer overflow in dfl_feature_ioctl_set_irq()11bd8bbdf8misc: ocxl: fix possible refcount leak in afu_ioctl()c23c5e1845RDMA/rxe: Fix the error caused by qp->skf2f405af70RDMA/rxe: Fix "kernel NULL pointer dereference" error2ea7caa968media: xilinx: vipp: Fix refcount leak in xvip_graph_dma_init23624abbc9media: uvcvideo: Use entity get_cur in uvc_ctrl_set6c5da92103media: uvcvideo: Fix memory leak in uvc_gpio_parse4e2042f1admedia: meson: vdec: add missing clk_disable_unprepare on error in vdec_hevc_start()aeffca4344tty: xilinx_uartps: Fix the ignore_statusa8d772c7b8media: exynos4-is: fimc-is: Add of_node_put() when breaking out of loop6225501072HSI: omap_ssi_port: Fix dma_map_sg error check691f23a847HSI: omap_ssi: Fix refcount leak in ssi_probed6e750535bclk: tegra20: Fix refcount leak in tegra20_clock_inite7a57fb92aclk: tegra: Fix refcount leak in tegra114_clock_init417ed4432bclk: tegra: Fix refcount leak 
in tegra210_clock_initca5f338ef1clk: sprd: Hold reference returned by of_get_parent()49343bdf95clk: berlin: Add of_node_put() for of_get_parent()857b719bedclk: qoriq: Hold reference returned by of_get_parent()a8cbce0305clk: oxnas: Hold reference returned by of_get_parent()e0001a565cclk: meson: Hold reference returned by of_get_parent()e900ec4c4fusb: common: debug: Check non-standard control requestsc11f48764cRDMA/mlx5: Don't compare mkey tags in DEVX indirect mkeycd35ad9a7diio: magnetometer: yas530: Change data type of hard_offsets to signed23fafc2e2ciio: ABI: Fix wrong format of differential capacitance channel ABI.8169da520eiio: inkern: fix return value in devm_of_iio_channel_get_by_name()504e8807feiio: inkern: only release the device node when done with itb0d4fcc3eciio: adc: at91-sama5d2_adc: disable/prepare buffer on suspend/resume5db9b840aciio: adc: at91-sama5d2_adc: lock around oversampling and sample freqc5c63736d2iio: adc: at91-sama5d2_adc: check return status for pressure and touch5f1654a0e5iio: adc: at91-sama5d2_adc: fix AT91_SAMA5D2_MR_TRACKTIM_MAX017cf3b0a6ARM: dts: exynos: fix polarity of VBUS GPIO of Origen6c93b683cearm64: ftrace: fix module PLTs with mcountbbf64eb102ext4: don't run ext4lazyinit for read-only filesystems7a00a23207ARM: Drop CMDLINE_* dependency on ATAGS2af04fe87eARM: dts: exynos: correct s5k6a3 reset polarity on Midas family2134214bc4arm64: dts: ti: k3-j7200: fix main pinmux range7247a1d7a4soc/tegra: fuse: Drop Kconfig dependency on TEGRA20_APB_DMA4f7892f242ia64: export memory_add_physaddr_to_nid to fix cxl build error2ef01657b2ARM: dts: kirkwood: lsxl: remove first ethernet portbf7caa3c5cARM: dts: kirkwood: lsxl: fix serial line42ce4c73a4ARM: dts: turris-omnia: Fix mpp26 pin name and comment96d8f2b43eARM: dts: imx6qdl-kontron-samx6i: hook up DDC i2c bus08ada28d1dsoc: qcom: smem_state: Add refcounting for the 'state->of_node'96e0028debsoc: qcom: smsm: Fix refcount leak bugs in qcom_smsm_probe()a29b6eb959locks: fix TOCTOU race when 
granting write lease7e053784c4memory: of: Fix refcount leak bug in of_lpddr3_get_ddr_timings()2680690f9cmemory: of: Fix refcount leak bug in of_get_ddr_timings()566b143aa5memory: pl353-smc: Fix refcount leak bug in pl353_smc_probe()10df962300ALSA: hda/hdmi: Don't skip notification handling during PM operationcc756b79a5ASoC: mt6660: Fix PM disable depth imbalance in mt6660_i2c_probef9cb3bd557ASoC: wm5102: Fix PM disable depth imbalance in wm5102_probeb7dda65fa8ASoC: wm5110: Fix PM disable depth imbalance in wm5110_probeb2bc9fc56aASoC: wm8997: Fix PM disable depth imbalance in wm8997_probe3c3ef19a88mmc: wmt-sdmmc: Fix an error handling path in wmt_mci_probe()b14dc26227ALSA: dmaengine: increment buffer pointer atomicallyf5f1f5ee50ASoC: da7219: Fix an error handling path in da7219_register_dai_clks()f910aca076ASoC: codecs: tx-macro: fix kcontrol putb47a37ad4adrm/vmwgfx: Fix memory leak in vmw_mksstat_add_ioctl()bdf54d4b00drm/msm/dp: correct 1.62G link rate at dp_catalog_ctrl_config_msa()635e7700c5drm/msm/dpu: index dpu_kms->hw_vbif using vbif_idx4f85988467ASoC: eureka-tlv320: Hold reference returned from of_find_xxx API64545b8a96mmc: au1xmmc: Fix an error handling path in au1xmmc_probe()3ba3814c00drm/amdgpu: Fix memory leak in hpd_rx_irq_create_workqueue()a5ce83e85ddrm/omap: dss: Fix refcount leak bugsf5f599daa0drm/bochs: fix blanking928ac9fc1aALSA: hda: beep: Simplify keep-power-at-enable behaviorfbb88a7c84ASoC: rsnd: Add check for rsnd_mod_power_on4610e7a411drm/bridge: megachips: Fix a null pointer dereference bug079c550c57drm/amdgpu: add missing pci_disable_device() in amdgpu_pmops_runtime_resume()c12daccc90platform/chrome: cros_ec_typec: Correct alt mode indexc317d2b8a4platform/x86: msi-laptop: Fix resource cleanup0e21d41bc7platform/x86: msi-laptop: Fix old-ec check for backlight registering6bc81c1b63ASoC: tas2764: Fix mute/unmutee644497c53ASoC: tas2764: Drop conflicting set_bias_level power setting35bd912ed6ASoC: tas2764: Allow mono 
streamsfd1d3b2657platform/chrome: fix memory corruption in ioctl27bb672c04platform/chrome: fix double-free in chromeos_laptop_prepare()57dfb855bcASoC: mt6359: fix tests for platform_get_irq() failure8a475a7732drm:pl111: Add of_node_put() when breaking out of for_each_available_child_of_node()56d2233cf5drm/dp_mst: fix drm_dp_dpcd_read return value checksfe6eb3d0c8drm/bridge: parade-ps8640: Fix regulator supply order60630834fadrm/virtio: Correct drm_gem_shmem_get_sg_table() error handling26c1b4cfe5drm/mipi-dsi: Detach devices when removing the host652042135edrm/bridge: Avoid uninitialized variable warningf369fb4deedrm: bridge: adv7511: unregister cec i2c device after cec adapter20609125b8drm: bridge: adv7511: fix CEC power down control register offseta624161ebenet: mvpp2: fix mvpp2 debugfs leak7aef5082c5once: add DO_ONCE_SLOW() for sleepable contexts77bfd26cbbnet/ieee802154: reject zero-sized raw_sendmsg()dc4e9cd6d6net: wwan: iosm: Call mutex_init before locking it0b6516a4e3bnx2x: fix potential memory leak in bnx2x_tpa_stop()30bfa5aa72net: rds: don't hold sock lock when cancelling work from rds_tcp_reset_callbacks()f828333ca9hwmon: (pmbus/mp2888) Fix sensors readouts for MPS Multi-phase mp2888 controllerc91b922b41spi: Ensure that sg_table won't be used after being freed49d429760dtcp: fix tcp_cwnd_validate() to not forget is_cwnd_limited19d636b663sctp: handle the error returned from sctp_auth_asoc_init_active_key7bfa18b05fmISDN: fix use-after-free bugs in l1oip timer handlers6f1991a940eth: alx: take rtnl_lock on resumee28a4e7f02vhost/vsock: Use kvmalloc/kvfree for larger packets.5dbdd690edwifi: rtl8xxxu: Fix AIFS written to REG_EDCA_*_PARAM432eecffcfspi: s3c64xx: Fix large transfers with DMA1454a26cb1netfilter: nft_fib: Fix for rpath check with VRF devices7d98b26684xfrm: Reinject transport-mode packets through workqueue397e880acfBluetooth: hci_core: Fix not handling link timeouts propertly1331d3e1f9i2c: mlxbf: support lock mechanism9233ab8198skmsg: Schedule psock work 
if the cached skb exists on the psock44f1dc2e82spi/omap100k:Fix PM disable depth imbalance in omap1_spi100k_probedaa5239ea4spi: dw: Fix PM disable depth imbalance in dw_spi_bt1_probe6b94115186x86/cpu: Include the header of init_ia32_feat_ctl()'s prototype3c27a13807x86/microcode/AMD: Track patch allocation size explicitly3e2b805a68wifi: ath11k: fix number of VHT beamformee spatial streams5a6827cdc2netfilter: conntrack: revisit the gc initial rescheduling bias9c39ca418bnetfilter: conntrack: fix the gc rescheduling delayb8917dce21Bluetooth: hci_{ldisc,serdev}: check percpu_init_rwsem() failurec087c35292bpf: Ensure correct locking around vulnerable function find_vpid()a0f15af17bnet: fs_enet: Fix wrong check in do_pd_setupee7c5e814fBluetooth: RFCOMM: Fix possible deadlock on socket shutdown/release57d4f2f8a6wifi: mt76: mt7915: do not check state before configuring implicit beamformdea9093f24wifi: mt76: mt7615: add mt7615_mutex_acquire/release in mt7615_sta_set_decap_offload817e8b75aewifi: mt76: sdio: fix transmitting packet hangs5dc095a37fwifi: rtl8xxxu: Remove copy-paste leftover in gen2_update_rate_mask9973f78c19wifi: rtl8xxxu: gen2: Fix mistake in path B IQ calibration5d9222c680bpf: btf: fix truncated last_member_type_id in btf_struct_resolve4ce47c5545spi: meson-spicc: do not rely on busy flag in pow2 clk ops36c484bac9wifi: rtl8xxxu: Fix skb misuse in TX queue selectionfefd2269e6spi: qup: add missing clk_disable_unprepare on error in spi_qup_pm_resume_runtime()e22f649918spi: qup: add missing clk_disable_unprepare on error in spi_qup_resume()37005a9486selftests/xsk: Avoid use-after-free on ctx69995c64e5wifi: rtw88: add missing destroy_workqueue() on error path in rtw_core_init()6f9484e969wifi: rtl8xxxu: tighten bounds checking in rtl8xxxu_read_efuse()d091771f51Bluetooth: btusb: mediatek: fix WMT failure during runtime suspendf91e25cfa5bpf: Use this_cpu_{inc|dec|inc_return} for bpf_task_storage_busy0e13425104bpf: Propagate error from htab_lock_bucket() to 
userspace0b00c6130cbpf: Disable preemption when increasing per-cpu map_locked68ab769033xsk: Fix backpressure mechanism on Tx0559a6d96ax86/resctrl: Fix to restore to original value when re-enabling hardware prefetch registere962e458bfspi: mt7621: Fix an error message in mt7621_spi_probe()0a16bbc8b0bpftool: Fix a wrong type cast in btf_dumper_int6e8eadfa9bwifi: mac80211: allow bw change during channel switch in mesh4ed5155043bpf: Fix reference state management for synchronous callbacks3d0a101e71leds: lm3601x: Don't use mutex after it was destroyed54a3201f3cwifi: ath10k: add peer map clean up for peer delete in ath10k_sta_state()714536ff6fwifi: rtlwifi: 8192de: correct checking of IQK reload80a474502eNFSD: Fix handling of oversized NFSv4 COMPOUND requestsdc7f225090NFSD: Protect against send buffer overflow in NFSv2 READDIRcedaf73c8bSUNRPC: Fix svcxdr_init_encode's buflen calculation6b55707ff8SUNRPC: Fix svcxdr_init_decode's end-of-buffer calculationaed8816305nfsd: Fix a memory leak in an error handling path5c4b234c44objtool: Preserve special st_shndx indexes in elf_update_symbol425a2a9469ARM: 9247/1: mm: set readonly for MT_MEMORY_RO with ARM_LPAE2647b20e04ARM: 9244/1: dump: Fix wrong pg_level in walk_pmd()93296e7ab7MIPS: SGI-IP27: Fix platform-device leak in bridge_platform_create()993b13abdeMIPS: SGI-IP27: Free some unused memory959855093fsh: machvec: Use char[] for section boundaries91fafd22f8thermal: cpufreq_cooling: Check the policy first in cpufreq_cooling_register()81fb3ee298ntfs3: rework xattr handlers and switch to POSIX ACL VFS helpers33d478eee2userfaultfd: open userfaultfds with O_RDONLY10918ebecdima: fix blocking of security.ima xattrs of unsupported algorithmsb7af9b8be8selinux: use "grep -E" instead of "egrep"73b8218ef4smb3: must initialize two ACL struct fields to zeroadf428ae46drm/amd/display: Fix vblank refcount in vrr transition60a5174525drm/i915: Fix watermark calculations for gen12+ CCS+CC modifier01bd3eaa53drm/i915: Fix watermark calculations for 
gen12+ MC CCS modifier20018a252fdrm/i915: Fix watermark calculations for gen12+ RC CCS modifier861f085f81drm/nouveau: fix a use-after-free in nouveau_gem_prime_import_sg_table()446d40e2a8drm/nouveau/kms/nv140-: Disable interlacing4dab0d27a4staging: greybus: audio_helper: remove unused and wrong debugfs usage28eb4bdb23KVM: VMX: Drop bits 31:16 when shoving exception error code into VMCS4f7b1e7d0fKVM: nVMX: Don't propagate vmcs12's PERF_GLOBAL_CTRL settings to vmcs02be1a6a61f1KVM: nVMX: Unconditionally purge queued/injected events on nested "exit"379de01906KVM: x86/emulator: Fix handing of POP SS to correctly set interruptibilitye3e5baa368blk-wbt: call rq_qos_add() after wb_normal is initializede8e0a6f4b8media: cedrus: Fix endless loop in cedrus_h265_skip_bits()b76fac61c3media: cedrus: Set the platform driver data earlierb19254eadaefi: libstub: drop pointless get_memory_map() call5cda4a11b4thunderbolt: Explicitly enable lane adapter hotplug events at startupd9c79fbcbdtracing: Fix reading strings from synthetic eventsb9ab154d22tracing: Add "(fault)" name injection to kernel probes8ae88c4842tracing: Move duplicate code of trace_kprobe/eprobe.c into header84f4be2093tracing: Add ioctl() to force ring buffer waiters to wake up32eb54a986tracing: Wake up waiters when tracing is disabled2475de2bc0tracing: Wake up ring buffer waiters on closing of the file48272aa48dtracing: Disable interrupt or preemption before acquiring arch_spinlock_td4ab9bc5f5ring-buffer: Fix race between reset page and reading pagebe60f698c2ring-buffer: Add ring_buffer_wake_waiters()5201dd81aering-buffer: Check pending waiters when doing wake ups as wellbc6d4e9d64ring-buffer: Have the shortest_full queue be the shortest not longeste8d1167385ring-buffer: Allow splice to read previous partially read pagesfb96b7489fftrace: Properly unset FTRACE_HASH_FL_MOD31dc1727c1livepatch: fix race between fork and KLP transition36997b75bbext4: update 'state->fc_regions_size' after successful memory 
allocation417b0455a0ext4: fix potential memory leak in ext4_fc_record_regions()9b5eb368a8ext4: fix potential memory leak in ext4_fc_record_modified_inode()ef1607c991ext4: fix miss release buffer head in ext4_fc_write_inoded29fa1ab4eext4: fix dir corruption when ext4_dx_add_entry() failsd12471b416ext4: place buffer head allocation before handle start46e5f470a1ext4: ext4_read_bh_lock() should submit IO if the buffer isn't uptodate1f5e643b38ext4: don't increase iversion counter for ea_inodesdd366295d1ext4: fix check for block being out of directory size4a967fe8b0ext4: make ext4_lazyinit_thread freezable533c60a0b9ext4: fix null-ptr-deref in ext4_write_infod8e4af8314ext4: avoid crash when inline data creation follows DIO write56fcd0788fjbd2: add miss release buffer head in fc_do_one_pass()d11d2ded29jbd2: fix potential use-after-free in jbd2_fc_wait_bufse7385c868ejbd2: fix potential buffer head reference count leakd87fe290a5jbd2: wake up journal waiters in FIFO order, not LIFO7434626c5ehardening: Remove Clang's enable flag for -ftrivial-auto-var-init=zero095493833bhardening: Avoid harmless Clang option under CONFIG_INIT_STACK_ALL_ZERO73687c5391f2fs: fix to do sanity check on summary infoed854f10e6f2fs: fix to do sanity check on destination blkaddr during recovery7f10357c90f2fs: increase the limit for reserve_root0035b84223f2fs: flush pending checkpoints when freezing superab49589754f2fs: complete checkpoints during remount0a408c6212btrfs: set generation before calling btrfs_clean_tree_block in btrfs_init_new_buffer4b996a3014btrfs: fix race between quota enable and quota rescan ioctl0d94230343fs: record I_DIRTY_TIME even if inode already has I_DIRTY_INODE95a520b591ksmbd: Fix user namespace mappinga19f316406ksmbd: Fix wrong return value and message length check in smb2_ioctl()39b6855628ksmbd: fix endless loop when encryption for response fails2b0897e336fbdev: smscufx: Fix use-after-free in ufx_ops_open()aa7b2c927epinctrl: rockchip: add pinmux_ops.gpio_set_direction 
callback5d97378b36gpio: rockchip: request GPIO mux to pinctrl when setting directione0b1c16fdascsi: qedf: Populate sysfs attributes for vport1d567179f2slimbus: qcom-ngd: cleanup in probe error pathfa0aab2e45slimbus: qcom-ngd: use correct error in message of pdr_add_lookup() failureba2159df18powerpc/boot: Explicitly disable usage of SPE instructions9df2a9cdadpowercap: intel_rapl: Use standard Energy Unit for SPR Dram RAPL domain75d9de25a6NFSD: Protect against send buffer overflow in NFSv3 READ2be9331ca6NFSD: Protect against send buffer overflow in NFSv2 READ071a076fd1NFSD: Protect against send buffer overflow in NFSv3 READDIR209a94c519serial: 8250: Request full 16550A feature probing for OxSemi PCIe devices63a3d75cf1serial: 8250: Let drivers request full 16550A feature probing26e5c79e67PCI: Sanitise firmware BAR assignments behind a PCI-PCI bridge7c16d0a4e6xen/gntdev: Accommodate VMA splitting1cb73704cbxen/gntdev: Prevent leaking grants43bed0a13amm/mmap: undo ->mmap() when arch_validate_flags() fails2b0072d33emm/damon: validate if the pmd entry is present before accessing91c4eb16e8arm64: errata: Add Cortex-A55 to the repeat tlbi listfc0f921b7edrm/udl: Restore display mode on resume0640934725drm/virtio: Use appropriate atomic state in virtio_gpu_plane_cleanup_fb()fb3910436bdrm/virtio: Unlock reservations on virtio_gpu_object_shmem_init() errorf122bcb34fdrm/virtio: Check whether transferred 2D BO is shmema95fb5d55admaengine: mxs: use platform_driver_registere7a3334e83Revert "drm/amdgpu: use dirty framebuffer helper"4bdedc3b53nvme-pci: set min_align_mask before calculating max_hw_sectors32aa0b3f0cnvme-multipath: fix possible hang in live ns resize with ANA access9391cc3a78nvmem: core: Fix memleak in nvmem_register()7efe61dc6aUM: cpuinfo: Fix a warning for CONFIG_CPUMASK_OFFSTACK81ab826a28riscv: Pass -mno-relax only on lld < 15.0.07780bb02a0riscv: always honor the CONFIG_CMDLINE_FORCE when parsing dtbc657b70e80riscv: Make VM_WRITE imply VM_READ3c3c4fa118riscv: Allow 
PROT_WRITE-only mmap()af3aaee08dparisc: fbdev/stifb: Align graphics memory size to 4MBdc235db7b7RISC-V: Make port I/O string accessors actually work8c487db000riscv: topology: fix default topology reportingd46c24f307arm64: topology: move store_cpu_topology() to shared codefcf0f6cbb6regulator: qcom_rpm: Fix circular deferral regression78d81a8a8cnet: thunderbolt: Enable DMA paths only after rings are enabled3281e81ce9hwmon: (gsc-hwmon) Call of_node_get() before of_find_xxx APIe1ab98ec2bASoC: wcd934x: fix order of Slimbus unprepare/disablea2140a9922ASoC: wcd9335: fix order of Slimbus unprepare/disabled0507b36daplatform/chrome: cros_ec_proto: Update version on GET_NEXT_EVENT failurefcfeecca15quota: Check next/prev free block number after reading from quota file17214cfab7HID: multitouch: Add memory barriers219e4a0f9dfs: dlm: handle -EBUSY first in lock arg validation34ed22dd28fs: dlm: fix race between test_bit() and queue_work()7fa5304c4bi2c: designware: Fix handling of real but unexpected device interruptsf9effcefa8mmc: sdhci-sprd: Fix minimum clock limita4df91a88ccan: kvaser_usb_leaf: Fix CAN state after restart0c28c2c0cfcan: kvaser_usb_leaf: Fix TX queue out of sync after restartb8c4f6345ecan: kvaser_usb_leaf: Fix overread with an invalid commandde4434d682can: kvaser_usb: Fix use of uninitialized completion354d768e31usb: add quirks for Lenovo OneLink+ Dock103b459590xhci: dbc: Fix memory leak in xhci_alloc_dbc()39f4c90b99iio: pressure: dps310: Reset chip after timeoutbc493cd754iio: pressure: dps310: Refactor startup procedure5f6bfc1926iio: adc: ad7923: fix channel readings for some variants1be580ed84iio: ltc2497: Fix reading conversion resultsef4018707diio: dac: ad5593r: Fix i2c read protocol requirements60480291c1cifs: Fix the error length of VALIDATE_NEGOTIATE_INFO message0d814a2199cifs: destage dirty pages before re-reading them for cache=none15993e9a9bhv_netvsc: Fix race between VF offering and VF association message from hostf9dc33f231io_uring/net: don't update 
msg_name if not provideda1bd289c10mtd: rawnand: atmel: Unmap streaming DMA mappings3e4d2375d1ALSA: hda/realtek: Add Intel Reference SSID to support headset keys41e83faf03ALSA: hda/realtek: Add quirk for ASUS GV601R laptopc01f385c70ALSA: hda/realtek: Correct pin configs for ASUS G533Z0d50e05eccALSA: hda/realtek: remove ALC289_FIXUP_DUAL_SPK for Dell 5530ec439b97d9ALSA: usb-audio: Fix NULL dererence at error path0672215994ALSA: usb-audio: Fix potential memory leaks550ca3082eALSA: rawmidi: Drop register_mutex in snd_rawmidi_free()45899fae65ALSA: oss: Fix potential deadlock at unregistration5ca155aa79Revert "fs: check FMODE_LSEEK to control internal pipe splicing" And update the .xml file to handle some private pointer changes and an abi preservation change: type 'struct sk_buff' changed member 'union { struct { __u8 scm_io_uring; __u8 android_kabi_reserved1_padding1; __u16 android_kabi_reserved1_padding2; __u32 android_kabi_reserved1_padding3; }; struct { u64 android_kabi_reserved1; }; union { }; }' was added member 'u64 android_kabi_reserved1' was removed type 'struct super_block' changed member changed from 'struct key * s_master_keys' to 'struct fscrypt_keyring * s_master_keys' type changed from 'struct key *' to 'struct fscrypt_keyring *' pointed-to type changed from 'struct key' to 'struct fscrypt_keyring' type 'struct fscrypt_info' changed member changed from 'struct key * ci_master_key' to 'struct fscrypt_master_key * ci_master_key' type changed from 'struct key *' to 'struct fscrypt_master_key *' pointed-to type changed from 'struct key' to 'struct fscrypt_master_key' Change-Id: Id0a60a4e0d8a036fffd52dad04135cf57d98f09f Signed-off-by: Greg Kroah-Hartman <gregkh@google.com>
3849 lines
103 KiB
C
3849 lines
103 KiB
C
// SPDX-License-Identifier: GPL-2.0-only
|
|
/*
|
|
* mm/mmap.c
|
|
*
|
|
* Written by obz.
|
|
*
|
|
* Address space accounting code <alan@lxorguk.ukuu.org.uk>
|
|
*/
|
|
|
|
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
|
|
|
|
#include <linux/kernel.h>
|
|
#include <linux/slab.h>
|
|
#include <linux/backing-dev.h>
|
|
#include <linux/mm.h>
|
|
#include <linux/mm_inline.h>
|
|
#include <linux/vmacache.h>
|
|
#include <linux/shm.h>
|
|
#include <linux/mman.h>
|
|
#include <linux/pagemap.h>
|
|
#include <linux/swap.h>
|
|
#include <linux/syscalls.h>
|
|
#include <linux/capability.h>
|
|
#include <linux/init.h>
|
|
#include <linux/file.h>
|
|
#include <linux/fs.h>
|
|
#include <linux/personality.h>
|
|
#include <linux/security.h>
|
|
#include <linux/hugetlb.h>
|
|
#include <linux/shmem_fs.h>
|
|
#include <linux/profile.h>
|
|
#include <linux/export.h>
|
|
#include <linux/mount.h>
|
|
#include <linux/mempolicy.h>
|
|
#include <linux/rmap.h>
|
|
#include <linux/mmu_notifier.h>
|
|
#include <linux/mmdebug.h>
|
|
#include <linux/perf_event.h>
|
|
#include <linux/audit.h>
|
|
#include <linux/khugepaged.h>
|
|
#include <linux/uprobes.h>
|
|
#include <linux/rbtree_augmented.h>
|
|
#include <linux/notifier.h>
|
|
#include <linux/memory.h>
|
|
#include <linux/printk.h>
|
|
#include <linux/userfaultfd_k.h>
|
|
#include <linux/moduleparam.h>
|
|
#include <linux/pkeys.h>
|
|
#include <linux/oom.h>
|
|
#include <linux/sched/mm.h>
|
|
|
|
#include <linux/uaccess.h>
|
|
#include <asm/cacheflush.h>
|
|
#include <asm/tlb.h>
|
|
#include <asm/mmu_context.h>
|
|
|
|
#define CREATE_TRACE_POINTS
|
|
#include <trace/events/mmap.h>
|
|
#undef CREATE_TRACE_POINTS
|
|
#include <trace/hooks/mm.h>
|
|
|
|
#include "internal.h"
|
|
|
|
#ifndef arch_mmap_check
|
|
#define arch_mmap_check(addr, len, flags) (0)
|
|
#endif
|
|
|
|
/*
 * Address-space layout randomization knobs: number of random bits mixed
 * into mmap base addresses.  The min/max bounds come from Kconfig; the
 * current value is runtime-tunable (presumably via sysctl — set
 * elsewhere, not visible in this file).
 */
#ifdef CONFIG_HAVE_ARCH_MMAP_RND_BITS
const int mmap_rnd_bits_min = CONFIG_ARCH_MMAP_RND_BITS_MIN;
const int mmap_rnd_bits_max = CONFIG_ARCH_MMAP_RND_BITS_MAX;
int mmap_rnd_bits __read_mostly = CONFIG_ARCH_MMAP_RND_BITS;
#endif
/* Same knobs for 32-bit (compat) tasks on 64-bit kernels. */
#ifdef CONFIG_HAVE_ARCH_MMAP_RND_COMPAT_BITS
const int mmap_rnd_compat_bits_min = CONFIG_ARCH_MMAP_RND_COMPAT_BITS_MIN;
const int mmap_rnd_compat_bits_max = CONFIG_ARCH_MMAP_RND_COMPAT_BITS_MAX;
int mmap_rnd_compat_bits __read_mostly = CONFIG_ARCH_MMAP_RND_COMPAT_BITS;
#endif

/*
 * When set, RLIMIT_DATA accounting failures are ignored for mappings.
 * Exposed as a writable (0644) module parameter via core_param().
 */
static bool ignore_rlimit_data;
core_param(ignore_rlimit_data, ignore_rlimit_data, bool, 0644);
|
|
|
|
/*
 * Forward declaration: tears down the page range [start, end).  Being
 * static, the definition must appear later in this file.
 */
static void unmap_region(struct mm_struct *mm,
		struct vm_area_struct *vma, struct vm_area_struct *prev,
		unsigned long start, unsigned long end);
|
|
|
|
/* description of effects of mapping type and prot in current implementation.
|
|
* this is due to the limited x86 page protection hardware. The expected
|
|
* behavior is in parens:
|
|
*
|
|
* map_type prot
|
|
* PROT_NONE PROT_READ PROT_WRITE PROT_EXEC
|
|
* MAP_SHARED r: (no) no r: (yes) yes r: (no) yes r: (no) yes
|
|
* w: (no) no w: (no) no w: (yes) yes w: (no) no
|
|
* x: (no) no x: (no) yes x: (no) yes x: (yes) yes
|
|
*
|
|
* MAP_PRIVATE r: (no) no r: (yes) yes r: (no) yes r: (no) yes
|
|
* w: (no) no w: (no) no w: (copy) copy w: (no) no
|
|
* x: (no) no x: (no) yes x: (no) yes x: (yes) yes
|
|
*
|
|
* On arm64, PROT_EXEC has the following behaviour for both MAP_SHARED and
|
|
* MAP_PRIVATE (with Enhanced PAN supported):
|
|
* r: (no) no
|
|
* w: (no) no
|
|
* x: (yes) yes
|
|
*/
|
|
/*
 * Maps the 16 combinations of VM_{READ,WRITE,EXEC,SHARED} to page
 * protections; indexed with (vm_flags & 0xf) by vm_get_page_prot()
 * below.  The first 8 entries (__P...) are the MAP_PRIVATE variants,
 * the last 8 (__S...) the MAP_SHARED ones — see the behaviour table
 * in the comment above.
 */
pgprot_t protection_map[16] __ro_after_init = {
	__P000, __P001, __P010, __P011, __P100, __P101, __P110, __P111,
	__S000, __S001, __S010, __S011, __S100, __S101, __S110, __S111
};
|
|
|
|
#ifndef CONFIG_ARCH_HAS_FILTER_PGPROT
/*
 * Fallback for architectures that provide no pgprot filtering hook:
 * pass the protection value through unchanged.
 */
static inline pgprot_t arch_filter_pgprot(pgprot_t prot)
{
	return prot;
}
#endif
|
|
|
|
pgprot_t vm_get_page_prot(unsigned long vm_flags)
|
|
{
|
|
pgprot_t ret = __pgprot(pgprot_val(protection_map[vm_flags &
|
|
(VM_READ|VM_WRITE|VM_EXEC|VM_SHARED)]) |
|
|
pgprot_val(arch_vm_get_page_prot(vm_flags)));
|
|
|
|
return arch_filter_pgprot(ret);
|
|
}
|
|
EXPORT_SYMBOL(vm_get_page_prot);
|
|
|
|
/* Recompute the protection for @vm_flags on top of @oldprot. */
static pgprot_t vm_pgprot_modify(pgprot_t oldprot, unsigned long vm_flags)
{
	pgprot_t newprot = vm_get_page_prot(vm_flags);

	return pgprot_modify(oldprot, newprot);
}
|
|
|
|
/* Update vma->vm_page_prot to reflect vma->vm_flags. */
|
|
void vma_set_page_prot(struct vm_area_struct *vma)
|
|
{
|
|
unsigned long vm_flags = vma->vm_flags;
|
|
pgprot_t vm_page_prot;
|
|
|
|
vm_page_prot = vm_pgprot_modify(vma->vm_page_prot, vm_flags);
|
|
if (vma_wants_writenotify(vma, vm_page_prot)) {
|
|
vm_flags &= ~VM_SHARED;
|
|
vm_page_prot = vm_pgprot_modify(vm_page_prot, vm_flags);
|
|
}
|
|
/* remove_protection_ptes reads vma->vm_page_prot without mmap_lock */
|
|
WRITE_ONCE(vma->vm_page_prot, vm_page_prot);
|
|
}
|
|
|
|
/*
|
|
* Requires inode->i_mapping->i_mmap_rwsem
|
|
*/
|
|
static void __remove_shared_vm_struct(struct vm_area_struct *vma,
|
|
struct file *file, struct address_space *mapping)
|
|
{
|
|
if (vma->vm_flags & VM_SHARED)
|
|
mapping_unmap_writable(mapping);
|
|
|
|
flush_dcache_mmap_lock(mapping);
|
|
vma_interval_tree_remove(vma, &mapping->i_mmap);
|
|
flush_dcache_mmap_unlock(mapping);
|
|
}
|
|
|
|
/*
|
|
* Unlink a file-based vm structure from its interval tree, to hide
|
|
* vma from rmap and vmtruncate before freeing its page tables.
|
|
*/
|
|
void unlink_file_vma(struct vm_area_struct *vma)
|
|
{
|
|
struct file *file = vma->vm_file;
|
|
|
|
if (file) {
|
|
struct address_space *mapping = file->f_mapping;
|
|
i_mmap_lock_write(mapping);
|
|
__remove_shared_vm_struct(vma, file, mapping);
|
|
i_mmap_unlock_write(mapping);
|
|
}
|
|
}
|
|
|
|
/*
|
|
* Close a vm structure and free it, returning the next.
|
|
*/
|
|
static struct vm_area_struct *remove_vma(struct vm_area_struct *vma)
|
|
{
|
|
struct vm_area_struct *next = vma->vm_next;
|
|
|
|
might_sleep();
|
|
if (vma->vm_ops && vma->vm_ops->close)
|
|
vma->vm_ops->close(vma);
|
|
mpol_put(vma_policy(vma));
|
|
/* fput(vma->vm_file) happens in vm_area_free after an RCU delay. */
|
|
vm_area_free(vma);
|
|
return next;
|
|
}
|
|
|
|
static int do_brk_flags(unsigned long addr, unsigned long request, unsigned long flags,
		struct list_head *uf);
/*
 * brk(2): set the end of the program's data segment.
 *
 * Returns the new brk value on success, or the old (unchanged) brk on
 * failure.  May downgrade mmap_lock to read mode while shrinking.
 */
SYSCALL_DEFINE1(brk, unsigned long, brk)
{
	unsigned long newbrk, oldbrk, origbrk;
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *next;
	unsigned long min_brk;
	bool populate;
	bool downgraded = false;
	LIST_HEAD(uf);

	if (mmap_write_lock_killable(mm))
		return -EINTR;

	/* Saved so the "unchanged" value can be returned on any failure. */
	origbrk = mm->brk;

#ifdef CONFIG_COMPAT_BRK
	/*
	 * CONFIG_COMPAT_BRK can still be overridden by setting
	 * randomize_va_space to 2, which will still cause mm->start_brk
	 * to be arbitrarily shifted
	 */
	if (current->brk_randomized)
		min_brk = mm->start_brk;
	else
		min_brk = mm->end_data;
#else
	min_brk = mm->start_brk;
#endif
	if (brk < min_brk)
		goto out;

	/*
	 * Check against rlimit here. If this check is done later after the test
	 * of oldbrk with newbrk then it can escape the test and let the data
	 * segment grow beyond its set limit the in case where the limit is
	 * not page aligned -Ram Gupta
	 */
	if (check_data_rlimit(rlimit(RLIMIT_DATA), brk, mm->start_brk,
			      mm->end_data, mm->start_data))
		goto out;

	newbrk = PAGE_ALIGN(brk);
	oldbrk = PAGE_ALIGN(mm->brk);
	/* Same page: just record the exact byte value, nothing to (un)map. */
	if (oldbrk == newbrk) {
		mm->brk = brk;
		goto success;
	}

	/*
	 * Always allow shrinking brk.
	 * __do_munmap() may downgrade mmap_lock to read.
	 */
	if (brk <= mm->brk) {
		int ret;

		/*
		 * mm->brk must to be protected by write mmap_lock so update it
		 * before downgrading mmap_lock. When __do_munmap() fails,
		 * mm->brk will be restored from origbrk.
		 */
		mm->brk = brk;
		ret = __do_munmap(mm, newbrk, oldbrk-newbrk, &uf, true);
		if (ret < 0) {
			mm->brk = origbrk;
			goto out;
		} else if (ret == 1) {
			/* ret == 1: __do_munmap downgraded the lock to read. */
			downgraded = true;
		}
		goto success;
	}

	/* Check against existing mmap mappings. */
	next = find_vma(mm, oldbrk);
	/* Keep one guard page between the heap and the next mapping. */
	if (next && newbrk + PAGE_SIZE > vm_start_gap(next))
		goto out;

	/* Ok, looks good - let it rip. */
	if (do_brk_flags(oldbrk, newbrk-oldbrk, 0, &uf) < 0)
		goto out;
	mm->brk = brk;

success:
	/* Pre-fault the new range if the mm defaults to mlocked pages. */
	populate = newbrk > oldbrk && (mm->def_flags & VM_LOCKED) != 0;
	if (downgraded)
		mmap_read_unlock(mm);
	else
		mmap_write_unlock(mm);
	userfaultfd_unmap_complete(mm, &uf);
	if (populate)
		mm_populate(oldbrk, newbrk - oldbrk);
	return brk;

out:
	mmap_write_unlock(mm);
	return origbrk;
}
|
|
|
|
static inline unsigned long vma_compute_gap(struct vm_area_struct *vma)
|
|
{
|
|
unsigned long gap, prev_end;
|
|
|
|
/*
|
|
* Note: in the rare case of a VM_GROWSDOWN above a VM_GROWSUP, we
|
|
* allow two stack_guard_gaps between them here, and when choosing
|
|
* an unmapped area; whereas when expanding we only require one.
|
|
* That's a little inconsistent, but keeps the code here simpler.
|
|
*/
|
|
gap = vm_start_gap(vma);
|
|
if (vma->vm_prev) {
|
|
prev_end = vm_end_gap(vma->vm_prev);
|
|
if (gap > prev_end)
|
|
gap -= prev_end;
|
|
else
|
|
gap = 0;
|
|
}
|
|
return gap;
|
|
}
|
|
|
|
#ifdef CONFIG_DEBUG_VM_RB
|
|
static unsigned long vma_compute_subtree_gap(struct vm_area_struct *vma)
|
|
{
|
|
unsigned long max = vma_compute_gap(vma), subtree_gap;
|
|
if (vma->vm_rb.rb_left) {
|
|
subtree_gap = rb_entry(vma->vm_rb.rb_left,
|
|
struct vm_area_struct, vm_rb)->rb_subtree_gap;
|
|
if (subtree_gap > max)
|
|
max = subtree_gap;
|
|
}
|
|
if (vma->vm_rb.rb_right) {
|
|
subtree_gap = rb_entry(vma->vm_rb.rb_right,
|
|
struct vm_area_struct, vm_rb)->rb_subtree_gap;
|
|
if (subtree_gap > max)
|
|
max = subtree_gap;
|
|
}
|
|
return max;
|
|
}
|
|
|
|
/*
 * Debug walk of the mm's rbtree: checks ordering, overlap, start<=end
 * and the cached rb_subtree_gap of every vma.
 *
 * Returns the number of vmas found, or -1 if any inconsistency was hit.
 */
static int browse_rb(struct mm_struct *mm)
{
	struct rb_root *root = &mm->mm_rb;
	int i = 0, j, bug = 0;
	struct rb_node *nd, *pn = NULL;
	unsigned long prev = 0, pend = 0;

	for (nd = rb_first(root); nd; nd = rb_next(nd)) {
		struct vm_area_struct *vma;
		vma = rb_entry(nd, struct vm_area_struct, vm_rb);
		/* In-order traversal must yield monotonically rising starts. */
		if (vma->vm_start < prev) {
			pr_emerg("vm_start %lx < prev %lx\n",
				  vma->vm_start, prev);
			bug = 1;
		}
		/* A vma must not overlap the previous vma's range. */
		if (vma->vm_start < pend) {
			pr_emerg("vm_start %lx < pend %lx\n",
				  vma->vm_start, pend);
			bug = 1;
		}
		if (vma->vm_start > vma->vm_end) {
			pr_emerg("vm_start %lx > vm_end %lx\n",
				  vma->vm_start, vma->vm_end);
			bug = 1;
		}
		/* Gap cache is updated under page_table_lock; check under it. */
		spin_lock(&mm->page_table_lock);
		if (vma->rb_subtree_gap != vma_compute_subtree_gap(vma)) {
			pr_emerg("free gap %lx, correct %lx\n",
			       vma->rb_subtree_gap,
			       vma_compute_subtree_gap(vma));
			bug = 1;
		}
		spin_unlock(&mm->page_table_lock);
		i++;
		pn = nd;
		prev = vma->vm_start;
		pend = vma->vm_end;
	}
	/* Walk back from the last node: counts must agree. */
	j = 0;
	for (nd = pn; nd; nd = rb_prev(nd))
		j++;
	if (i != j) {
		pr_emerg("backwards %d, forwards %d\n", j, i);
		bug = 1;
	}
	return bug ? -1 : i;
}
|
|
|
|
static void validate_mm_rb(struct rb_root *root, struct vm_area_struct *ignore)
|
|
{
|
|
struct rb_node *nd;
|
|
|
|
for (nd = rb_first(root); nd; nd = rb_next(nd)) {
|
|
struct vm_area_struct *vma;
|
|
vma = rb_entry(nd, struct vm_area_struct, vm_rb);
|
|
VM_BUG_ON_VMA(vma != ignore &&
|
|
vma->rb_subtree_gap != vma_compute_subtree_gap(vma),
|
|
vma);
|
|
}
|
|
}
|
|
|
|
/*
 * Full consistency check of an mm's vma bookkeeping: the linked list,
 * the anon_vma interval trees, map_count, highest_vm_end and the rbtree
 * (via browse_rb()).  BUGs on any mismatch.
 */
static void validate_mm(struct mm_struct *mm)
{
	int bug = 0;
	int i = 0;
	unsigned long highest_address = 0;
	struct vm_area_struct *vma = mm->mmap;

	while (vma) {
		struct anon_vma *anon_vma = vma->anon_vma;
		struct anon_vma_chain *avc;

		if (anon_vma) {
			anon_vma_lock_read(anon_vma);
			list_for_each_entry(avc, &vma->anon_vma_chain, same_vma)
				anon_vma_interval_tree_verify(avc);
			anon_vma_unlock_read(anon_vma);
		}

		highest_address = vm_end_gap(vma);
		vma = vma->vm_next;
		i++;
	}
	if (i != mm->map_count) {
		pr_emerg("map_count %d vm_next %d\n", mm->map_count, i);
		bug = 1;
	}
	if (highest_address != mm->highest_vm_end) {
		pr_emerg("mm->highest_vm_end %lx, found %lx\n",
			  mm->highest_vm_end, highest_address);
		bug = 1;
	}
	/* browse_rb() returns -1 on internal inconsistency. */
	i = browse_rb(mm);
	if (i != mm->map_count) {
		if (i != -1)
			pr_emerg("map_count %d rb %d\n", mm->map_count, i);
		bug = 1;
	}
	VM_BUG_ON_MM(bug, mm);
}
|
|
#else
|
|
#define validate_mm_rb(root, ignore) do { } while (0)
|
|
#define validate_mm(mm) do { } while (0)
|
|
#endif
|
|
|
|
/*
 * Generate the augmented-rbtree callbacks that keep rb_subtree_gap
 * equal to the max vma_compute_gap() over each subtree.
 */
RB_DECLARE_CALLBACKS_MAX(static, vma_gap_callbacks,
			 struct vm_area_struct, vm_rb,
			 unsigned long, rb_subtree_gap, vma_compute_gap)
|
|
|
|
/*
 * Update augmented rbtree rb_subtree_gap values after vma->vm_start or
 * vma->vm_prev->vm_end values changed, without modifying the vma's position
 * in the rbtree.
 */
static void vma_gap_update(struct vm_area_struct *vma)
{
	/*
	 * As it turns out, RB_DECLARE_CALLBACKS_MAX() already created
	 * a callback function that does exactly what we want.
	 */
	vma_gap_callbacks_propagate(&vma->vm_rb, NULL);
}
|
|
|
|
/* Insert an already-linked rb node and rebalance with gap maintenance. */
static inline void vma_rb_insert(struct vm_area_struct *vma,
				 struct rb_root *root)
{
	/* All rb_subtree_gap values must be consistent prior to insertion */
	validate_mm_rb(root, NULL);

	rb_insert_augmented(&vma->vm_rb, root, &vma_gap_callbacks);
}
|
|
|
|
/* Single instantiation point for the (large, inline) augmented erase. */
static void __vma_rb_erase(struct vm_area_struct *vma, struct rb_root *root)
{
	/*
	 * Note rb_erase_augmented is a fairly large inline function,
	 * so make sure we instantiate it only once with our desired
	 * augmented rbtree callbacks.
	 */
	rb_erase_augmented(&vma->vm_rb, root, &vma_gap_callbacks);
}
|
|
|
|
/* Erase @vma from the tree, validating everything but @ignore first. */
static __always_inline void vma_rb_erase_ignore(struct vm_area_struct *vma,
						struct rb_root *root,
						struct vm_area_struct *ignore)
{
	/*
	 * All rb_subtree_gap values must be consistent prior to erase,
	 * with the possible exception of
	 *
	 * a. the "next" vma being erased if next->vm_start was reduced in
	 *    __vma_adjust() -> __vma_unlink()
	 * b. the vma being erased in detach_vmas_to_be_unmapped() ->
	 *    vma_rb_erase()
	 */
	validate_mm_rb(root, ignore);

	__vma_rb_erase(vma, root);
}
|
|
|
|
/* Erase @vma; the vma itself is the one allowed to be inconsistent. */
static __always_inline void vma_rb_erase(struct vm_area_struct *vma,
					 struct rb_root *root)
{
	vma_rb_erase_ignore(vma, root, vma);
}
|
|
|
|
/*
|
|
* vma has some anon_vma assigned, and is already inserted on that
|
|
* anon_vma's interval trees.
|
|
*
|
|
* Before updating the vma's vm_start / vm_end / vm_pgoff fields, the
|
|
* vma must be removed from the anon_vma's interval trees using
|
|
* anon_vma_interval_tree_pre_update_vma().
|
|
*
|
|
* After the update, the vma will be reinserted using
|
|
* anon_vma_interval_tree_post_update_vma().
|
|
*
|
|
* The entire update must be protected by exclusive mmap_lock and by
|
|
* the root anon_vma's mutex.
|
|
*/
|
|
static inline void
|
|
anon_vma_interval_tree_pre_update_vma(struct vm_area_struct *vma)
|
|
{
|
|
struct anon_vma_chain *avc;
|
|
|
|
list_for_each_entry(avc, &vma->anon_vma_chain, same_vma)
|
|
anon_vma_interval_tree_remove(avc, &avc->anon_vma->rb_root);
|
|
}
|
|
|
|
static inline void
|
|
anon_vma_interval_tree_post_update_vma(struct vm_area_struct *vma)
|
|
{
|
|
struct anon_vma_chain *avc;
|
|
|
|
list_for_each_entry(avc, &vma->anon_vma_chain, same_vma)
|
|
anon_vma_interval_tree_insert(avc, &avc->anon_vma->rb_root);
|
|
}
|
|
|
|
static int find_vma_links(struct mm_struct *mm, unsigned long addr,
|
|
unsigned long end, struct vm_area_struct **pprev,
|
|
struct rb_node ***rb_link, struct rb_node **rb_parent)
|
|
{
|
|
struct rb_node **__rb_link, *__rb_parent, *rb_prev;
|
|
|
|
mmap_assert_locked(mm);
|
|
__rb_link = &mm->mm_rb.rb_node;
|
|
rb_prev = __rb_parent = NULL;
|
|
|
|
while (*__rb_link) {
|
|
struct vm_area_struct *vma_tmp;
|
|
|
|
__rb_parent = *__rb_link;
|
|
vma_tmp = rb_entry(__rb_parent, struct vm_area_struct, vm_rb);
|
|
|
|
if (vma_tmp->vm_end > addr) {
|
|
/* Fail if an existing vma overlaps the area */
|
|
if (vma_tmp->vm_start < end)
|
|
return -ENOMEM;
|
|
__rb_link = &__rb_parent->rb_left;
|
|
} else {
|
|
rb_prev = __rb_parent;
|
|
__rb_link = &__rb_parent->rb_right;
|
|
}
|
|
}
|
|
|
|
*pprev = NULL;
|
|
if (rb_prev)
|
|
*pprev = rb_entry(rb_prev, struct vm_area_struct, vm_rb);
|
|
*rb_link = __rb_link;
|
|
*rb_parent = __rb_parent;
|
|
return 0;
|
|
}
|
|
|
|
/*
|
|
* vma_next() - Get the next VMA.
|
|
* @mm: The mm_struct.
|
|
* @vma: The current vma.
|
|
*
|
|
* If @vma is NULL, return the first vma in the mm.
|
|
*
|
|
* Returns: The next VMA after @vma.
|
|
*/
|
|
static inline struct vm_area_struct *vma_next(struct mm_struct *mm,
|
|
struct vm_area_struct *vma)
|
|
{
|
|
if (!vma)
|
|
return mm->mmap;
|
|
|
|
return vma->vm_next;
|
|
}
|
|
|
|
/*
|
|
* munmap_vma_range() - munmap VMAs that overlap a range.
|
|
* @mm: The mm struct
|
|
* @start: The start of the range.
|
|
* @len: The length of the range.
|
|
* @pprev: pointer to the pointer that will be set to previous vm_area_struct
|
|
* @rb_link: the rb_node
|
|
* @rb_parent: the parent rb_node
|
|
*
|
|
* Find all the vm_area_struct that overlap from @start to
|
|
* @end and munmap them. Set @pprev to the previous vm_area_struct.
|
|
*
|
|
* Returns: -ENOMEM on munmap failure or 0 on success.
|
|
*/
|
|
static inline int
|
|
munmap_vma_range(struct mm_struct *mm, unsigned long start, unsigned long len,
|
|
struct vm_area_struct **pprev, struct rb_node ***link,
|
|
struct rb_node **parent, struct list_head *uf)
|
|
{
|
|
|
|
while (find_vma_links(mm, start, start + len, pprev, link, parent))
|
|
if (do_munmap(mm, start, len, uf))
|
|
return -ENOMEM;
|
|
|
|
return 0;
|
|
}
|
|
static unsigned long count_vma_pages_range(struct mm_struct *mm,
|
|
unsigned long addr, unsigned long end)
|
|
{
|
|
unsigned long nr_pages = 0;
|
|
struct vm_area_struct *vma;
|
|
|
|
/* Find first overlapping mapping */
|
|
vma = find_vma_intersection(mm, addr, end);
|
|
if (!vma)
|
|
return 0;
|
|
|
|
nr_pages = (min(end, vma->vm_end) -
|
|
max(addr, vma->vm_start)) >> PAGE_SHIFT;
|
|
|
|
/* Iterate over the rest of the overlaps */
|
|
for (vma = vma->vm_next; vma; vma = vma->vm_next) {
|
|
unsigned long overlap_len;
|
|
|
|
if (vma->vm_start > end)
|
|
break;
|
|
|
|
overlap_len = min(end, vma->vm_end) - vma->vm_start;
|
|
nr_pages += overlap_len >> PAGE_SHIFT;
|
|
}
|
|
|
|
return nr_pages;
|
|
}
|
|
|
|
/*
 * Link @vma into the mm's augmented rbtree at the slot previously
 * found by find_vma_links(), maintaining gap bookkeeping.
 * vma->vm_next/vm_prev must already be linked.
 */
void __vma_link_rb(struct mm_struct *mm, struct vm_area_struct *vma,
		struct rb_node **rb_link, struct rb_node *rb_parent)
{
	/* Update tracking information for the gap following the new vma. */
	if (vma->vm_next)
		vma_gap_update(vma->vm_next);
	else
		mm->highest_vm_end = vm_end_gap(vma);

	/*
	 * vma->vm_prev wasn't known when we followed the rbtree to find the
	 * correct insertion point for that vma. As a result, we could not
	 * update the vma vm_rb parents rb_subtree_gap values on the way down.
	 * So, we first insert the vma with a zero rb_subtree_gap value
	 * (to be consistent with what we did on the way down), and then
	 * immediately update the gap to the correct value. Finally we
	 * rebalance the rbtree after all augmented values have been set.
	 */
	rb_link_node(&vma->vm_rb, rb_parent, rb_link);
	vma->rb_subtree_gap = 0;
	vma_gap_update(vma);
	vma_rb_insert(vma, &mm->mm_rb);
}
|
|
|
|
static void __vma_link_file(struct vm_area_struct *vma)
|
|
{
|
|
struct file *file;
|
|
|
|
file = vma->vm_file;
|
|
if (file) {
|
|
struct address_space *mapping = file->f_mapping;
|
|
|
|
if (vma->vm_flags & VM_SHARED)
|
|
mapping_allow_writable(mapping);
|
|
|
|
flush_dcache_mmap_lock(mapping);
|
|
vma_interval_tree_insert(vma, &mapping->i_mmap);
|
|
flush_dcache_mmap_unlock(mapping);
|
|
}
|
|
}
|
|
|
|
/* Link @vma into both the mm's vma list and its rbtree. */
static void
__vma_link(struct mm_struct *mm, struct vm_area_struct *vma,
	struct vm_area_struct *prev, struct rb_node **rb_link,
	struct rb_node *rb_parent)
{
	__vma_link_list(mm, vma, prev);
	__vma_link_rb(mm, vma, rb_link, rb_parent);
}
|
|
|
|
static void vma_link(struct mm_struct *mm, struct vm_area_struct *vma,
|
|
struct vm_area_struct *prev, struct rb_node **rb_link,
|
|
struct rb_node *rb_parent)
|
|
{
|
|
struct address_space *mapping = NULL;
|
|
|
|
if (vma->vm_file) {
|
|
mapping = vma->vm_file->f_mapping;
|
|
i_mmap_lock_write(mapping);
|
|
}
|
|
|
|
__vma_link(mm, vma, prev, rb_link, rb_parent);
|
|
__vma_link_file(vma);
|
|
|
|
if (mapping)
|
|
i_mmap_unlock_write(mapping);
|
|
|
|
mm->map_count++;
|
|
validate_mm(mm);
|
|
}
|
|
|
|
/*
|
|
* Helper for vma_adjust() in the split_vma insert case: insert a vma into the
|
|
* mm's list and rbtree. It has already been inserted into the interval tree.
|
|
*/
|
|
static void __insert_vm_struct(struct mm_struct *mm, struct vm_area_struct *vma)
|
|
{
|
|
struct vm_area_struct *prev;
|
|
struct rb_node **rb_link, *rb_parent;
|
|
|
|
if (find_vma_links(mm, vma->vm_start, vma->vm_end,
|
|
&prev, &rb_link, &rb_parent))
|
|
BUG();
|
|
__vma_link(mm, vma, prev, rb_link, rb_parent);
|
|
mm->map_count++;
|
|
}
|
|
|
|
/*
 * Remove @vma from the mm's rbtree and vma list.  @ignore is the vma
 * whose rb_subtree_gap may legitimately be stale during validation.
 */
static __always_inline void __vma_unlink(struct mm_struct *mm,
						struct vm_area_struct *vma,
						struct vm_area_struct *ignore)
{
	vma_rb_erase_ignore(vma, &mm->mm_rb, ignore);
	__vma_unlink_list(mm, vma);
	/* Kill the cache */
	vmacache_invalidate(mm);
}
|
|
|
|
/*
 * We cannot adjust vm_start, vm_end, vm_pgoff fields of a vma that
 * is already present in an i_mmap tree without adjusting the tree.
 * The following helper function should be used when such adjustments
 * are necessary. The "insert" vma (if any) is to be inserted
 * before we drop the necessary locks.
 *
 * @vma:    the vma whose start/end/pgoff are being changed
 * @start:  new vm_start
 * @end:    new vm_end
 * @pgoff:  new vm_pgoff
 * @insert: vma to add to the mm (split_vma case), or NULL
 * @expand: the vma being grown in a merge, or NULL
 *
 * Returns 0 on success or a negative errno from anon_vma_clone().
 * Caller holds mmap_lock for writing.
 */
int __vma_adjust(struct vm_area_struct *vma, unsigned long start,
	unsigned long end, pgoff_t pgoff, struct vm_area_struct *insert,
	struct vm_area_struct *expand)
{
	struct mm_struct *mm = vma->vm_mm;
	struct vm_area_struct *next = vma->vm_next, *orig_vma = vma;
	struct address_space *mapping = NULL;
	struct rb_root_cached *root = NULL;
	struct anon_vma *anon_vma = NULL;
	struct file *file = vma->vm_file;
	bool start_changed = false, end_changed = false;
	long adjust_next = 0;
	int remove_next = 0;

	/* Figure out how the operation affects "next" (see vma_merge cases). */
	if (next && !insert) {
		struct vm_area_struct *exporter = NULL, *importer = NULL;

		if (end >= next->vm_end) {
			/*
			 * vma expands, overlapping all the next, and
			 * perhaps the one after too (mprotect case 6).
			 * The only other cases that gets here are
			 * case 1, case 7 and case 8.
			 */
			if (next == expand) {
				/*
				 * The only case where we don't expand "vma"
				 * and we expand "next" instead is case 8.
				 */
				VM_WARN_ON(end != next->vm_end);
				/*
				 * remove_next == 3 means we're
				 * removing "vma" and that to do so we
				 * swapped "vma" and "next".
				 */
				remove_next = 3;
				VM_WARN_ON(file != next->vm_file);
				swap(vma, next);
			} else {
				VM_WARN_ON(expand != vma);
				/*
				 * case 1, 6, 7, remove_next == 2 is case 6,
				 * remove_next == 1 is case 1 or 7.
				 */
				remove_next = 1 + (end > next->vm_end);
				VM_WARN_ON(remove_next == 2 &&
					   end != next->vm_next->vm_end);
				/* trim 'end' to 'next', for case 6 first pass */
				end = next->vm_end;
			}

			exporter = next;
			importer = vma;

			/*
			 * If next doesn't have anon_vma, import from vma after
			 * next, if the vma overlaps with it.
			 */
			if (remove_next == 2 && !next->anon_vma)
				exporter = next->vm_next;

		} else if (end > next->vm_start) {
			/*
			 * vma expands, overlapping part of the next:
			 * mprotect case 5 shifting the boundary up.
			 */
			adjust_next = (end - next->vm_start);
			exporter = next;
			importer = vma;
			VM_WARN_ON(expand != importer);
		} else if (end < vma->vm_end) {
			/*
			 * vma shrinks, and !insert tells it's not
			 * split_vma inserting another: so it must be
			 * mprotect case 4 shifting the boundary down.
			 */
			adjust_next = -(vma->vm_end - end);
			exporter = vma;
			importer = next;
			VM_WARN_ON(expand != importer);
		}

		/*
		 * Easily overlooked: when mprotect shifts the boundary,
		 * make sure the expanding vma has anon_vma set if the
		 * shrinking vma had, to cover any anon pages imported.
		 */
		if (exporter && exporter->anon_vma && !importer->anon_vma) {
			int error;

			importer->anon_vma = exporter->anon_vma;
			error = anon_vma_clone(importer, exporter);
			if (error)
				return error;
		}
	}
again:
	vma_adjust_trans_huge(orig_vma, start, end, adjust_next);

	if (file) {
		mapping = file->f_mapping;
		root = &mapping->i_mmap;
		uprobe_munmap(vma, vma->vm_start, vma->vm_end);

		if (adjust_next)
			uprobe_munmap(next, next->vm_start, next->vm_end);

		i_mmap_lock_write(mapping);
		if (insert) {
			/*
			 * Put into interval tree now, so instantiated pages
			 * are visible to arm/parisc __flush_dcache_page
			 * throughout; but we cannot insert into address
			 * space until vma start or end is updated.
			 */
			__vma_link_file(insert);
		}
	}

	anon_vma = vma->anon_vma;
	if (!anon_vma && adjust_next)
		anon_vma = next->anon_vma;
	if (anon_vma) {
		VM_WARN_ON(adjust_next && next->anon_vma &&
			   anon_vma != next->anon_vma);
		anon_vma_lock_write(anon_vma);
		/* Pull vmas off the anon interval trees before editing them. */
		anon_vma_interval_tree_pre_update_vma(vma);
		if (adjust_next)
			anon_vma_interval_tree_pre_update_vma(next);
	}

	if (file) {
		flush_dcache_mmap_lock(mapping);
		vma_interval_tree_remove(vma, root);
		if (adjust_next)
			vma_interval_tree_remove(next, root);
	}

	/* The actual field updates, under all the locks taken above. */
	if (start != vma->vm_start) {
		vma->vm_start = start;
		start_changed = true;
	}
	if (end != vma->vm_end) {
		vma->vm_end = end;
		end_changed = true;
	}
	vma->vm_pgoff = pgoff;
	if (adjust_next) {
		next->vm_start += adjust_next;
		next->vm_pgoff += adjust_next >> PAGE_SHIFT;
	}

	if (file) {
		if (adjust_next)
			vma_interval_tree_insert(next, root);
		vma_interval_tree_insert(vma, root);
		flush_dcache_mmap_unlock(mapping);
	}

	if (remove_next) {
		/*
		 * vma_merge has merged next into vma, and needs
		 * us to remove next before dropping the locks.
		 */
		if (remove_next != 3)
			__vma_unlink(mm, next, next);
		else
			/*
			 * vma is not before next if they've been
			 * swapped.
			 *
			 * pre-swap() next->vm_start was reduced so
			 * tell validate_mm_rb to ignore pre-swap()
			 * "next" (which is stored in post-swap()
			 * "vma").
			 */
			__vma_unlink(mm, next, vma);
		if (file)
			__remove_shared_vm_struct(next, file, mapping);
	} else if (insert) {
		/*
		 * split_vma has split insert from vma, and needs
		 * us to insert it before dropping the locks
		 * (it may either follow vma or precede it).
		 */
		__insert_vm_struct(mm, insert);
	} else {
		if (start_changed)
			vma_gap_update(vma);
		if (end_changed) {
			if (!next)
				mm->highest_vm_end = vm_end_gap(vma);
			else if (!adjust_next)
				vma_gap_update(next);
		}
	}

	if (anon_vma) {
		anon_vma_interval_tree_post_update_vma(vma);
		if (adjust_next)
			anon_vma_interval_tree_post_update_vma(next);
		anon_vma_unlock_write(anon_vma);
	}

	if (file) {
		i_mmap_unlock_write(mapping);
		uprobe_mmap(vma);

		if (adjust_next)
			uprobe_mmap(next);
	}

	if (remove_next) {
		if (file) {
			uprobe_munmap(next, next->vm_start, next->vm_end);
			/* fput(file) happens within vm_area_free(next) */
			VM_BUG_ON(file != next->vm_file);
		}
		if (next->anon_vma)
			anon_vma_merge(vma, next);
		mm->map_count--;
		mpol_put(vma_policy(next));
		vm_area_free(next);
		/*
		 * In mprotect's case 6 (see comments on vma_merge),
		 * we must remove another next too. It would clutter
		 * up the code too much to do both in one go.
		 */
		if (remove_next != 3) {
			/*
			 * If "next" was removed and vma->vm_end was
			 * expanded (up) over it, in turn
			 * "next->vm_prev->vm_end" changed and the
			 * "vma->vm_next" gap must be updated.
			 */
			next = vma->vm_next;
		} else {
			/*
			 * For the scope of the comment "next" and
			 * "vma" considered pre-swap(): if "vma" was
			 * removed, next->vm_start was expanded (down)
			 * over it and the "next" gap must be updated.
			 * Because of the swap() the post-swap() "vma"
			 * actually points to pre-swap() "next"
			 * (post-swap() "next" as opposed is now a
			 * dangling pointer).
			 */
			next = vma;
		}
		if (remove_next == 2) {
			/* Second pass for case 6: remove the next "next". */
			remove_next = 1;
			end = next->vm_end;
			goto again;
		}
		else if (next)
			vma_gap_update(next);
		else {
			/*
			 * If remove_next == 2 we obviously can't
			 * reach this path.
			 *
			 * If remove_next == 3 we can't reach this
			 * path because pre-swap() next is always not
			 * NULL. pre-swap() "next" is not being
			 * removed and its next->vm_end is not altered
			 * (and furthermore "end" already matches
			 * next->vm_end in remove_next == 3).
			 *
			 * We reach this only in the remove_next == 1
			 * case if the "next" vma that was removed was
			 * the highest vma of the mm. However in such
			 * case next->vm_end == "end" and the extended
			 * "vma" has vma->vm_end == next->vm_end so
			 * mm->highest_vm_end doesn't need any update
			 * in remove_next == 1 case.
			 */
			VM_WARN_ON(mm->highest_vm_end != vm_end_gap(vma));
		}
	}
	if (insert && file)
		uprobe_mmap(insert);

	validate_mm(mm);

	return 0;
}
|
|
|
|
/*
|
|
* If the vma has a ->close operation then the driver probably needs to release
|
|
* per-vma resources, so we don't attempt to merge those.
|
|
*/
|
|
static inline int is_mergeable_vma(struct vm_area_struct *vma,
|
|
struct file *file, unsigned long vm_flags,
|
|
struct vm_userfaultfd_ctx vm_userfaultfd_ctx,
|
|
struct anon_vma_name *anon_name)
|
|
{
|
|
/*
|
|
* VM_SOFTDIRTY should not prevent from VMA merging, if we
|
|
* match the flags but dirty bit -- the caller should mark
|
|
* merged VMA as dirty. If dirty bit won't be excluded from
|
|
* comparison, we increase pressure on the memory system forcing
|
|
* the kernel to generate new VMAs when old one could be
|
|
* extended instead.
|
|
*/
|
|
if ((vma->vm_flags ^ vm_flags) & ~VM_SOFTDIRTY)
|
|
return 0;
|
|
if (vma->vm_file != file)
|
|
return 0;
|
|
if (vma->vm_ops && vma->vm_ops->close)
|
|
return 0;
|
|
if (!is_mergeable_vm_userfaultfd_ctx(vma, vm_userfaultfd_ctx))
|
|
return 0;
|
|
if (!anon_vma_name_eq(anon_vma_name(vma), anon_name))
|
|
return 0;
|
|
return 1;
|
|
}
|
|
|
|
/*
 * Anon-vma compatibility for merging: if either side has no anon_vma,
 * allow the merge for singly-chained (un-forked) vmas; otherwise the
 * two anon_vmas must be the very same object.
 */
static inline int is_mergeable_anon_vma(struct anon_vma *anon_vma1,
					struct anon_vma *anon_vma2,
					struct vm_area_struct *vma)
{
	/*
	 * The list_is_singular() test is to avoid merging VMA cloned from
	 * parents. This can improve scalability caused by anon_vma lock.
	 */
	if ((!anon_vma1 || !anon_vma2) && (!vma ||
		list_is_singular(&vma->anon_vma_chain)))
		return 1;
	return anon_vma1 == anon_vma2;
}
|
|
|
|
/*
|
|
* Return true if we can merge this (vm_flags,anon_vma,file,vm_pgoff)
|
|
* in front of (at a lower virtual address and file offset than) the vma.
|
|
*
|
|
* We cannot merge two vmas if they have differently assigned (non-NULL)
|
|
* anon_vmas, nor if same anon_vma is assigned but offsets incompatible.
|
|
*
|
|
* We don't check here for the merged mmap wrapping around the end of pagecache
|
|
* indices (16TB on ia32) because do_mmap() does not permit mmap's which
|
|
* wrap, nor mmaps which cover the final page at index -1UL.
|
|
*/
|
|
static int
|
|
can_vma_merge_before(struct vm_area_struct *vma, unsigned long vm_flags,
|
|
struct anon_vma *anon_vma, struct file *file,
|
|
pgoff_t vm_pgoff,
|
|
struct vm_userfaultfd_ctx vm_userfaultfd_ctx,
|
|
struct anon_vma_name *anon_name)
|
|
{
|
|
if (is_mergeable_vma(vma, file, vm_flags, vm_userfaultfd_ctx, anon_name) &&
|
|
is_mergeable_anon_vma(anon_vma, vma->anon_vma, vma)) {
|
|
if (vma->vm_pgoff == vm_pgoff)
|
|
return 1;
|
|
}
|
|
return 0;
|
|
}
|
|
|
|
/*
|
|
* Return true if we can merge this (vm_flags,anon_vma,file,vm_pgoff)
|
|
* beyond (at a higher virtual address and file offset than) the vma.
|
|
*
|
|
* We cannot merge two vmas if they have differently assigned (non-NULL)
|
|
* anon_vmas, nor if same anon_vma is assigned but offsets incompatible.
|
|
*/
|
|
static int
|
|
can_vma_merge_after(struct vm_area_struct *vma, unsigned long vm_flags,
|
|
struct anon_vma *anon_vma, struct file *file,
|
|
pgoff_t vm_pgoff,
|
|
struct vm_userfaultfd_ctx vm_userfaultfd_ctx,
|
|
struct anon_vma_name *anon_name)
|
|
{
|
|
if (is_mergeable_vma(vma, file, vm_flags, vm_userfaultfd_ctx, anon_name) &&
|
|
is_mergeable_anon_vma(anon_vma, vma->anon_vma, vma)) {
|
|
pgoff_t vm_pglen;
|
|
vm_pglen = vma_pages(vma);
|
|
if (vma->vm_pgoff + vm_pglen == vm_pgoff)
|
|
return 1;
|
|
}
|
|
return 0;
|
|
}
|
|
|
|
/*
|
|
* Given a mapping request (addr,end,vm_flags,file,pgoff,anon_name),
|
|
* figure out whether that can be merged with its predecessor or its
|
|
* successor. Or both (it neatly fills a hole).
|
|
*
|
|
* In most cases - when called for mmap, brk or mremap - [addr,end) is
|
|
* certain not to be mapped by the time vma_merge is called; but when
|
|
* called for mprotect, it is certain to be already mapped (either at
|
|
* an offset within prev, or at the start of next), and the flags of
|
|
* this area are about to be changed to vm_flags - and the no-change
|
|
* case has already been eliminated.
|
|
*
|
|
* The following mprotect cases have to be considered, where AAAA is
|
|
* the area passed down from mprotect_fixup, never extending beyond one
|
|
* vma, PPPPPP is the prev vma specified, and NNNNNN the next vma after:
|
|
*
|
|
* AAAA AAAA AAAA
|
|
* PPPPPPNNNNNN PPPPPPNNNNNN PPPPPPNNNNNN
|
|
* cannot merge might become might become
|
|
* PPNNNNNNNNNN PPPPPPPPPPNN
|
|
* mmap, brk or case 4 below case 5 below
|
|
* mremap move:
|
|
* AAAA AAAA
|
|
* PPPP NNNN PPPPNNNNXXXX
|
|
* might become might become
|
|
* PPPPPPPPPPPP 1 or PPPPPPPPPPPP 6 or
|
|
* PPPPPPPPNNNN 2 or PPPPPPPPXXXX 7 or
|
|
* PPPPNNNNNNNN 3 PPPPXXXXXXXX 8
|
|
*
|
|
* It is important for case 8 that the vma NNNN overlapping the
|
|
* region AAAA is never going to extended over XXXX. Instead XXXX must
|
|
* be extended in region AAAA and NNNN must be removed. This way in
|
|
* all cases where vma_merge succeeds, the moment vma_adjust drops the
|
|
* rmap_locks, the properties of the merged vma will be already
|
|
* correct for the whole merged range. Some of those properties like
|
|
* vm_page_prot/vm_flags may be accessed by rmap_walks and they must
|
|
* be correct for the whole merged range immediately after the
|
|
* rmap_locks are released. Otherwise if XXXX would be removed and
|
|
* NNNN would be extended over the XXXX range, remove_migration_ptes
|
|
* or other rmap walkers (if working on addresses beyond the "end"
|
|
* parameter) may establish ptes with the wrong permissions of NNNN
|
|
* instead of the right permissions of XXXX.
|
|
*/
|
|
/*
 * vma_merge - try to merge a new/changed mapping with its neighbours.
 * @mm:      address space the mapping belongs to
 * @prev:    the vma immediately before the range, or NULL
 * @addr:    start of the range being mapped/changed
 * @end:     end of the range (exclusive)
 * @vm_flags: flags the merged vma must end up with
 * @anon_vma/@file/@pgoff/@policy/@vm_userfaultfd_ctx/@anon_name:
 *           attributes that must be compatible for a merge
 *
 * The case numbers in the comments below refer to the diagram in the
 * block comment directly above this function.  Returns the vma the
 * range was merged into, or NULL if no merge was possible (including
 * when __vma_adjust() failed).
 *
 * NOTE(review): presumably called with mmap_lock held for write (see
 * the do_mmap() contract below) — confirm against callers.
 */
struct vm_area_struct *vma_merge(struct mm_struct *mm,
			struct vm_area_struct *prev, unsigned long addr,
			unsigned long end, unsigned long vm_flags,
			struct anon_vma *anon_vma, struct file *file,
			pgoff_t pgoff, struct mempolicy *policy,
			struct vm_userfaultfd_ctx vm_userfaultfd_ctx,
			struct anon_vma_name *anon_name)
{
	pgoff_t pglen = (end - addr) >> PAGE_SHIFT;
	struct vm_area_struct *area, *next;
	int err;

	/*
	 * We later require that vma->vm_flags == vm_flags,
	 * so this tests vma->vm_flags & VM_SPECIAL, too.
	 */
	if (vm_flags & VM_SPECIAL)
		return NULL;

	next = vma_next(mm, prev);
	area = next;
	if (area && area->vm_end == end)	/* cases 6, 7, 8 */
		next = next->vm_next;

	/* verify some invariant that must be enforced by the caller */
	VM_WARN_ON(prev && addr <= prev->vm_start);
	VM_WARN_ON(area && end > area->vm_end);
	VM_WARN_ON(addr >= end);

	/*
	 * Can it merge with the predecessor?
	 */
	if (prev && prev->vm_end == addr &&
			mpol_equal(vma_policy(prev), policy) &&
			can_vma_merge_after(prev, vm_flags,
					    anon_vma, file, pgoff,
					    vm_userfaultfd_ctx, anon_name)) {
		/*
		 * OK, it can.  Can we now merge in the successor as well?
		 */
		if (next && end == next->vm_start &&
				mpol_equal(policy, vma_policy(next)) &&
				can_vma_merge_before(next, vm_flags,
						     anon_vma, file,
						     pgoff+pglen,
						     vm_userfaultfd_ctx, anon_name) &&
				is_mergeable_anon_vma(prev->anon_vma,
						      next->anon_vma, NULL)) {
							/* cases 1, 6 */
			/* Expand prev over both the range and next. */
			err = __vma_adjust(prev, prev->vm_start,
					 next->vm_end, prev->vm_pgoff, NULL,
					 prev);
		} else					/* cases 2, 5, 7 */
			/* Expand prev over the new range only. */
			err = __vma_adjust(prev, prev->vm_start,
					 end, prev->vm_pgoff, NULL, prev);
		if (err)
			return NULL;
		khugepaged_enter_vma_merge(prev, vm_flags);
		return prev;
	}

	/*
	 * Can this new request be merged in front of next?
	 */
	if (next && end == next->vm_start &&
			mpol_equal(policy, vma_policy(next)) &&
			can_vma_merge_before(next, vm_flags,
					     anon_vma, file, pgoff+pglen,
					     vm_userfaultfd_ctx, anon_name)) {
		if (prev && addr < prev->vm_end)	/* case 4 */
			/* Shrink prev back to addr; next absorbs the rest. */
			err = __vma_adjust(prev, prev->vm_start,
					 addr, prev->vm_pgoff, NULL, next);
		else {					/* cases 3, 8 */
			/* Extend next backwards over the range (and area). */
			err = __vma_adjust(area, addr, next->vm_end,
					 next->vm_pgoff - pglen, NULL, next);
			/*
			 * In case 3 area is already equal to next and
			 * this is a noop, but in case 8 "area" has
			 * been removed and next was expanded over it.
			 */
			area = next;
		}
		if (err)
			return NULL;
		khugepaged_enter_vma_merge(area, vm_flags);
		return area;
	}

	/* No neighbour was compatible; the caller must create a new vma. */
	return NULL;
}
|
|
|
|
/*
|
|
* Rough compatibility check to quickly see if it's even worth looking
|
|
* at sharing an anon_vma.
|
|
*
|
|
* They need to have the same vm_file, and the flags can only differ
|
|
* in things that mprotect may change.
|
|
*
|
|
* NOTE! The fact that we share an anon_vma doesn't _have_ to mean that
|
|
* we can merge the two vma's. For example, we refuse to merge a vma if
|
|
* there is a vm_ops->close() function, because that indicates that the
|
|
* driver is doing some kind of reference counting. But that doesn't
|
|
* really matter for the anon_vma sharing case.
|
|
*/
|
|
static int anon_vma_compatible(struct vm_area_struct *a, struct vm_area_struct *b)
|
|
{
|
|
return a->vm_end == b->vm_start &&
|
|
mpol_equal(vma_policy(a), vma_policy(b)) &&
|
|
a->vm_file == b->vm_file &&
|
|
!((a->vm_flags ^ b->vm_flags) & ~(VM_ACCESS_FLAGS | VM_SOFTDIRTY)) &&
|
|
b->vm_pgoff == a->vm_pgoff + ((b->vm_start - a->vm_start) >> PAGE_SHIFT);
|
|
}
|
|
|
|
/*
|
|
* Do some basic sanity checking to see if we can re-use the anon_vma
|
|
* from 'old'. The 'a'/'b' vma's are in VM order - one of them will be
|
|
* the same as 'old', the other will be the new one that is trying
|
|
* to share the anon_vma.
|
|
*
|
|
* NOTE! This runs with mm_sem held for reading, so it is possible that
|
|
* the anon_vma of 'old' is concurrently in the process of being set up
|
|
* by another page fault trying to merge _that_. But that's ok: if it
|
|
* is being set up, that automatically means that it will be a singleton
|
|
* acceptable for merging, so we can do all of this optimistically. But
|
|
* we do that READ_ONCE() to make sure that we never re-load the pointer.
|
|
*
|
|
* IOW: that the "list_is_singular()" test on the anon_vma_chain only
|
|
* matters for the 'stable anon_vma' case (ie the thing we want to avoid
|
|
* is to return an anon_vma that is "complex" due to having gone through
|
|
* a fork).
|
|
*
|
|
* We also make sure that the two vma's are compatible (adjacent,
|
|
* and with the same memory policies). That's all stable, even with just
|
|
* a read lock on the mm_sem.
|
|
*/
|
|
static struct anon_vma *reusable_anon_vma(struct vm_area_struct *old, struct vm_area_struct *a, struct vm_area_struct *b)
|
|
{
|
|
if (anon_vma_compatible(a, b)) {
|
|
struct anon_vma *anon_vma = READ_ONCE(old->anon_vma);
|
|
|
|
if (anon_vma && list_is_singular(&old->anon_vma_chain))
|
|
return anon_vma;
|
|
}
|
|
return NULL;
|
|
}
|
|
|
|
/*
|
|
* find_mergeable_anon_vma is used by anon_vma_prepare, to check
|
|
* neighbouring vmas for a suitable anon_vma, before it goes off
|
|
* to allocate a new anon_vma. It checks because a repetitive
|
|
* sequence of mprotects and faults may otherwise lead to distinct
|
|
* anon_vmas being allocated, preventing vma merge in subsequent
|
|
* mprotect.
|
|
*/
|
|
struct anon_vma *find_mergeable_anon_vma(struct vm_area_struct *vma)
|
|
{
|
|
struct anon_vma *anon_vma = NULL;
|
|
|
|
/* Try next first. */
|
|
if (vma->vm_next) {
|
|
anon_vma = reusable_anon_vma(vma->vm_next, vma, vma->vm_next);
|
|
if (anon_vma)
|
|
return anon_vma;
|
|
}
|
|
|
|
/* Try prev next. */
|
|
if (vma->vm_prev)
|
|
anon_vma = reusable_anon_vma(vma->vm_prev, vma->vm_prev, vma);
|
|
|
|
/*
|
|
* We might reach here with anon_vma == NULL if we can't find
|
|
* any reusable anon_vma.
|
|
* There's no absolute need to look only at touching neighbours:
|
|
* we could search further afield for "compatible" anon_vmas.
|
|
* But it would probably just be a waste of time searching,
|
|
* or lead to too many vmas hanging off the same anon_vma.
|
|
* We're trying to allow mprotect remerging later on,
|
|
* not trying to minimize memory used for anon_vmas.
|
|
*/
|
|
return anon_vma;
|
|
}
|
|
|
|
/*
|
|
* If a hint addr is less than mmap_min_addr change hint to be as
|
|
* low as possible but still greater than mmap_min_addr
|
|
*/
|
|
static inline unsigned long round_hint_to_min(unsigned long hint)
|
|
{
|
|
hint &= PAGE_MASK;
|
|
if (((void *)hint != NULL) &&
|
|
(hint < mmap_min_addr))
|
|
return PAGE_ALIGN(mmap_min_addr);
|
|
return hint;
|
|
}
|
|
|
|
int mlock_future_check(struct mm_struct *mm, unsigned long flags,
|
|
unsigned long len)
|
|
{
|
|
unsigned long locked, lock_limit;
|
|
|
|
/* mlock MCL_FUTURE? */
|
|
if (flags & VM_LOCKED) {
|
|
locked = len >> PAGE_SHIFT;
|
|
locked += mm->locked_vm;
|
|
lock_limit = rlimit(RLIMIT_MEMLOCK);
|
|
lock_limit >>= PAGE_SHIFT;
|
|
if (locked > lock_limit && !capable(CAP_IPC_LOCK))
|
|
return -EAGAIN;
|
|
}
|
|
return 0;
|
|
}
|
|
|
|
static inline u64 file_mmap_size_max(struct file *file, struct inode *inode)
|
|
{
|
|
if (S_ISREG(inode->i_mode))
|
|
return MAX_LFS_FILESIZE;
|
|
|
|
if (S_ISBLK(inode->i_mode))
|
|
return MAX_LFS_FILESIZE;
|
|
|
|
if (S_ISSOCK(inode->i_mode))
|
|
return MAX_LFS_FILESIZE;
|
|
|
|
/* Special "we do even unsigned file positions" case */
|
|
if (file->f_mode & FMODE_UNSIGNED_OFFSET)
|
|
return 0;
|
|
|
|
/* Yes, random drivers might want more. But I'm tired of buggy drivers */
|
|
return ULONG_MAX;
|
|
}
|
|
|
|
static inline bool file_mmap_ok(struct file *file, struct inode *inode,
|
|
unsigned long pgoff, unsigned long len)
|
|
{
|
|
u64 maxsize = file_mmap_size_max(file, inode);
|
|
|
|
if (maxsize && len > maxsize)
|
|
return false;
|
|
maxsize -= len;
|
|
if (pgoff > maxsize >> PAGE_SHIFT)
|
|
return false;
|
|
return true;
|
|
}
|
|
|
|
/*
|
|
* The caller must write-lock current->mm->mmap_lock.
|
|
*/
|
|
/*
 * do_mmap - validate an mmap request and create the mapping.
 * @file:     backing file, or NULL for an anonymous mapping
 * @addr:     address hint (or exact address with MAP_FIXED)
 * @len:      length in bytes (page-aligned here)
 * @prot:     PROT_* protection bits
 * @flags:    MAP_* flags
 * @pgoff:    file offset in pages
 * @populate: out-param; set to len when the caller should prefault
 * @uf:       userfaultfd unmap list, passed through to mmap_region()
 *
 * The caller must write-lock current->mm->mmap_lock.
 * Returns the mapped address, or a negative errno encoded as an
 * unsigned long (see IS_ERR_VALUE usage below).
 */
unsigned long do_mmap(struct file *file, unsigned long addr,
			unsigned long len, unsigned long prot,
			unsigned long flags, unsigned long pgoff,
			unsigned long *populate, struct list_head *uf)
{
	struct mm_struct *mm = current->mm;
	vm_flags_t vm_flags;
	int pkey = 0;

	*populate = 0;

	if (!len)
		return -EINVAL;

	/*
	 * Does the application expect PROT_READ to imply PROT_EXEC?
	 *
	 * (the exception is when the underlying filesystem is noexec
	 *  mounted, in which case we dont add PROT_EXEC.)
	 */
	if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
		if (!(file && path_noexec(&file->f_path)))
			prot |= PROT_EXEC;

	/* force arch specific MAP_FIXED handling in get_unmapped_area */
	if (flags & MAP_FIXED_NOREPLACE)
		flags |= MAP_FIXED;

	if (!(flags & MAP_FIXED))
		addr = round_hint_to_min(addr);

	/* Careful about overflows.. (PAGE_ALIGN of a huge len wraps to 0) */
	len = PAGE_ALIGN(len);
	if (!len)
		return -ENOMEM;

	/* offset overflow? */
	if ((pgoff + (len >> PAGE_SHIFT)) < pgoff)
		return -EOVERFLOW;

	/* Too many mappings? */
	if (mm->map_count > sysctl_max_map_count)
		return -ENOMEM;

	/* Obtain the address to map to. we verify (or select) it and ensure
	 * that it represents a valid section of the address space.
	 */
	addr = get_unmapped_area(file, addr, len, pgoff, flags);
	if (IS_ERR_VALUE(addr))
		return addr;

	/* MAP_FIXED_NOREPLACE must not clobber an existing mapping. */
	if (flags & MAP_FIXED_NOREPLACE) {
		if (find_vma_intersection(mm, addr, addr + len))
			return -EEXIST;
	}

	/* Execute-only request: try to use a protection key for it. */
	if (prot == PROT_EXEC) {
		pkey = execute_only_pkey(mm);
		if (pkey < 0)
			pkey = 0;
	}

	/* Do simple checking here so the lower-level routines won't have
	 * to. we assume access permissions have been handled by the open
	 * of the memory object, so we don't do any here.
	 */
	vm_flags = calc_vm_prot_bits(prot, pkey) | calc_vm_flag_bits(flags) |
			mm->def_flags | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC;

	if (flags & MAP_LOCKED)
		if (!can_do_mlock())
			return -EPERM;

	if (mlock_future_check(mm, vm_flags, len))
		return -EAGAIN;

	if (file) {
		struct inode *inode = file_inode(file);
		unsigned long flags_mask;

		if (!file_mmap_ok(file, inode, pgoff, len))
			return -EOVERFLOW;

		flags_mask = LEGACY_MAP_MASK | file->f_op->mmap_supported_flags;

		switch (flags & MAP_TYPE) {
		case MAP_SHARED:
			/*
			 * Force use of MAP_SHARED_VALIDATE with non-legacy
			 * flags. E.g. MAP_SYNC is dangerous to use with
			 * MAP_SHARED as you don't know which consistency model
			 * you will get. We silently ignore unsupported flags
			 * with MAP_SHARED to preserve backward compatibility.
			 */
			flags &= LEGACY_MAP_MASK;
			fallthrough;
		case MAP_SHARED_VALIDATE:
			if (flags & ~flags_mask)
				return -EOPNOTSUPP;
			if (prot & PROT_WRITE) {
				if (!(file->f_mode & FMODE_WRITE))
					return -EACCES;
				if (IS_SWAPFILE(file->f_mapping->host))
					return -ETXTBSY;
			}

			/*
			 * Make sure we don't allow writing to an append-only
			 * file..
			 */
			if (IS_APPEND(inode) && (file->f_mode & FMODE_WRITE))
				return -EACCES;

			vm_flags |= VM_SHARED | VM_MAYSHARE;
			if (!(file->f_mode & FMODE_WRITE))
				vm_flags &= ~(VM_MAYWRITE | VM_SHARED);
			fallthrough;
		case MAP_PRIVATE:
			if (!(file->f_mode & FMODE_READ))
				return -EACCES;
			if (path_noexec(&file->f_path)) {
				if (vm_flags & VM_EXEC)
					return -EPERM;
				vm_flags &= ~VM_MAYEXEC;
			}

			if (!file->f_op->mmap)
				return -ENODEV;
			/* Stack-growth semantics make no sense for files. */
			if (vm_flags & (VM_GROWSDOWN|VM_GROWSUP))
				return -EINVAL;
			break;

		default:
			return -EINVAL;
		}
	} else {
		switch (flags & MAP_TYPE) {
		case MAP_SHARED:
			if (vm_flags & (VM_GROWSDOWN|VM_GROWSUP))
				return -EINVAL;
			/*
			 * Ignore pgoff.
			 */
			pgoff = 0;
			vm_flags |= VM_SHARED | VM_MAYSHARE;
			break;
		case MAP_PRIVATE:
			/*
			 * Set pgoff according to addr for anon_vma.
			 */
			pgoff = addr >> PAGE_SHIFT;
			break;
		default:
			return -EINVAL;
		}
	}

	/*
	 * Set 'VM_NORESERVE' if we should not account for the
	 * memory use of this mapping.
	 */
	if (flags & MAP_NORESERVE) {
		/* We honor MAP_NORESERVE if allowed to overcommit */
		if (sysctl_overcommit_memory != OVERCOMMIT_NEVER)
			vm_flags |= VM_NORESERVE;

		/* hugetlb applies strict overcommit unless MAP_NORESERVE */
		if (file && is_file_hugepages(file))
			vm_flags |= VM_NORESERVE;
	}

	addr = mmap_region(file, addr, len, vm_flags, pgoff, uf);
	/* Tell the caller to prefault for MAP_LOCKED or MAP_POPULATE. */
	if (!IS_ERR_VALUE(addr) &&
	    ((vm_flags & VM_LOCKED) ||
	     (flags & (MAP_POPULATE | MAP_NONBLOCK)) == MAP_POPULATE))
		*populate = len;
	return addr;
}
|
|
|
|
/*
 * ksys_mmap_pgoff - common entry point for the mmap2/mmap_pgoff syscalls.
 *
 * Resolves the fd (for non-anonymous mappings), handles hugetlb length
 * alignment and the anonymous-MAP_HUGETLB case (which is backed by a
 * freshly created hugetlbfs file), then hands off to vm_mmap_pgoff().
 * Any file reference taken here is dropped before returning.
 */
unsigned long ksys_mmap_pgoff(unsigned long addr, unsigned long len,
			      unsigned long prot, unsigned long flags,
			      unsigned long fd, unsigned long pgoff)
{
	struct file *file = NULL;
	unsigned long retval;

	if (!(flags & MAP_ANONYMOUS)) {
		audit_mmap_fd(fd, flags);
		file = fget(fd);
		if (!file)
			return -EBADF;
		if (is_file_hugepages(file)) {
			/* Round the length up to the hugepage size. */
			len = ALIGN(len, huge_page_size(hstate_file(file)));
		} else if (unlikely(flags & MAP_HUGETLB)) {
			/* MAP_HUGETLB with a non-hugetlbfs fd is invalid. */
			retval = -EINVAL;
			goto out_fput;
		}
	} else if (flags & MAP_HUGETLB) {
		struct ucounts *ucounts = NULL;
		struct hstate *hs;

		hs = hstate_sizelog((flags >> MAP_HUGE_SHIFT) & MAP_HUGE_MASK);
		if (!hs)
			return -EINVAL;

		len = ALIGN(len, huge_page_size(hs));
		/*
		 * VM_NORESERVE is used because the reservations will be
		 * taken when vm_ops->mmap() is called
		 * A dummy user value is used because we are not locking
		 * memory so no accounting is necessary
		 */
		file = hugetlb_file_setup(HUGETLB_ANON_FILE, len,
				VM_NORESERVE,
				&ucounts, HUGETLB_ANONHUGE_INODE,
				(flags >> MAP_HUGE_SHIFT) & MAP_HUGE_MASK);
		if (IS_ERR(file))
			return PTR_ERR(file);
	}

	retval = vm_mmap_pgoff(file, addr, len, prot, flags, pgoff);
out_fput:
	/* Drop the reference taken by fget()/hugetlb_file_setup(). */
	if (file)
		fput(file);
	return retval;
}
|
|
|
|
/*
 * mmap2/mmap_pgoff system call: identical to mmap(2) except the offset
 * is given in pages rather than bytes.  Thin wrapper around
 * ksys_mmap_pgoff().
 */
SYSCALL_DEFINE6(mmap_pgoff, unsigned long, addr, unsigned long, len,
		unsigned long, prot, unsigned long, flags,
		unsigned long, fd, unsigned long, pgoff)
{
	return ksys_mmap_pgoff(addr, len, prot, flags, fd, pgoff);
}
|
|
|
|
#ifdef __ARCH_WANT_SYS_OLD_MMAP
|
|
struct mmap_arg_struct {
|
|
unsigned long addr;
|
|
unsigned long len;
|
|
unsigned long prot;
|
|
unsigned long flags;
|
|
unsigned long fd;
|
|
unsigned long offset;
|
|
};
|
|
|
|
SYSCALL_DEFINE1(old_mmap, struct mmap_arg_struct __user *, arg)
|
|
{
|
|
struct mmap_arg_struct a;
|
|
|
|
if (copy_from_user(&a, arg, sizeof(a)))
|
|
return -EFAULT;
|
|
if (offset_in_page(a.offset))
|
|
return -EINVAL;
|
|
|
|
return ksys_mmap_pgoff(a.addr, a.len, a.prot, a.flags, a.fd,
|
|
a.offset >> PAGE_SHIFT);
|
|
}
|
|
#endif /* __ARCH_WANT_SYS_OLD_MMAP */
|
|
|
|
/*
|
|
* Some shared mappings will want the pages marked read-only
|
|
* to track write events. If so, we'll downgrade vm_page_prot
|
|
* to the private version (using protection_map[] without the
|
|
* VM_SHARED bit).
|
|
*/
|
|
/*
 * Some shared mappings want their pages marked read-only to track
 * write events; if so, vm_page_prot is downgraded to the private
 * version (protection_map[] without the VM_SHARED bit).  Returns 1 if
 * write-notification is wanted, 0 otherwise.  The order of the checks
 * below is significant.
 */
int vma_wants_writenotify(struct vm_area_struct *vma, pgprot_t vm_page_prot)
{
	vm_flags_t flags = vma->vm_flags;
	const struct vm_operations_struct *ops = vma->vm_ops;

	/* Only shared, writable mappings can need the write bit cleared. */
	if ((flags & (VM_WRITE | VM_SHARED)) != (VM_WRITE | VM_SHARED))
		return 0;

	/* The backer wishes to know when pages are first written to? */
	if (ops && (ops->page_mkwrite || ops->pfn_mkwrite))
		return 1;

	/*
	 * The open routine changed the protections in a way that
	 * pgprot_modify won't preserve?  Then leave them alone.
	 */
	if (pgprot_val(vm_page_prot) !=
	    pgprot_val(vm_pgprot_modify(vm_page_prot, flags)))
		return 0;

	/*
	 * Soft-dirty tracking needs write notification; hugetlb does
	 * not support soft-dirty tracking yet.
	 */
	if (IS_ENABLED(CONFIG_MEM_SOFT_DIRTY) && !(flags & VM_SOFTDIRTY) &&
	    !is_vm_hugetlb_page(vma))
		return 1;

	/* Specialty mapping? */
	if (flags & VM_PFNMAP)
		return 0;

	/* Finally: can the mapping itself write back dirty pages? */
	return vma->vm_file && vma->vm_file->f_mapping &&
		mapping_can_writeback(vma->vm_file->f_mapping);
}
|
|
|
|
/*
|
|
* We account for memory if it's a private writeable mapping,
|
|
* not hugepages and VM_NORESERVE wasn't set.
|
|
*/
|
|
/*
 * We account for memory if it's a private writable mapping, not
 * hugepages, and VM_NORESERVE wasn't set.  Returns 1 if the mapping
 * should be charged against the commit limit.
 */
static inline int accountable_mapping(struct file *file, vm_flags_t vm_flags)
{
	/*
	 * hugetlb has its own accounting separate from the core VM.
	 * VM_HUGETLB may not be set yet so we cannot check for that flag.
	 */
	if (file && is_file_hugepages(file))
		return 0;

	/* Shared or explicitly unreserved mappings are never charged. */
	if (vm_flags & (VM_NORESERVE | VM_SHARED))
		return 0;

	/* Charge only if the mapping is writable. */
	return !!(vm_flags & VM_WRITE);
}
|
|
|
|
/*
 * mmap_region - create or extend the vma for an already-validated
 * mmap request (called from do_mmap() with mmap_lock held for write,
 * per the contract documented on do_mmap()).
 *
 * Unmaps anything overlapping [addr, addr+len), charges the commit
 * limit for accountable mappings, then either merges the range into a
 * neighbouring vma or allocates and links a new one.  Error paths
 * carefully unwind: the driver's partial mapping, the writable-mapping
 * count, the vma allocation and the memory charge.
 *
 * Returns the mapped address or a negative errno.
 */
unsigned long mmap_region(struct file *file, unsigned long addr,
		unsigned long len, vm_flags_t vm_flags, unsigned long pgoff,
		struct list_head *uf)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma, *prev, *merge;
	int error;
	struct rb_node **rb_link, *rb_parent;
	unsigned long charged = 0;

	/* Check against address space limit. */
	if (!may_expand_vm(mm, vm_flags, len >> PAGE_SHIFT)) {
		unsigned long nr_pages;

		/*
		 * MAP_FIXED may remove pages of mappings that intersects with
		 * requested mapping. Account for the pages it would unmap.
		 */
		nr_pages = count_vma_pages_range(mm, addr, addr + len);

		if (!may_expand_vm(mm, vm_flags,
					(len >> PAGE_SHIFT) - nr_pages))
			return -ENOMEM;
	}

	/* Clear old maps, set up prev, rb_link, rb_parent, and uf */
	if (munmap_vma_range(mm, addr, len, &prev, &rb_link, &rb_parent, uf))
		return -ENOMEM;
	/*
	 * Private writable mapping: check memory availability
	 */
	if (accountable_mapping(file, vm_flags)) {
		charged = len >> PAGE_SHIFT;
		if (security_vm_enough_memory_mm(mm, charged))
			return -ENOMEM;
		vm_flags |= VM_ACCOUNT;
	}

	/*
	 * Can we just expand an old mapping?
	 */
	vma = vma_merge(mm, prev, addr, addr + len, vm_flags,
			NULL, file, pgoff, NULL, NULL_VM_UFFD_CTX, NULL);
	if (vma)
		goto out;

	/*
	 * Determine the object being mapped and call the appropriate
	 * specific mapper. the address has already been validated, but
	 * not unmapped, but the maps are removed from the list.
	 */
	vma = vm_area_alloc(mm);
	if (!vma) {
		error = -ENOMEM;
		goto unacct_error;
	}

	vma->vm_start = addr;
	vma->vm_end = addr + len;
	vma->vm_flags = vm_flags;
	vma->vm_page_prot = vm_get_page_prot(vm_flags);
	vma->vm_pgoff = pgoff;

	if (file) {
		/* Shared mappings take a temporary writable-count denial. */
		if (vm_flags & VM_SHARED) {
			error = mapping_map_writable(file->f_mapping);
			if (error)
				goto free_vma;
		}

		vma->vm_file = get_file(file);
		error = call_mmap(file, vma);
		if (error)
			goto unmap_and_free_vma;

		/* Can addr have changed??
		 *
		 * Answer: Yes, several device drivers can do it in their
		 *         f_op->mmap method. -DaveM
		 * Bug: If addr is changed, prev, rb_link, rb_parent should
		 *      be updated for vma_link()
		 */
		WARN_ON_ONCE(addr != vma->vm_start);

		addr = vma->vm_start;

		/* If vm_flags changed after call_mmap(), we should try merge vma again
		 * as we may succeed this time.
		 */
		if (unlikely(vm_flags != vma->vm_flags && prev)) {
			merge = vma_merge(mm, prev, vma->vm_start, vma->vm_end, vma->vm_flags,
				NULL, vma->vm_file, vma->vm_pgoff, NULL, NULL_VM_UFFD_CTX, NULL);
			if (merge) {
				/* ->mmap() can change vma->vm_file and fput the original file. So
				 * fput the vma->vm_file here or we would add an extra fput for file
				 * and cause general protection fault ultimately.
				 */
				/* fput happens within vm_area_free */
				vm_area_free(vma);
				vma = merge;
				/* Update vm_flags to pick up the change. */
				vm_flags = vma->vm_flags;
				goto unmap_writable;
			}
		}

		vm_flags = vma->vm_flags;
	} else if (vm_flags & VM_SHARED) {
		/* Anonymous shared memory is backed by shmem. */
		error = shmem_zero_setup(vma);
		if (error)
			goto free_vma;
	} else {
		vma_set_anonymous(vma);
	}

	/* Allow architectures to sanity-check the vm_flags */
	if (!arch_validate_flags(vma->vm_flags)) {
		error = -EINVAL;
		/* ->mmap() succeeded, so its ->close() must be called too. */
		if (file)
			goto close_and_free_vma;
		else
			goto free_vma;
	}

	vma_link(mm, vma, prev, rb_link, rb_parent);
	/* Once vma denies write, undo our temporary denial count */
unmap_writable:
	if (file && vm_flags & VM_SHARED)
		mapping_unmap_writable(file->f_mapping);
	file = vma->vm_file;
out:
	perf_event_mmap(vma);

	vm_stat_account(mm, vm_flags, len >> PAGE_SHIFT);
	if (vm_flags & VM_LOCKED) {
		/* Gate, DAX, hugetlb and special vmas are never mlocked. */
		if ((vm_flags & VM_SPECIAL) || vma_is_dax(vma) ||
					is_vm_hugetlb_page(vma) ||
			vma == get_gate_vma(current->mm))
			vma->vm_flags &= VM_LOCKED_CLEAR_MASK;
		else
			mm->locked_vm += (len >> PAGE_SHIFT);
	}

	if (file)
		uprobe_mmap(vma);

	/*
	 * New (or expanded) vma always get soft dirty status.
	 * Otherwise user-space soft-dirty page tracker won't
	 * be able to distinguish situation when vma area unmapped,
	 * then new mapped in-place (which must be aimed as
	 * a completely new data area).
	 */
	vma->vm_flags |= VM_SOFTDIRTY;

	vma_set_page_prot(vma);

	trace_android_vh_mmap_region(vma, addr);

	return addr;

close_and_free_vma:
	if (vma->vm_ops && vma->vm_ops->close)
		vma->vm_ops->close(vma);
unmap_and_free_vma:
	fput(vma->vm_file);
	vma->vm_file = NULL;

	/* Undo any partial mapping done by a device driver. */
	unmap_region(mm, vma, prev, vma->vm_start, vma->vm_end);
	if (vm_flags & VM_SHARED)
		mapping_unmap_writable(file->f_mapping);
free_vma:
	VM_BUG_ON(vma->vm_file);
	vm_area_free(vma);
unacct_error:
	if (charged)
		vm_unacct_memory(charged);
	return error;
}
|
|
|
|
/*
 * unmapped_area - bottom-up search for a free address range.
 *
 * Walks the vma rbtree using the rb_subtree_gap augmentation (the way
 * it is used below implies it records the largest gap available in
 * each subtree, allowing whole subtrees to be pruned).  Returns the
 * lowest suitable gap start, or -ENOMEM.
 */
static unsigned long unmapped_area(struct vm_unmapped_area_info *info)
{
	/*
	 * We implement the search by looking for an rbtree node that
	 * immediately follows a suitable gap. That is,
	 * - gap_start = vma->vm_prev->vm_end <= info->high_limit - length;
	 * - gap_end = vma->vm_start >= info->low_limit + length;
	 * - gap_end - gap_start >= length
	 */

	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	unsigned long length, low_limit, high_limit, gap_start, gap_end;

	/* Adjust search length to account for worst case alignment overhead */
	length = info->length + info->align_mask;
	if (length < info->length)	/* overflowed: request cannot fit */
		return -ENOMEM;

	/* Adjust search limits by the desired length */
	if (info->high_limit < length)
		return -ENOMEM;
	high_limit = info->high_limit - length;

	if (info->low_limit > high_limit)
		return -ENOMEM;
	low_limit = info->low_limit + length;

	/* Check if rbtree root looks promising */
	if (RB_EMPTY_ROOT(&mm->mm_rb))
		goto check_highest;
	vma = rb_entry(mm->mm_rb.rb_node, struct vm_area_struct, vm_rb);
	if (vma->rb_subtree_gap < length)
		goto check_highest;

	while (true) {
		/* Visit left subtree if it looks promising */
		gap_end = vm_start_gap(vma);
		if (gap_end >= low_limit && vma->vm_rb.rb_left) {
			struct vm_area_struct *left =
				rb_entry(vma->vm_rb.rb_left,
					 struct vm_area_struct, vm_rb);
			if (left->rb_subtree_gap >= length) {
				vma = left;
				continue;
			}
		}

		gap_start = vma->vm_prev ? vm_end_gap(vma->vm_prev) : 0;
check_current:
		/* Check if current node has a suitable gap */
		if (gap_start > high_limit)
			return -ENOMEM;
		if (gap_end >= low_limit &&
		    gap_end > gap_start && gap_end - gap_start >= length)
			goto found;

		/* Visit right subtree if it looks promising */
		if (vma->vm_rb.rb_right) {
			struct vm_area_struct *right =
				rb_entry(vma->vm_rb.rb_right,
					 struct vm_area_struct, vm_rb);
			if (right->rb_subtree_gap >= length) {
				vma = right;
				continue;
			}
		}

		/* Go back up the rbtree to find next candidate node */
		while (true) {
			struct rb_node *prev = &vma->vm_rb;
			if (!rb_parent(prev))
				goto check_highest;
			vma = rb_entry(rb_parent(prev),
				       struct vm_area_struct, vm_rb);
			/* Coming up from a left child: visit this node next. */
			if (prev == vma->vm_rb.rb_left) {
				gap_start = vm_end_gap(vma->vm_prev);
				gap_end = vm_start_gap(vma);
				goto check_current;
			}
		}
	}

check_highest:
	/* Check highest gap, which does not precede any rbtree node */
	gap_start = mm->highest_vm_end;
	gap_end = ULONG_MAX;	/* Only for VM_BUG_ON below */
	if (gap_start > high_limit)
		return -ENOMEM;

found:
	/* We found a suitable gap. Clip it with the original low_limit. */
	if (gap_start < info->low_limit)
		gap_start = info->low_limit;

	/* Adjust gap address to the desired alignment */
	gap_start += (info->align_offset - gap_start) & info->align_mask;

	VM_BUG_ON(gap_start + info->length > info->high_limit);
	VM_BUG_ON(gap_start + info->length > gap_end);
	return gap_start;
}
|
|
|
|
/*
 * unmapped_area_topdown - top-down counterpart of unmapped_area().
 *
 * Same rb_subtree_gap-pruned rbtree walk, but mirrored: it prefers the
 * right subtree and returns the highest suitable gap end (adjusted for
 * the requested alignment), or -ENOMEM.
 */
static unsigned long unmapped_area_topdown(struct vm_unmapped_area_info *info)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	unsigned long length, low_limit, high_limit, gap_start, gap_end;

	/* Adjust search length to account for worst case alignment overhead */
	length = info->length + info->align_mask;
	if (length < info->length)	/* overflowed: request cannot fit */
		return -ENOMEM;

	/*
	 * Adjust search limits by the desired length.
	 * See implementation comment at top of unmapped_area().
	 */
	gap_end = info->high_limit;
	if (gap_end < length)
		return -ENOMEM;
	high_limit = gap_end - length;

	if (info->low_limit > high_limit)
		return -ENOMEM;
	low_limit = info->low_limit + length;

	/* Check highest gap, which does not precede any rbtree node */
	gap_start = mm->highest_vm_end;
	if (gap_start <= high_limit)
		goto found_highest;

	/* Check if rbtree root looks promising */
	if (RB_EMPTY_ROOT(&mm->mm_rb))
		return -ENOMEM;
	vma = rb_entry(mm->mm_rb.rb_node, struct vm_area_struct, vm_rb);
	if (vma->rb_subtree_gap < length)
		return -ENOMEM;

	while (true) {
		/* Visit right subtree if it looks promising */
		gap_start = vma->vm_prev ? vm_end_gap(vma->vm_prev) : 0;
		if (gap_start <= high_limit && vma->vm_rb.rb_right) {
			struct vm_area_struct *right =
				rb_entry(vma->vm_rb.rb_right,
					 struct vm_area_struct, vm_rb);
			if (right->rb_subtree_gap >= length) {
				vma = right;
				continue;
			}
		}

check_current:
		/* Check if current node has a suitable gap */
		gap_end = vm_start_gap(vma);
		if (gap_end < low_limit)
			return -ENOMEM;
		if (gap_start <= high_limit &&
		    gap_end > gap_start && gap_end - gap_start >= length)
			goto found;

		/* Visit left subtree if it looks promising */
		if (vma->vm_rb.rb_left) {
			struct vm_area_struct *left =
				rb_entry(vma->vm_rb.rb_left,
					 struct vm_area_struct, vm_rb);
			if (left->rb_subtree_gap >= length) {
				vma = left;
				continue;
			}
		}

		/* Go back up the rbtree to find next candidate node */
		while (true) {
			struct rb_node *prev = &vma->vm_rb;
			if (!rb_parent(prev))
				return -ENOMEM;
			vma = rb_entry(rb_parent(prev),
				       struct vm_area_struct, vm_rb);
			/* Coming up from a right child: visit this node next. */
			if (prev == vma->vm_rb.rb_right) {
				gap_start = vma->vm_prev ?
					vm_end_gap(vma->vm_prev) : 0;
				goto check_current;
			}
		}
	}

found:
	/* We found a suitable gap. Clip it with the original high_limit. */
	if (gap_end > info->high_limit)
		gap_end = info->high_limit;

found_highest:
	/* Compute highest gap address at the desired alignment */
	gap_end -= info->length;
	gap_end -= (gap_end - info->align_offset) & info->align_mask;

	VM_BUG_ON(gap_end < info->low_limit);
	VM_BUG_ON(gap_end < gap_start);
	return gap_end;
}
|
|
|
|
/*
|
|
* Search for an unmapped address range.
|
|
*
|
|
* We are looking for a range that:
|
|
* - does not intersect with any VMA;
|
|
* - is contained within the [low_limit, high_limit) interval;
|
|
* - is at least the desired size.
|
|
* - satisfies (begin_addr & align_mask) == (align_offset & align_mask)
|
|
*/
|
|
unsigned long vm_unmapped_area(struct vm_unmapped_area_info *info)
|
|
{
|
|
unsigned long addr;
|
|
|
|
if (info->flags & VM_UNMAPPED_AREA_TOPDOWN)
|
|
addr = unmapped_area_topdown(info);
|
|
else
|
|
addr = unmapped_area(info);
|
|
|
|
trace_vm_unmapped_area(addr, info);
|
|
return addr;
|
|
}
|
|
|
|
/* Get an address range which is currently unmapped.
|
|
* For shmat() with addr=0.
|
|
*
|
|
* Ugly calling convention alert:
|
|
* Return value with the low bits set means error value,
|
|
* ie
|
|
* if (ret & ~PAGE_MASK)
|
|
* error = ret;
|
|
*
|
|
* This function "knows" that -ENOMEM has the bits set.
|
|
*/
|
|
#ifndef HAVE_ARCH_UNMAPPED_AREA
|
|
unsigned long
|
|
arch_get_unmapped_area(struct file *filp, unsigned long addr,
|
|
unsigned long len, unsigned long pgoff, unsigned long flags)
|
|
{
|
|
struct mm_struct *mm = current->mm;
|
|
struct vm_area_struct *vma, *prev;
|
|
struct vm_unmapped_area_info info;
|
|
const unsigned long mmap_end = arch_get_mmap_end(addr);
|
|
|
|
if (len > mmap_end - mmap_min_addr)
|
|
return -ENOMEM;
|
|
|
|
if (flags & MAP_FIXED)
|
|
return addr;
|
|
|
|
if (addr) {
|
|
addr = PAGE_ALIGN(addr);
|
|
vma = find_vma_prev(mm, addr, &prev);
|
|
if (mmap_end - len >= addr && addr >= mmap_min_addr &&
|
|
(!vma || addr + len <= vm_start_gap(vma)) &&
|
|
(!prev || addr >= vm_end_gap(prev)))
|
|
return addr;
|
|
}
|
|
|
|
info.flags = 0;
|
|
info.length = len;
|
|
info.low_limit = mm->mmap_base;
|
|
info.high_limit = mmap_end;
|
|
info.align_mask = 0;
|
|
info.align_offset = 0;
|
|
return vm_unmapped_area(&info);
|
|
}
|
|
#endif
|
|
|
|
/*
|
|
* This mmap-allocator allocates new areas top-down from below the
|
|
* stack's low limit (the base):
|
|
*/
|
|
#ifndef HAVE_ARCH_UNMAPPED_AREA_TOPDOWN
|
|
unsigned long
|
|
arch_get_unmapped_area_topdown(struct file *filp, unsigned long addr,
|
|
unsigned long len, unsigned long pgoff,
|
|
unsigned long flags)
|
|
{
|
|
struct vm_area_struct *vma, *prev;
|
|
struct mm_struct *mm = current->mm;
|
|
struct vm_unmapped_area_info info;
|
|
const unsigned long mmap_end = arch_get_mmap_end(addr);
|
|
|
|
/* requested length too big for entire address space */
|
|
if (len > mmap_end - mmap_min_addr)
|
|
return -ENOMEM;
|
|
|
|
if (flags & MAP_FIXED)
|
|
return addr;
|
|
|
|
/* requesting a specific address */
|
|
if (addr) {
|
|
addr = PAGE_ALIGN(addr);
|
|
vma = find_vma_prev(mm, addr, &prev);
|
|
if (mmap_end - len >= addr && addr >= mmap_min_addr &&
|
|
(!vma || addr + len <= vm_start_gap(vma)) &&
|
|
(!prev || addr >= vm_end_gap(prev)))
|
|
return addr;
|
|
}
|
|
|
|
info.flags = VM_UNMAPPED_AREA_TOPDOWN;
|
|
info.length = len;
|
|
info.low_limit = max(PAGE_SIZE, mmap_min_addr);
|
|
info.high_limit = arch_get_mmap_base(addr, mm->mmap_base);
|
|
info.align_mask = 0;
|
|
info.align_offset = 0;
|
|
addr = vm_unmapped_area(&info);
|
|
|
|
/*
|
|
* A failed mmap() very likely causes application failure,
|
|
* so fall back to the bottom-up function here. This scenario
|
|
* can happen with large stack limits and large mmap()
|
|
* allocations.
|
|
*/
|
|
if (offset_in_page(addr)) {
|
|
VM_BUG_ON(addr != -ENOMEM);
|
|
info.flags = 0;
|
|
info.low_limit = TASK_UNMAPPED_BASE;
|
|
info.high_limit = mmap_end;
|
|
addr = vm_unmapped_area(&info);
|
|
}
|
|
|
|
return addr;
|
|
}
|
|
#endif
|
|
|
|
/*
 * Find a free range of the address space for a new mapping.
 *
 * Dispatches to the file's ->get_unmapped_area if present, to shmem's
 * allocator for anonymous MAP_SHARED (which becomes a shmem file later),
 * and otherwise to the per-mm default.  Validates the result against
 * TASK_SIZE, page alignment, and the LSM mmap_addr hook.
 *
 * Returns the chosen address or a negative errno.
 */
unsigned long
get_unmapped_area(struct file *file, unsigned long addr, unsigned long len,
		unsigned long pgoff, unsigned long flags)
{
	unsigned long (*get_area)(struct file *, unsigned long,
				  unsigned long, unsigned long, unsigned long);

	unsigned long error = arch_mmap_check(addr, len, flags);
	if (error)
		return error;

	/* Careful about overflows.. */
	if (len > TASK_SIZE)
		return -ENOMEM;

	get_area = current->mm->get_unmapped_area;
	if (file) {
		if (file->f_op->get_unmapped_area)
			get_area = file->f_op->get_unmapped_area;
	} else if (flags & MAP_SHARED) {
		/*
		 * mmap_region() will call shmem_zero_setup() to create a file,
		 * so use shmem's get_unmapped_area in case it can be huge.
		 * do_mmap() will clear pgoff, so match alignment.
		 */
		pgoff = 0;
		get_area = shmem_get_unmapped_area;
	}

	addr = get_area(file, addr, len, pgoff, flags);
	if (IS_ERR_VALUE(addr))
		return addr;

	/* The allocator's answer must still fit below TASK_SIZE... */
	if (addr > TASK_SIZE - len)
		return -ENOMEM;
	/* ...and be page aligned (a misaligned value means a bad allocator). */
	if (offset_in_page(addr))
		return -EINVAL;

	error = security_mmap_addr(addr);
	return error ? error : addr;
}

EXPORT_SYMBOL(get_unmapped_area);
|
|
|
|
struct vm_area_struct *find_vma_from_tree(struct mm_struct *mm, unsigned long addr)
|
|
{
|
|
struct rb_node *rb_node;
|
|
struct vm_area_struct *vma = NULL;
|
|
|
|
rb_node = mm->mm_rb.rb_node;
|
|
|
|
while (rb_node) {
|
|
struct vm_area_struct *tmp;
|
|
|
|
tmp = rb_entry(rb_node, struct vm_area_struct, vm_rb);
|
|
|
|
if (tmp->vm_end > addr) {
|
|
vma = tmp;
|
|
if (tmp->vm_start <= addr)
|
|
break;
|
|
rb_node = rb_node->rb_left;
|
|
} else
|
|
rb_node = rb_node->rb_right;
|
|
}
|
|
|
|
return vma;
|
|
}
|
|
|
|
/* Look up the first VMA which satisfies addr < vm_end, NULL if none. */
|
|
/* Look up the first VMA which satisfies addr < vm_end, NULL if none. */
struct vm_area_struct *__find_vma(struct mm_struct *mm, unsigned long addr)
{
	/* Fast path: the per-task VMA cache. */
	struct vm_area_struct *vma = vmacache_find(mm, addr);

	if (likely(vma))
		return vma;

	/* Slow path: the rbtree; seed the cache on a hit. */
	vma = find_vma_from_tree(mm, addr);
	if (vma)
		vmacache_update(addr, vma);

	return vma;
}

EXPORT_SYMBOL(__find_vma);
|
|
|
|
/*
|
|
* Same as find_vma, but also return a pointer to the previous VMA in *pprev.
|
|
*/
|
|
struct vm_area_struct *
|
|
find_vma_prev(struct mm_struct *mm, unsigned long addr,
|
|
struct vm_area_struct **pprev)
|
|
{
|
|
struct vm_area_struct *vma;
|
|
|
|
vma = find_vma(mm, addr);
|
|
if (vma) {
|
|
*pprev = vma->vm_prev;
|
|
} else {
|
|
struct rb_node *rb_node = rb_last(&mm->mm_rb);
|
|
|
|
*pprev = rb_node ? rb_entry(rb_node, struct vm_area_struct, vm_rb) : NULL;
|
|
}
|
|
return vma;
|
|
}
|
|
|
|
/*
|
|
* Verify that the stack growth is acceptable and
|
|
* update accounting. This is shared with both the
|
|
* grow-up and grow-down cases.
|
|
*/
|
|
static int acct_stack_growth(struct vm_area_struct *vma,
|
|
unsigned long size, unsigned long grow)
|
|
{
|
|
struct mm_struct *mm = vma->vm_mm;
|
|
unsigned long new_start;
|
|
|
|
/* address space limit tests */
|
|
if (!may_expand_vm(mm, vma->vm_flags, grow))
|
|
return -ENOMEM;
|
|
|
|
/* Stack limit test */
|
|
if (size > rlimit(RLIMIT_STACK))
|
|
return -ENOMEM;
|
|
|
|
/* mlock limit tests */
|
|
if (vma->vm_flags & VM_LOCKED) {
|
|
unsigned long locked;
|
|
unsigned long limit;
|
|
locked = mm->locked_vm + grow;
|
|
limit = rlimit(RLIMIT_MEMLOCK);
|
|
limit >>= PAGE_SHIFT;
|
|
if (locked > limit && !capable(CAP_IPC_LOCK))
|
|
return -ENOMEM;
|
|
}
|
|
|
|
/* Check to ensure the stack will not grow into a hugetlb-only region */
|
|
new_start = (vma->vm_flags & VM_GROWSUP) ? vma->vm_start :
|
|
vma->vm_end - size;
|
|
if (is_hugepage_only_range(vma->vm_mm, new_start, size))
|
|
return -EFAULT;
|
|
|
|
/*
|
|
* Overcommit.. This must be the final test, as it will
|
|
* update security statistics.
|
|
*/
|
|
if (security_vm_enough_memory_mm(mm, grow))
|
|
return -ENOMEM;
|
|
|
|
return 0;
|
|
}
|
|
|
|
#if defined(CONFIG_STACK_GROWSUP) || defined(CONFIG_IA64)
|
|
/*
|
|
* PA-RISC uses this for its stack; IA64 for its Register Backing Store.
|
|
* vma is the last one with address > vma->vm_end. Have to extend vma.
|
|
*/
|
|
/*
 * Grow @vma upwards so that it covers @address (page-rounded, plus one
 * page).  Caller holds mmap_lock for read; concurrent expansion is
 * serialised on the vma's anon_vma lock and mm->page_table_lock.
 * Returns 0 on success or a negative errno.
 */
int expand_upwards(struct vm_area_struct *vma, unsigned long address)
{
	struct mm_struct *mm = vma->vm_mm;
	struct vm_area_struct *next;
	unsigned long gap_addr;
	int error = 0;

	if (!(vma->vm_flags & VM_GROWSUP))
		return -EFAULT;

	/* Guard against exceeding limits of the address space. */
	address &= PAGE_MASK;
	if (address >= (TASK_SIZE & PAGE_MASK))
		return -ENOMEM;
	address += PAGE_SIZE;

	/* Enforce stack_guard_gap */
	gap_addr = address + stack_guard_gap;

	/* Guard against overflow */
	if (gap_addr < address || gap_addr > TASK_SIZE)
		gap_addr = TASK_SIZE;

	/* Refuse to grow within the guard gap of the next accessible VMA. */
	next = vma->vm_next;
	if (next && next->vm_start < gap_addr && vma_is_accessible(next)) {
		if (!(next->vm_flags & VM_GROWSUP))
			return -ENOMEM;
		/* Check that both stack segments have the same anon_vma? */
	}

	/* We must make sure the anon_vma is allocated. */
	if (unlikely(anon_vma_prepare(vma)))
		return -ENOMEM;

	/*
	 * vma->vm_start/vm_end cannot change under us because the caller
	 * is required to hold the mmap_lock in read mode. We need the
	 * anon_vma lock to serialize against concurrent expand_stacks.
	 */
	anon_vma_lock_write(vma->anon_vma);

	/* Somebody else might have raced and expanded it already */
	if (address > vma->vm_end) {
		unsigned long size, grow;

		size = address - vma->vm_start;
		grow = (address - vma->vm_end) >> PAGE_SHIFT;

		error = -ENOMEM;
		/* Reject growth that would wrap the file offset space. */
		if (vma->vm_pgoff + (size >> PAGE_SHIFT) >= vma->vm_pgoff) {
			error = acct_stack_growth(vma, size, grow);
			if (!error) {
				/*
				 * vma_gap_update() doesn't support concurrent
				 * updates, but we only hold a shared mmap_lock
				 * lock here, so we need to protect against
				 * concurrent vma expansions.
				 * anon_vma_lock_write() doesn't help here, as
				 * we don't guarantee that all growable vmas
				 * in a mm share the same root anon vma.
				 * So, we reuse mm->page_table_lock to guard
				 * against concurrent vma expansions.
				 */
				spin_lock(&mm->page_table_lock);
				if (vma->vm_flags & VM_LOCKED)
					mm->locked_vm += grow;
				vm_stat_account(mm, vma->vm_flags, grow);
				anon_vma_interval_tree_pre_update_vma(vma);
				vma->vm_end = address;
				anon_vma_interval_tree_post_update_vma(vma);
				if (vma->vm_next)
					vma_gap_update(vma->vm_next);
				else
					mm->highest_vm_end = vm_end_gap(vma);
				spin_unlock(&mm->page_table_lock);

				perf_event_mmap(vma);
			}
		}
	}
	anon_vma_unlock_write(vma->anon_vma);
	khugepaged_enter_vma_merge(vma, vma->vm_flags);
	validate_mm(mm);
	return error;
}
|
|
#endif /* CONFIG_STACK_GROWSUP || CONFIG_IA64 */
|
|
|
|
/*
|
|
* vma is the first one with address < vma->vm_start. Have to extend vma.
|
|
*/
|
|
/*
 * vma is the first one with address < vma->vm_start.  Have to extend vma.
 *
 * Grow @vma downwards so that it starts at the page containing @address.
 * Caller holds mmap_lock for read; concurrent expansion is serialised on
 * the vma's anon_vma lock and mm->page_table_lock.
 * Returns 0 on success or a negative errno.
 */
int expand_downwards(struct vm_area_struct *vma,
				   unsigned long address)
{
	struct mm_struct *mm = vma->vm_mm;
	struct vm_area_struct *prev;
	int error = 0;

	address &= PAGE_MASK;
	if (address < mmap_min_addr)
		return -EPERM;

	/* Enforce stack_guard_gap */
	prev = vma->vm_prev;
	/* Check that both stack segments have the same anon_vma? */
	if (prev && !(prev->vm_flags & VM_GROWSDOWN) &&
			vma_is_accessible(prev)) {
		if (address - prev->vm_end < stack_guard_gap)
			return -ENOMEM;
	}

	/* We must make sure the anon_vma is allocated. */
	if (unlikely(anon_vma_prepare(vma)))
		return -ENOMEM;

	/*
	 * vma->vm_start/vm_end cannot change under us because the caller
	 * is required to hold the mmap_lock in read mode. We need the
	 * anon_vma lock to serialize against concurrent expand_stacks.
	 */
	anon_vma_lock_write(vma->anon_vma);

	/* Somebody else might have raced and expanded it already */
	if (address < vma->vm_start) {
		unsigned long size, grow;

		size = vma->vm_end - address;
		grow = (vma->vm_start - address) >> PAGE_SHIFT;

		error = -ENOMEM;
		/* vm_pgoff must not underflow when shifted down by @grow. */
		if (grow <= vma->vm_pgoff) {
			error = acct_stack_growth(vma, size, grow);
			if (!error) {
				/*
				 * vma_gap_update() doesn't support concurrent
				 * updates, but we only hold a shared mmap_lock
				 * lock here, so we need to protect against
				 * concurrent vma expansions.
				 * anon_vma_lock_write() doesn't help here, as
				 * we don't guarantee that all growable vmas
				 * in a mm share the same root anon vma.
				 * So, we reuse mm->page_table_lock to guard
				 * against concurrent vma expansions.
				 */
				spin_lock(&mm->page_table_lock);
				if (vma->vm_flags & VM_LOCKED)
					mm->locked_vm += grow;
				vm_stat_account(mm, vma->vm_flags, grow);
				anon_vma_interval_tree_pre_update_vma(vma);
				vma->vm_start = address;
				vma->vm_pgoff -= grow;
				anon_vma_interval_tree_post_update_vma(vma);
				vma_gap_update(vma);
				spin_unlock(&mm->page_table_lock);

				perf_event_mmap(vma);
			}
		}
	}
	anon_vma_unlock_write(vma->anon_vma);
	khugepaged_enter_vma_merge(vma, vma->vm_flags);
	validate_mm(mm);
	return error;
}
|
|
|
|
/* enforced gap between the expanding stack and other mappings. */
unsigned long stack_guard_gap = 256UL<<PAGE_SHIFT;

/*
 * Parse the "stack_guard_gap=" boot parameter (value in pages).
 * Invalid (non-numeric trailing) input leaves the default untouched.
 */
static int __init cmdline_parse_stack_guard_gap(char *p)
{
	unsigned long val;
	char *endptr;

	val = simple_strtoul(p, &endptr, 10);
	/* only accept the value if the whole string parsed as a number */
	if (!*endptr)
		stack_guard_gap = val << PAGE_SHIFT;

	return 1;
}
__setup("stack_guard_gap=", cmdline_parse_stack_guard_gap);
|
|
|
|
#ifdef CONFIG_STACK_GROWSUP
/* On grow-up architectures, expand_stack() grows the VMA's end upwards. */
int expand_stack(struct vm_area_struct *vma, unsigned long address)
{
	return expand_upwards(vma, address);
}

/*
 * Find the VMA covering @addr, growing the preceding VM_GROWSUP VMA up to
 * it if necessary.  Returns the covering VMA or NULL.
 */
struct vm_area_struct *
find_extend_vma(struct mm_struct *mm, unsigned long addr)
{
	struct vm_area_struct *vma, *prev;

	addr &= PAGE_MASK;
	vma = find_vma_prev(mm, addr, &prev);
	if (vma && (vma->vm_start <= addr))
		return vma;
	/* don't alter vm_end if the coredump is running */
	if (!prev || expand_stack(prev, addr))
		return NULL;
	/* keep an mlocked stack fully populated after growing it */
	if (prev->vm_flags & VM_LOCKED)
		populate_vma_page_range(prev, addr, prev->vm_end, NULL);
	return prev;
}
#else
/* On grow-down architectures, expand_stack() grows the VMA's start down. */
int expand_stack(struct vm_area_struct *vma, unsigned long address)
{
	return expand_downwards(vma, address);
}

/*
 * Find the VMA covering @addr, growing the following VM_GROWSDOWN VMA
 * down to it if necessary.  Returns the covering VMA or NULL.
 */
struct vm_area_struct *
find_extend_vma(struct mm_struct *mm, unsigned long addr)
{
	struct vm_area_struct *vma;
	unsigned long start;

	addr &= PAGE_MASK;
	vma = find_vma(mm, addr);
	if (!vma)
		return NULL;
	if (vma->vm_start <= addr)
		return vma;
	if (!(vma->vm_flags & VM_GROWSDOWN))
		return NULL;
	/* remember the old start: only the newly-added range is populated */
	start = vma->vm_start;
	if (expand_stack(vma, addr))
		return NULL;
	if (vma->vm_flags & VM_LOCKED)
		populate_vma_page_range(vma, addr, start, NULL);
	return vma;
}
#endif

EXPORT_SYMBOL_GPL(find_extend_vma);
|
|
|
|
/*
|
|
* Ok - we have the memory areas we should free on the vma list,
|
|
* so release them, and do the vma updates.
|
|
*
|
|
* Called with the mm semaphore held.
|
|
*/
|
|
static void remove_vma_list(struct mm_struct *mm, struct vm_area_struct *vma)
|
|
{
|
|
unsigned long nr_accounted = 0;
|
|
|
|
/* Update high watermark before we lower total_vm */
|
|
update_hiwater_vm(mm);
|
|
do {
|
|
long nrpages = vma_pages(vma);
|
|
|
|
if (vma->vm_flags & VM_ACCOUNT)
|
|
nr_accounted += nrpages;
|
|
vm_stat_account(mm, vma->vm_flags, -nrpages);
|
|
vma = remove_vma(vma);
|
|
} while (vma);
|
|
vm_unacct_memory(nr_accounted);
|
|
validate_mm(mm);
|
|
}
|
|
|
|
/*
|
|
* Get rid of page table information in the indicated region.
|
|
*
|
|
* Called with the mm semaphore held.
|
|
*/
|
|
/*
 * Get rid of page table information in the indicated region.
 *
 * Called with the mm semaphore held.
 */
static void unmap_region(struct mm_struct *mm,
		struct vm_area_struct *vma, struct vm_area_struct *prev,
		unsigned long start, unsigned long end)
{
	struct vm_area_struct *next = vma_next(mm, prev);
	struct mmu_gather tlb;
	struct vm_area_struct *cur_vma;

	lru_add_drain();
	tlb_gather_mmu(&tlb, mm);
	update_hiwater_rss(mm);
	unmap_vmas(&tlb, vma, start, end);

	/*
	 * Ensure we have no stale TLB entries by the time this mapping is
	 * removed from the rmap.
	 * Note that we don't have to worry about nested flushes here because
	 * we're holding the mm semaphore for removing the mapping - so any
	 * concurrent flush in this region has to be coming through the rmap,
	 * and we synchronize against that using the rmap lock.
	 */
	for (cur_vma = vma; cur_vma; cur_vma = cur_vma->vm_next) {
		/* one eager flush suffices for the whole batch */
		if ((cur_vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)) != 0) {
			tlb_flush_mmu(&tlb);
			break;
		}
	}

	/* free page tables between the surviving neighbours (or the limits) */
	free_pgtables(&tlb, vma, prev ? prev->vm_end : FIRST_USER_ADDRESS,
				 next ? next->vm_start : USER_PGTABLES_CEILING);
	tlb_finish_mmu(&tlb);
}
|
|
|
|
/*
|
|
* Create a list of vma's touched by the unmap, removing them from the mm's
|
|
* vma list as we go..
|
|
*/
|
|
/*
 * Create a list of vma's touched by the unmap, removing them from the mm's
 * vma list as we go..
 *
 * Returns false when the caller must NOT downgrade mmap_lock to read mode
 * (a neighbouring VMA could grow into the range being unmapped).
 */
static bool
detach_vmas_to_be_unmapped(struct mm_struct *mm, struct vm_area_struct *vma,
	struct vm_area_struct *prev, unsigned long end)
{
	struct vm_area_struct **insertion_point;
	struct vm_area_struct *tail_vma = NULL;

	insertion_point = (prev ? &prev->vm_next : &mm->mmap);
	vma->vm_prev = NULL;
	/* unlink every VMA that starts before @end from the rbtree */
	do {
		vma_rb_erase(vma, &mm->mm_rb);
		mm->map_count--;
		tail_vma = vma;
		vma = vma->vm_next;
	} while (vma && vma->vm_start < end);
	/* splice the survivors back onto the mm list */
	*insertion_point = vma;
	if (vma) {
		vma->vm_prev = prev;
		vma_gap_update(vma);
	} else
		mm->highest_vm_end = prev ? vm_end_gap(prev) : 0;
	/* terminate the detached list at its last element */
	tail_vma->vm_next = NULL;

	/* Kill the cache */
	vmacache_invalidate(mm);

	/*
	 * Do not downgrade mmap_lock if we are next to VM_GROWSDOWN or
	 * VM_GROWSUP VMA. Such VMAs can change their size under
	 * down_read(mmap_lock) and collide with the VMA we are about to unmap.
	 */
	if (vma && (vma->vm_flags & VM_GROWSDOWN))
		return false;
	if (prev && (prev->vm_flags & VM_GROWSUP))
		return false;
	return true;
}
|
|
|
|
/*
|
|
* __split_vma() bypasses sysctl_max_map_count checking. We use this where it
|
|
* has already been checked or doesn't make sense to fail.
|
|
*/
|
|
/*
 * __split_vma() bypasses sysctl_max_map_count checking. We use this where it
 * has already been checked or doesn't make sense to fail.
 *
 * Splits @vma at @addr.  When @new_below is set, the new VMA covers the
 * lower part; otherwise the upper part.  Returns 0 or a negative errno.
 */
int __split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
		unsigned long addr, int new_below)
{
	struct vm_area_struct *new;
	int err;

	/* give the driver a chance to veto the split */
	if (vma->vm_ops && vma->vm_ops->may_split) {
		err = vma->vm_ops->may_split(vma, addr);
		if (err)
			return err;
	}

	new = vm_area_dup(vma);
	if (!new)
		return -ENOMEM;

	if (new_below)
		new->vm_end = addr;
	else {
		new->vm_start = addr;
		/* keep file offset consistent with the new start */
		new->vm_pgoff += ((addr - vma->vm_start) >> PAGE_SHIFT);
	}

	err = vma_dup_policy(vma, new);
	if (err)
		goto out_free_vma;

	err = anon_vma_clone(new, vma);
	if (err)
		goto out_free_mpol;

	if (new->vm_file)
		get_file(new->vm_file);

	if (new->vm_ops && new->vm_ops->open)
		new->vm_ops->open(new);

	/* shrink the old vma and link the new one into place */
	if (new_below)
		err = vma_adjust(vma, addr, vma->vm_end, vma->vm_pgoff +
			((addr - new->vm_start) >> PAGE_SHIFT), new);
	else
		err = vma_adjust(vma, vma->vm_start, addr, vma->vm_pgoff, new);

	/* Success. */
	if (!err)
		return 0;

	/* Clean everything up if vma_adjust failed. */
	if (new->vm_ops && new->vm_ops->close)
		new->vm_ops->close(new);
	if (new->vm_file)
		fput(new->vm_file);
	unlink_anon_vmas(new);
 out_free_mpol:
	mpol_put(vma_policy(new));
 out_free_vma:
	new->vm_file = NULL;	/* prevents fput within vm_area_free() */
	vm_area_free(new);
	return err;
}
|
|
|
|
/*
|
|
* Split a vma into two pieces at address 'addr', a new vma is allocated
|
|
* either for the first part or the tail.
|
|
*/
|
|
int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
|
|
unsigned long addr, int new_below)
|
|
{
|
|
if (mm->map_count >= sysctl_max_map_count)
|
|
return -ENOMEM;
|
|
|
|
return __split_vma(mm, vma, addr, new_below);
|
|
}
|
|
|
|
static inline void
|
|
unlock_range(struct vm_area_struct *start, unsigned long limit)
|
|
{
|
|
struct mm_struct *mm = start->vm_mm;
|
|
struct vm_area_struct *tmp = start;
|
|
|
|
while (tmp && tmp->vm_start < limit) {
|
|
if (tmp->vm_flags & VM_LOCKED) {
|
|
mm->locked_vm -= vma_pages(tmp);
|
|
munlock_vma_pages_all(tmp);
|
|
}
|
|
|
|
tmp = tmp->vm_next;
|
|
}
|
|
}
|
|
|
|
/* Munmap is split into 2 main parts -- this part which finds
|
|
* what needs doing, and the areas themselves, which do the
|
|
* work. This now handles partial unmappings.
|
|
* Jeremy Fitzhardinge <jeremy@goop.org>
|
|
*/
|
|
/* Munmap is split into 2 main parts -- this part which finds
 * what needs doing, and the areas themselves, which do the
 * work.  This now handles partial unmappings.
 * Jeremy Fitzhardinge <jeremy@goop.org>
 *
 * Caller holds mmap_lock for write.  Returns 0 on success, 1 when
 * @downgrade was honoured (lock now held for read), or a negative errno.
 */
int __do_munmap(struct mm_struct *mm, unsigned long start, size_t len,
		struct list_head *uf, bool downgrade)
{
	unsigned long end;
	struct vm_area_struct *vma, *prev, *last;

	if ((offset_in_page(start)) || start > TASK_SIZE || len > TASK_SIZE-start)
		return -EINVAL;

	len = PAGE_ALIGN(len);
	end = start + len;
	if (len == 0)
		return -EINVAL;

	/*
	 * arch_unmap() might do unmaps itself.  It must be called
	 * and finish any rbtree manipulation before this code
	 * runs and also starts to manipulate the rbtree.
	 */
	arch_unmap(mm, start, end);

	/* Find the first overlapping VMA where start < vma->vm_end */
	vma = find_vma_intersection(mm, start, end);
	if (!vma)
		return 0;
	prev = vma->vm_prev;

	/*
	 * If we need to split any vma, do it now to save pain later.
	 *
	 * Note: mremap's move_vma VM_ACCOUNT handling assumes a partially
	 * unmapped vm_area_struct will remain in use: so lower split_vma
	 * places tmp vma above, and higher split_vma places tmp vma below.
	 */
	if (start > vma->vm_start) {
		int error;

		/*
		 * Make sure that map_count on return from munmap() will
		 * not exceed its limit; but let map_count go just above
		 * its limit temporarily, to help free resources as expected.
		 */
		if (end < vma->vm_end && mm->map_count >= sysctl_max_map_count)
			return -ENOMEM;

		error = __split_vma(mm, vma, start, 0);
		if (error)
			return error;
		prev = vma;
	}

	/* Does it split the last one? */
	last = find_vma(mm, end);
	if (last && end > last->vm_start) {
		int error = __split_vma(mm, last, end, 1);
		if (error)
			return error;
	}
	vma = vma_next(mm, prev);

	if (unlikely(uf)) {
		/*
		 * If userfaultfd_unmap_prep returns an error the vmas
		 * will remain split, but userland will get a
		 * highly unexpected error anyway. This is no
		 * different than the case where the first of the two
		 * __split_vma fails, but we don't undo the first
		 * split, despite we could. This is unlikely enough
		 * failure that it's not worth optimizing it for.
		 */
		int error = userfaultfd_unmap_prep(vma, start, end, uf);

		if (error)
			return error;
	}

	/*
	 * unlock any mlock()ed ranges before detaching vmas
	 */
	if (mm->locked_vm)
		unlock_range(vma, end);

	/* Detach vmas from rbtree */
	if (!detach_vmas_to_be_unmapped(mm, vma, prev, end))
		downgrade = false;

	/* detached: page-table teardown can proceed under the read lock */
	if (downgrade)
		mmap_write_downgrade(mm);

	unmap_region(mm, vma, prev, start, end);

	/* Fix up all other VM information */
	remove_vma_list(mm, vma);

	return downgrade ? 1 : 0;
}
|
|
|
|
/*
 * Unmap [start, start+len) from @mm, keeping mmap_lock held for write
 * throughout (no downgrade).  Thin wrapper around __do_munmap().
 */
int do_munmap(struct mm_struct *mm, unsigned long start, size_t len,
	      struct list_head *uf)
{
	return __do_munmap(mm, start, len, uf, false);
}
|
|
|
|
/*
 * Common backend for vm_munmap() and the munmap() syscall.  Takes the
 * mmap write lock, unmaps, and translates __do_munmap()'s downgrade
 * return (1) into the matching unlock.
 */
static int __vm_munmap(unsigned long start, size_t len, bool downgrade)
{
	int ret;
	struct mm_struct *mm = current->mm;
	LIST_HEAD(uf);

	if (mmap_write_lock_killable(mm))
		return -EINTR;

	ret = __do_munmap(mm, start, len, &uf, downgrade);
	/*
	 * Returning 1 indicates mmap_lock is downgraded.
	 * But 1 is not legal return value of vm_munmap() and munmap(), reset
	 * it to 0 before return.
	 */
	if (ret == 1) {
		mmap_read_unlock(mm);
		ret = 0;
	} else
		mmap_write_unlock(mm);

	/* notify userfaultfd after the lock is dropped */
	userfaultfd_unmap_complete(mm, &uf);
	return ret;
}
|
|
|
|
/*
 * In-kernel munmap of [start, start+len) in the current mm.  Does not
 * downgrade mmap_lock.
 */
int vm_munmap(unsigned long start, size_t len)
{
	return __vm_munmap(start, len, false);
}
EXPORT_SYMBOL(vm_munmap);
|
|
|
|
/*
 * munmap(2): strip any pointer tag from the address, then unmap with
 * mmap_lock downgrade enabled (teardown can run under the read lock).
 */
SYSCALL_DEFINE2(munmap, unsigned long, addr, size_t, len)
{
	addr = untagged_addr(addr);
	profile_munmap(addr);
	return __vm_munmap(addr, len, true);
}
|
|
|
|
|
|
/*
|
|
* Emulation of deprecated remap_file_pages() syscall.
|
|
*/
|
|
/*
 * Emulation of deprecated remap_file_pages() syscall.
 *
 * Re-maps [start, start+size) of an existing shared file mapping at a new
 * file offset by replaying an equivalent MAP_FIXED do_mmap() over it.
 */
SYSCALL_DEFINE5(remap_file_pages, unsigned long, start, unsigned long, size,
		unsigned long, prot, unsigned long, pgoff, unsigned long, flags)
{

	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	unsigned long populate = 0;
	unsigned long ret = -EINVAL;
	struct file *file;

	pr_warn_once("%s (%d) uses deprecated remap_file_pages() syscall. See Documentation/vm/remap_file_pages.rst.\n",
		     current->comm, current->pid);

	/* the emulation accepts no protection changes */
	if (prot)
		return ret;
	start = start & PAGE_MASK;
	size = size & PAGE_MASK;

	if (start + size <= start)
		return ret;

	/* Does pgoff wrap? */
	if (pgoff + (size >> PAGE_SHIFT) < pgoff)
		return ret;

	if (mmap_write_lock_killable(mm))
		return -EINTR;

	vma = vma_lookup(mm, start);

	if (!vma || !(vma->vm_flags & VM_SHARED))
		goto out;

	/*
	 * When the range spans several VMAs they must form a contiguous
	 * run over the same file with identical flags.
	 */
	if (start + size > vma->vm_end) {
		struct vm_area_struct *next;

		for (next = vma->vm_next; next; next = next->vm_next) {
			/* hole between vmas ? */
			if (next->vm_start != next->vm_prev->vm_end)
				goto out;

			if (next->vm_file != vma->vm_file)
				goto out;

			if (next->vm_flags != vma->vm_flags)
				goto out;

			if (start + size <= next->vm_end)
				break;
		}

		if (!next)
			goto out;
	}

	/* rebuild the prot/flags arguments from the existing mapping */
	prot |= vma->vm_flags & VM_READ ? PROT_READ : 0;
	prot |= vma->vm_flags & VM_WRITE ? PROT_WRITE : 0;
	prot |= vma->vm_flags & VM_EXEC ? PROT_EXEC : 0;

	flags &= MAP_NONBLOCK;
	flags |= MAP_SHARED | MAP_FIXED | MAP_POPULATE;
	if (vma->vm_flags & VM_LOCKED)
		flags |= MAP_LOCKED;

	/* hold an extra file ref: do_mmap() may unmap and drop vma's ref */
	file = get_file(vma->vm_file);
	ret = do_mmap(vma->vm_file, start, size,
			prot, flags, pgoff, &populate, NULL);
	fput(file);
out:
	mmap_write_unlock(mm);
	if (populate)
		mm_populate(ret, populate);
	if (!IS_ERR_VALUE(ret))
		ret = 0;
	return ret;
}
|
|
|
|
/*
|
|
* this is really a simplified "do_mmap". it only handles
|
|
* anonymous maps. eventually we may be able to do some
|
|
* brk-specific accounting here.
|
|
*/
|
|
/*
 * this is really a simplified "do_mmap".  it only handles
 * anonymous maps.  eventually we may be able to do some
 * brk-specific accounting here.
 *
 * @flags may only contain VM_EXEC on top of the defaults.  Caller holds
 * mmap_lock for write.  Returns 0 or a negative errno.
 */
static int do_brk_flags(unsigned long addr, unsigned long len, unsigned long flags, struct list_head *uf)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma, *prev;
	struct rb_node **rb_link, *rb_parent;
	pgoff_t pgoff = addr >> PAGE_SHIFT;
	int error;
	unsigned long mapped_addr;

	/* Until we need other flags, refuse anything except VM_EXEC. */
	if ((flags & (~VM_EXEC)) != 0)
		return -EINVAL;
	flags |= VM_DATA_DEFAULT_FLAGS | VM_ACCOUNT | mm->def_flags;

	mapped_addr = get_unmapped_area(NULL, addr, len, 0, MAP_FIXED);
	if (IS_ERR_VALUE(mapped_addr))
		return mapped_addr;

	error = mlock_future_check(mm, mm->def_flags, len);
	if (error)
		return error;

	/* Clear old maps, set up prev, rb_link, rb_parent, and uf */
	if (munmap_vma_range(mm, addr, len, &prev, &rb_link, &rb_parent, uf))
		return -ENOMEM;

	/* Check against address space limits *after* clearing old maps... */
	if (!may_expand_vm(mm, flags, len >> PAGE_SHIFT))
		return -ENOMEM;

	if (mm->map_count > sysctl_max_map_count)
		return -ENOMEM;

	if (security_vm_enough_memory_mm(mm, len >> PAGE_SHIFT))
		return -ENOMEM;

	/* Can we just expand an old private anonymous mapping? */
	vma = vma_merge(mm, prev, addr, addr + len, flags,
			NULL, NULL, pgoff, NULL, NULL_VM_UFFD_CTX, NULL);
	if (vma)
		goto out;

	/*
	 * create a vma struct for an anonymous mapping
	 */
	vma = vm_area_alloc(mm);
	if (!vma) {
		/* undo the security_vm_enough_memory_mm() charge */
		vm_unacct_memory(len >> PAGE_SHIFT);
		return -ENOMEM;
	}

	vma_set_anonymous(vma);
	vma->vm_start = addr;
	vma->vm_end = addr + len;
	vma->vm_pgoff = pgoff;
	vma->vm_flags = flags;
	vma->vm_page_prot = vm_get_page_prot(flags);
	vma_link(mm, vma, prev, rb_link, rb_parent);
out:
	perf_event_mmap(vma);
	mm->total_vm += len >> PAGE_SHIFT;
	mm->data_vm += len >> PAGE_SHIFT;
	if (flags & VM_LOCKED)
		mm->locked_vm += (len >> PAGE_SHIFT);
	vma->vm_flags |= VM_SOFTDIRTY;
	return 0;
}
|
|
|
|
/*
 * In-kernel brk-style anonymous mapping of @request bytes at @addr.
 * Takes mmap_lock itself; populates the range afterwards when the mm
 * defaults to VM_LOCKED.  Returns 0 or a negative errno.
 */
int vm_brk_flags(unsigned long addr, unsigned long request, unsigned long flags)
{
	struct mm_struct *mm = current->mm;
	unsigned long len;
	int ret;
	bool populate;
	LIST_HEAD(uf);

	len = PAGE_ALIGN(request);
	/* PAGE_ALIGN() wrapped: request was too close to ULONG_MAX */
	if (len < request)
		return -ENOMEM;
	if (!len)
		return 0;

	if (mmap_write_lock_killable(mm))
		return -EINTR;

	ret = do_brk_flags(addr, len, flags, &uf);
	/* sample def_flags under the lock; mm_populate runs after unlock */
	populate = ((mm->def_flags & VM_LOCKED) != 0);
	mmap_write_unlock(mm);
	userfaultfd_unmap_complete(mm, &uf);
	if (populate && !ret)
		mm_populate(addr, len);
	return ret;
}
EXPORT_SYMBOL(vm_brk_flags);
|
|
|
|
/* In-kernel brk-style mapping with no extra VM flags. */
int vm_brk(unsigned long addr, unsigned long len)
{
	return vm_brk_flags(addr, len, 0);
}
EXPORT_SYMBOL(vm_brk);
|
|
|
|
/* Release all mmaps. */
|
|
/* Release all mmaps. */
void exit_mmap(struct mm_struct *mm)
{
	struct mmu_gather tlb;
	struct vm_area_struct *vma;
	unsigned long nr_accounted = 0;

	/* mm's last user has gone, and its about to be pulled down */
	mmu_notifier_release(mm);

	if (unlikely(mm_is_oom_victim(mm))) {
		/*
		 * Manually reap the mm to free as much memory as possible.
		 * Then, as the oom reaper does, set MMF_OOM_SKIP to disregard
		 * this mm from further consideration.  Taking mm->mmap_lock for
		 * write after setting MMF_OOM_SKIP will guarantee that the oom
		 * reaper will not run on this mm again after mmap_lock is
		 * dropped.
		 *
		 * Nothing can be holding mm->mmap_lock here and the above call
		 * to mmu_notifier_release(mm) ensures mmu notifier callbacks in
		 * __oom_reap_task_mm() will not block.
		 *
		 * This needs to be done before calling unlock_range(),
		 * which clears VM_LOCKED, otherwise the oom reaper cannot
		 * reliably test it.
		 */
		(void)__oom_reap_task_mm(mm);

		set_bit(MMF_OOM_SKIP, &mm->flags);
	}

	mmap_write_lock(mm);
	/* drop mlock state for the entire address space */
	if (mm->locked_vm)
		unlock_range(mm->mmap, ULONG_MAX);

	arch_exit_mmap(mm);

	vma = mm->mmap;
	if (!vma) {
		/* Can happen if dup_mmap() received an OOM */
		mmap_write_unlock(mm);
		return;
	}

	lru_add_drain();
	flush_cache_mm(mm);
	tlb_gather_mmu_fullmm(&tlb, mm);
	/* update_hiwater_rss(mm) here? but nobody should be looking */
	/* Use -1 here to ensure all VMAs in the mm are unmapped */
	unmap_vmas(&tlb, vma, 0, -1);
	free_pgtables(&tlb, vma, FIRST_USER_ADDRESS, USER_PGTABLES_CEILING);
	tlb_finish_mmu(&tlb);

	/* Walk the list again, actually closing and freeing it. */
	while (vma) {
		if (vma->vm_flags & VM_ACCOUNT)
			nr_accounted += vma_pages(vma);
		vma = remove_vma(vma);
		cond_resched();
	}
	mm->mmap = NULL;
	mmap_write_unlock(mm);
	vm_unacct_memory(nr_accounted);
}
|
|
|
|
/* Insert vm structure into process list sorted by address
|
|
* and into the inode's i_mmap tree. If vm_file is non-NULL
|
|
* then i_mmap_rwsem is taken here.
|
|
*/
|
|
/* Insert vm structure into process list sorted by address
 * and into the inode's i_mmap tree.  If vm_file is non-NULL
 * then i_mmap_rwsem is taken here.
 *
 * Returns 0 on success, -ENOMEM if the range is already occupied or
 * accounting fails.
 */
int insert_vm_struct(struct mm_struct *mm, struct vm_area_struct *vma)
{
	struct vm_area_struct *prev;
	struct rb_node **rb_link, *rb_parent;

	/* fails if an existing VMA overlaps the target range */
	if (find_vma_links(mm, vma->vm_start, vma->vm_end,
			   &prev, &rb_link, &rb_parent))
		return -ENOMEM;
	if ((vma->vm_flags & VM_ACCOUNT) &&
	     security_vm_enough_memory_mm(mm, vma_pages(vma)))
		return -ENOMEM;

	/*
	 * The vm_pgoff of a purely anonymous vma should be irrelevant
	 * until its first write fault, when page's anon_vma and index
	 * are set.  But now set the vm_pgoff it will almost certainly
	 * end up with (unless mremap moves it elsewhere before that
	 * first wfault), so /proc/pid/maps tells a consistent story.
	 *
	 * By setting it to reflect the virtual start address of the
	 * vma, merges and splits can happen in a seamless way, just
	 * using the existing file pgoff checks and manipulations.
	 * Similarly in do_mmap and in do_brk_flags.
	 */
	if (vma_is_anonymous(vma)) {
		BUG_ON(vma->anon_vma);
		vma->vm_pgoff = vma->vm_start >> PAGE_SHIFT;
	}

	vma_link(mm, vma, prev, rb_link, rb_parent);
	return 0;
}
|
|
|
|
/*
|
|
* Copy the vma structure to a new location in the same mm,
|
|
* prior to moving page table entries, to effect an mremap move.
|
|
*/
|
|
/*
 * Copy the vma structure to a new location in the same mm,
 * prior to moving page table entries, to effect an mremap move.
 *
 * On success returns the VMA covering the destination (either a merge
 * result or a fresh duplicate) and sets *need_rmap_locks; may also
 * update *vmap when the source merged into the new VMA.  Returns NULL
 * on allocation failure.
 */
struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
	unsigned long addr, unsigned long len, pgoff_t pgoff,
	bool *need_rmap_locks)
{
	struct vm_area_struct *vma = *vmap;
	unsigned long vma_start = vma->vm_start;
	struct mm_struct *mm = vma->vm_mm;
	struct vm_area_struct *new_vma, *prev;
	struct rb_node **rb_link, *rb_parent;
	bool faulted_in_anon_vma = true;

	/*
	 * If anonymous vma has not yet been faulted, update new pgoff
	 * to match new location, to increase its chance of merging.
	 */
	if (unlikely(vma_is_anonymous(vma) && !vma->anon_vma)) {
		pgoff = addr >> PAGE_SHIFT;
		faulted_in_anon_vma = false;
	}

	if (find_vma_links(mm, addr, addr + len, &prev, &rb_link, &rb_parent))
		return NULL;	/* should never get here */
	new_vma = vma_merge(mm, prev, addr, addr + len, vma->vm_flags,
			    vma->anon_vma, vma->vm_file, pgoff, vma_policy(vma),
			    vma->vm_userfaultfd_ctx, anon_vma_name(vma));
	if (new_vma) {
		/*
		 * Source vma may have been merged into new_vma
		 */
		if (unlikely(vma_start >= new_vma->vm_start &&
			     vma_start < new_vma->vm_end)) {
			/*
			 * The only way we can get a vma_merge with
			 * self during an mremap is if the vma hasn't
			 * been faulted in yet and we were allowed to
			 * reset the dst vma->vm_pgoff to the
			 * destination address of the mremap to allow
			 * the merge to happen. mremap must change the
			 * vm_pgoff linearity between src and dst vmas
			 * (in turn preventing a vma_merge) to be
			 * safe. It is only safe to keep the vm_pgoff
			 * linear if there are no pages mapped yet.
			 */
			VM_BUG_ON_VMA(faulted_in_anon_vma, new_vma);
			*vmap = vma = new_vma;
		}
		/* rmap locks needed when moving to a lower file offset */
		*need_rmap_locks = (new_vma->vm_pgoff <= vma->vm_pgoff);
	} else {
		/* no merge possible: duplicate the vma at the new range */
		new_vma = vm_area_dup(vma);
		if (!new_vma)
			goto out;
		new_vma->vm_start = addr;
		new_vma->vm_end = addr + len;
		new_vma->vm_pgoff = pgoff;
		if (vma_dup_policy(vma, new_vma))
			goto out_free_vma;
		if (anon_vma_clone(new_vma, vma))
			goto out_free_mempol;
		if (new_vma->vm_file)
			get_file(new_vma->vm_file);
		if (new_vma->vm_ops && new_vma->vm_ops->open)
			new_vma->vm_ops->open(new_vma);
		vma_link(mm, new_vma, prev, rb_link, rb_parent);
		*need_rmap_locks = false;
	}
	return new_vma;

out_free_mempol:
	mpol_put(vma_policy(new_vma));
out_free_vma:
	new_vma->vm_file = NULL;	/* Prevent fput within vm_area_free */
	vm_area_free(new_vma);
out:
	return NULL;
}
|
|
|
|
/*
|
|
* Return true if the calling process may expand its vm space by the passed
|
|
* number of pages
|
|
*/
|
|
bool may_expand_vm(struct mm_struct *mm, vm_flags_t flags, unsigned long npages)
|
|
{
|
|
if (mm->total_vm + npages > rlimit(RLIMIT_AS) >> PAGE_SHIFT)
|
|
return false;
|
|
|
|
if (is_data_mapping(flags) &&
|
|
mm->data_vm + npages > rlimit(RLIMIT_DATA) >> PAGE_SHIFT) {
|
|
/* Workaround for Valgrind */
|
|
if (rlimit(RLIMIT_DATA) == 0 &&
|
|
mm->data_vm + npages <= rlimit_max(RLIMIT_DATA) >> PAGE_SHIFT)
|
|
return true;
|
|
|
|
pr_warn_once("%s (%d): VmData %lu exceed data ulimit %lu. Update limits%s.\n",
|
|
current->comm, current->pid,
|
|
(mm->data_vm + npages) << PAGE_SHIFT,
|
|
rlimit(RLIMIT_DATA),
|
|
ignore_rlimit_data ? "" : " or use boot option ignore_rlimit_data");
|
|
|
|
if (!ignore_rlimit_data)
|
|
return false;
|
|
}
|
|
|
|
return true;
|
|
}
|
|
|
|
/*
 * Account @npages (which may be negative) against the mm's per-type
 * VM counters. total_vm is always updated; exactly one of the
 * exec/stack/data counters is updated depending on the mapping flags.
 */
void vm_stat_account(struct mm_struct *mm, vm_flags_t flags, long npages)
{
	mm->total_vm += npages;

	if (is_exec_mapping(flags)) {
		mm->exec_vm += npages;
		return;
	}
	if (is_stack_mapping(flags)) {
		mm->stack_vm += npages;
		return;
	}
	if (is_data_mapping(flags))
		mm->data_vm += npages;
}
|
|
|
|
static vm_fault_t special_mapping_fault(struct vm_fault *vmf);
|
|
|
|
/*
 * Having a close hook prevents vma merging regardless of flags.
 */
static void special_mapping_close(struct vm_area_struct *vma)
{
	/* Intentionally empty: the hook's existence is what matters. */
}
|
|
|
|
static const char *special_mapping_name(struct vm_area_struct *vma)
|
|
{
|
|
return ((struct vm_special_mapping *)vma->vm_private_data)->name;
|
|
}
|
|
|
|
static int special_mapping_mremap(struct vm_area_struct *new_vma)
|
|
{
|
|
struct vm_special_mapping *sm = new_vma->vm_private_data;
|
|
|
|
if (WARN_ON_ONCE(current->mm != new_vma->vm_mm))
|
|
return -EFAULT;
|
|
|
|
if (sm->mremap)
|
|
return sm->mremap(sm, new_vma);
|
|
|
|
return 0;
|
|
}
|
|
|
|
/* ->may_split hook for special mappings: always refuses. */
static int special_mapping_split(struct vm_area_struct *vma, unsigned long addr)
{
	/*
	 * Forbid splitting special mappings - kernel has expectations over
	 * the number of pages in mapping. Together with VM_DONTEXPAND
	 * the size of vma should stay the same over the special mapping's
	 * lifetime.
	 */
	return -EINVAL;
}
|
|
|
|
/* vm_ops used by _install_special_mapping() (vm_special_mapping-backed). */
static const struct vm_operations_struct special_mapping_vmops = {
	.close = special_mapping_close,
	.fault = special_mapping_fault,
	.mremap = special_mapping_mremap,
	.name = special_mapping_name,
	/* vDSO code relies that VVAR can't be accessed remotely */
	.access = NULL,
	.may_split = special_mapping_split,
};
|
|
|
|
/*
 * vm_ops used by install_special_mapping(), where vm_private_data is a
 * bare NULL-terminated page array rather than a vm_special_mapping.
 */
static const struct vm_operations_struct legacy_special_mapping_vmops = {
	.close = special_mapping_close,
	.fault = special_mapping_fault,
};
|
|
|
|
static vm_fault_t special_mapping_fault(struct vm_fault *vmf)
|
|
{
|
|
struct vm_area_struct *vma = vmf->vma;
|
|
pgoff_t pgoff;
|
|
struct page **pages;
|
|
|
|
if (vma->vm_ops == &legacy_special_mapping_vmops) {
|
|
pages = vma->vm_private_data;
|
|
} else {
|
|
struct vm_special_mapping *sm = vma->vm_private_data;
|
|
|
|
if (sm->fault)
|
|
return sm->fault(sm, vmf->vma, vmf);
|
|
|
|
pages = sm->pages;
|
|
}
|
|
|
|
for (pgoff = vmf->pgoff; pgoff && *pages; ++pages)
|
|
pgoff--;
|
|
|
|
if (*pages) {
|
|
struct page *page = *pages;
|
|
get_page(page);
|
|
vmf->page = page;
|
|
return 0;
|
|
}
|
|
|
|
return VM_FAULT_SIGBUS;
|
|
}
|
|
|
|
static struct vm_area_struct *__install_special_mapping(
|
|
struct mm_struct *mm,
|
|
unsigned long addr, unsigned long len,
|
|
unsigned long vm_flags, void *priv,
|
|
const struct vm_operations_struct *ops)
|
|
{
|
|
int ret;
|
|
struct vm_area_struct *vma;
|
|
|
|
vma = vm_area_alloc(mm);
|
|
if (unlikely(vma == NULL))
|
|
return ERR_PTR(-ENOMEM);
|
|
|
|
vma->vm_start = addr;
|
|
vma->vm_end = addr + len;
|
|
|
|
vma->vm_flags = vm_flags | mm->def_flags | VM_DONTEXPAND | VM_SOFTDIRTY;
|
|
vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
|
|
|
|
vma->vm_ops = ops;
|
|
vma->vm_private_data = priv;
|
|
|
|
ret = insert_vm_struct(mm, vma);
|
|
if (ret)
|
|
goto out;
|
|
|
|
vm_stat_account(mm, vma->vm_flags, len >> PAGE_SHIFT);
|
|
|
|
perf_event_mmap(vma);
|
|
|
|
return vma;
|
|
|
|
out:
|
|
vm_area_free(vma);
|
|
return ERR_PTR(ret);
|
|
}
|
|
|
|
bool vma_is_special_mapping(const struct vm_area_struct *vma,
|
|
const struct vm_special_mapping *sm)
|
|
{
|
|
return vma->vm_private_data == sm &&
|
|
(vma->vm_ops == &special_mapping_vmops ||
|
|
vma->vm_ops == &legacy_special_mapping_vmops);
|
|
}
|
|
|
|
/*
|
|
* Called with mm->mmap_lock held for writing.
|
|
* Insert a new vma covering the given region, with the given flags.
|
|
* Its pages are supplied by the given array of struct page *.
|
|
* The array can be shorter than len >> PAGE_SHIFT if it's null-terminated.
|
|
* The region past the last page supplied will always produce SIGBUS.
|
|
* The array pointer and the pages it points to are assumed to stay alive
|
|
* for as long as this mapping might exist.
|
|
*/
|
|
struct vm_area_struct *_install_special_mapping(
|
|
struct mm_struct *mm,
|
|
unsigned long addr, unsigned long len,
|
|
unsigned long vm_flags, const struct vm_special_mapping *spec)
|
|
{
|
|
return __install_special_mapping(mm, addr, len, vm_flags, (void *)spec,
|
|
&special_mapping_vmops);
|
|
}
|
|
|
|
int install_special_mapping(struct mm_struct *mm,
|
|
unsigned long addr, unsigned long len,
|
|
unsigned long vm_flags, struct page **pages)
|
|
{
|
|
struct vm_area_struct *vma = __install_special_mapping(
|
|
mm, addr, len, vm_flags, (void *)pages,
|
|
&legacy_special_mapping_vmops);
|
|
|
|
return PTR_ERR_OR_ZERO(vma);
|
|
}
|
|
|
|
/* Serializes concurrent mm_take_all_locks() callers (see comment below). */
static DEFINE_MUTEX(mm_all_locks_mutex);
|
|
|
|
/*
 * Helper for mm_take_all_locks(): take anon_vma->root->rwsem exactly once
 * per anon_vma root, using the LSB of root->rb_root.rb_root.rb_node as an
 * "already locked by us" marker.
 */
static void vm_lock_anon_vma(struct mm_struct *mm, struct anon_vma *anon_vma)
{
	if (!test_bit(0, (unsigned long *) &anon_vma->root->rb_root.rb_root.rb_node)) {
		/*
		 * The LSB of head.next can't change from under us
		 * because we hold the mm_all_locks_mutex.
		 */
		down_write_nest_lock(&anon_vma->root->rwsem, &mm->mmap_lock);
		/*
		 * We can safely modify head.next after taking the
		 * anon_vma->root->rwsem. If some other vma in this mm shares
		 * the same anon_vma we won't take it again.
		 *
		 * No need of atomic instructions here, head.next
		 * can't change from under us thanks to the
		 * anon_vma->root->rwsem.
		 */
		if (__test_and_set_bit(0, (unsigned long *)
				       &anon_vma->root->rb_root.rb_root.rb_node))
			BUG();
	}
}
|
|
|
|
/*
 * Helper for mm_take_all_locks(): take mapping->i_mmap_rwsem exactly once
 * per address_space, using the AS_MM_ALL_LOCKS bit in mapping->flags as an
 * "already locked by us" marker.
 */
static void vm_lock_mapping(struct mm_struct *mm, struct address_space *mapping)
{
	if (!test_bit(AS_MM_ALL_LOCKS, &mapping->flags)) {
		/*
		 * AS_MM_ALL_LOCKS can't change from under us because
		 * we hold the mm_all_locks_mutex.
		 *
		 * Operations on ->flags have to be atomic because
		 * even if AS_MM_ALL_LOCKS is stable thanks to the
		 * mm_all_locks_mutex, there may be other cpus
		 * changing other bitflags in parallel to us.
		 */
		if (test_and_set_bit(AS_MM_ALL_LOCKS, &mapping->flags))
			BUG();
		down_write_nest_lock(&mapping->i_mmap_rwsem, &mm->mmap_lock);
	}
}
|
|
|
|
/*
 * This operation locks against the VM for all pte/vma/mm related
 * operations that could ever happen on a certain mm. This includes
 * vmtruncate, try_to_unmap, and all page faults.
 *
 * The caller must take the mmap_lock in write mode before calling
 * mm_take_all_locks(). The caller isn't allowed to release the
 * mmap_lock until mm_drop_all_locks() returns.
 *
 * mmap_lock in write mode is required in order to block all operations
 * that could modify pagetables and free pages without need of
 * altering the vma layout. It's also needed in write mode to avoid new
 * anon_vmas to be associated with existing vmas.
 *
 * A single task can't take more than one mm_take_all_locks() in a row
 * or it would deadlock.
 *
 * The LSB in anon_vma->rb_root.rb_node and the AS_MM_ALL_LOCKS bitflag in
 * mapping->flags avoid to take the same lock twice, if more than one
 * vma in this mm is backed by the same anon_vma or address_space.
 *
 * We take locks in following order, accordingly to comment at beginning
 * of mm/rmap.c:
 *   - all hugetlbfs_i_mmap_rwsem_key locks (aka mapping->i_mmap_rwsem for
 *     hugetlb mapping);
 *   - all i_mmap_rwsem locks;
 *   - all anon_vma->rwsem
 *
 * We can take all locks within these types randomly because the VM code
 * doesn't nest them and we're protected from parallel mm_take_all_locks() by
 * mm_all_locks_mutex.
 *
 * mm_take_all_locks() and mm_drop_all_locks are expensive operations
 * that may have to take thousands of locks.
 *
 * mm_take_all_locks() can fail if it's interrupted by signals.
 */
int mm_take_all_locks(struct mm_struct *mm)
{
	struct vm_area_struct *vma;
	struct anon_vma_chain *avc;

	BUG_ON(mmap_read_trylock(mm));

	mutex_lock(&mm_all_locks_mutex);

#if defined(CONFIG_MMU_NOTIFIER) && defined(CONFIG_SPECULATIVE_PAGE_FAULT)
	percpu_down_write(mm->mmu_notifier_lock);
#endif

	/* Pass 1: hugetlb i_mmap_rwsem locks (see lock order above). */
	for (vma = mm->mmap; vma; vma = vma->vm_next) {
		if (signal_pending(current))
			goto out_unlock;
		if (vma->vm_file && vma->vm_file->f_mapping &&
				is_vm_hugetlb_page(vma))
			vm_lock_mapping(mm, vma->vm_file->f_mapping);
	}

	/* Pass 2: all remaining i_mmap_rwsem locks. */
	for (vma = mm->mmap; vma; vma = vma->vm_next) {
		if (signal_pending(current))
			goto out_unlock;
		if (vma->vm_file && vma->vm_file->f_mapping &&
				!is_vm_hugetlb_page(vma))
			vm_lock_mapping(mm, vma->vm_file->f_mapping);
	}

	/* Pass 3: all anon_vma rwsems. */
	for (vma = mm->mmap; vma; vma = vma->vm_next) {
		if (signal_pending(current))
			goto out_unlock;
		if (vma->anon_vma)
			list_for_each_entry(avc, &vma->anon_vma_chain, same_vma)
				vm_lock_anon_vma(mm, avc->anon_vma);
	}

	return 0;

out_unlock:
	mm_drop_all_locks(mm);
	return -EINTR;
}
|
|
|
|
/*
 * Helper for mm_drop_all_locks(): release anon_vma->root->rwsem if we
 * marked it as taken (LSB of rb_root.rb_node set by vm_lock_anon_vma()).
 */
static void vm_unlock_anon_vma(struct anon_vma *anon_vma)
{
	if (test_bit(0, (unsigned long *) &anon_vma->root->rb_root.rb_root.rb_node)) {
		/*
		 * The LSB of head.next can't change to 0 from under
		 * us because we hold the mm_all_locks_mutex.
		 *
		 * We must however clear the bitflag before unlocking
		 * the vma so the users using the anon_vma->rb_root will
		 * never see our bitflag.
		 *
		 * No need of atomic instructions here, head.next
		 * can't change from under us until we release the
		 * anon_vma->root->rwsem.
		 */
		if (!__test_and_clear_bit(0, (unsigned long *)
					  &anon_vma->root->rb_root.rb_root.rb_node))
			BUG();
		anon_vma_unlock_write(anon_vma);
	}
}
|
|
|
|
/*
 * Helper for mm_drop_all_locks(): release mapping->i_mmap_rwsem if we
 * marked it as taken (AS_MM_ALL_LOCKS set by vm_lock_mapping()).
 */
static void vm_unlock_mapping(struct address_space *mapping)
{
	if (test_bit(AS_MM_ALL_LOCKS, &mapping->flags)) {
		/*
		 * AS_MM_ALL_LOCKS can't change to 0 from under us
		 * because we hold the mm_all_locks_mutex.
		 */
		i_mmap_unlock_write(mapping);
		if (!test_and_clear_bit(AS_MM_ALL_LOCKS,
					&mapping->flags))
			BUG();
	}
}
|
|
|
|
/*
 * The mmap_lock cannot be released by the caller until
 * mm_drop_all_locks() returns.
 *
 * Releases everything taken by mm_take_all_locks(): per-vma anon_vma and
 * i_mmap locks, the speculative-fault mmu_notifier lock (if configured),
 * and finally mm_all_locks_mutex.
 */
void mm_drop_all_locks(struct mm_struct *mm)
{
	struct vm_area_struct *vma;
	struct anon_vma_chain *avc;

	BUG_ON(mmap_read_trylock(mm));
	BUG_ON(!mutex_is_locked(&mm_all_locks_mutex));

	for (vma = mm->mmap; vma; vma = vma->vm_next) {
		if (vma->anon_vma)
			list_for_each_entry(avc, &vma->anon_vma_chain, same_vma)
				vm_unlock_anon_vma(avc->anon_vma);
		if (vma->vm_file && vma->vm_file->f_mapping)
			vm_unlock_mapping(vma->vm_file->f_mapping);
	}

#if defined(CONFIG_MMU_NOTIFIER) && defined(CONFIG_SPECULATIVE_PAGE_FAULT)
	percpu_up_write(mm->mmu_notifier_lock);
#endif

	mutex_unlock(&mm_all_locks_mutex);
}
|
|
|
|
/*
|
|
* initialise the percpu counter for VM
|
|
*/
|
|
void __init mmap_init(void)
|
|
{
|
|
int ret;
|
|
|
|
ret = percpu_counter_init(&vm_committed_as, 0, GFP_KERNEL);
|
|
VM_BUG_ON(ret);
|
|
}
|
|
|
|
/*
|
|
* Initialise sysctl_user_reserve_kbytes.
|
|
*
|
|
* This is intended to prevent a user from starting a single memory hogging
|
|
* process, such that they cannot recover (kill the hog) in OVERCOMMIT_NEVER
|
|
* mode.
|
|
*
|
|
* The default value is min(3% of free memory, 128MB)
|
|
* 128MB is enough to recover with sshd/login, bash, and top/kill.
|
|
*/
|
|
static int init_user_reserve(void)
|
|
{
|
|
unsigned long free_kbytes;
|
|
|
|
free_kbytes = global_zone_page_state(NR_FREE_PAGES) << (PAGE_SHIFT - 10);
|
|
|
|
sysctl_user_reserve_kbytes = min(free_kbytes / 32, 1UL << 17);
|
|
return 0;
|
|
}
|
|
subsys_initcall(init_user_reserve);
|
|
|
|
/*
|
|
* Initialise sysctl_admin_reserve_kbytes.
|
|
*
|
|
* The purpose of sysctl_admin_reserve_kbytes is to allow the sys admin
|
|
* to log in and kill a memory hogging process.
|
|
*
|
|
* Systems with more than 256MB will reserve 8MB, enough to recover
|
|
* with sshd, bash, and top in OVERCOMMIT_GUESS. Smaller systems will
|
|
* only reserve 3% of free pages by default.
|
|
*/
|
|
static int init_admin_reserve(void)
|
|
{
|
|
unsigned long free_kbytes;
|
|
|
|
free_kbytes = global_zone_page_state(NR_FREE_PAGES) << (PAGE_SHIFT - 10);
|
|
|
|
sysctl_admin_reserve_kbytes = min(free_kbytes / 32, 1UL << 13);
|
|
return 0;
|
|
}
|
|
subsys_initcall(init_admin_reserve);
|
|
|
|
/*
 * Reinitialise user and admin reserves if memory is added or removed.
 *
 * The default user reserve max is 128MB, and the default max for the
 * admin reserve is 8MB. These are usually, but not always, enough to
 * enable recovery from a memory hogging process using login/sshd, a shell,
 * and tools like top. It may make sense to increase or even disable the
 * reserve depending on the existence of swap or variations in the recovery
 * tools. So, the admin may have changed them.
 *
 * If memory is added and the reserves have been eliminated or increased above
 * the default max, then we'll trust the admin.
 *
 * If memory is removed and there isn't enough free memory, then we
 * need to reset the reserves.
 *
 * Otherwise keep the reserve set by the admin.
 */
static int reserve_mem_notifier(struct notifier_block *nb,
			     unsigned long action, void *data)
{
	unsigned long tmp, free_kbytes;

	switch (action) {
	case MEM_ONLINE:
		/* Default max is 128MB. Leave alone if modified by operator. */
		tmp = sysctl_user_reserve_kbytes;
		if (0 < tmp && tmp < (1UL << 17))
			init_user_reserve();

		/* Default max is 8MB. Leave alone if modified by operator. */
		tmp = sysctl_admin_reserve_kbytes;
		if (0 < tmp && tmp < (1UL << 13))
			init_admin_reserve();

		break;
	case MEM_OFFLINE:
		free_kbytes = global_zone_page_state(NR_FREE_PAGES) << (PAGE_SHIFT - 10);

		/* Reserves larger than remaining free memory must shrink. */
		if (sysctl_user_reserve_kbytes > free_kbytes) {
			init_user_reserve();
			pr_info("vm.user_reserve_kbytes reset to %lu\n",
				sysctl_user_reserve_kbytes);
		}

		if (sysctl_admin_reserve_kbytes > free_kbytes) {
			init_admin_reserve();
			pr_info("vm.admin_reserve_kbytes reset to %lu\n",
				sysctl_admin_reserve_kbytes);
		}
		break;
	default:
		break;
	}
	return NOTIFY_OK;
}
|
|
|
|
/* Memory-hotplug notifier that keeps the VM reserves in sync (see above). */
static struct notifier_block reserve_mem_nb = {
	.notifier_call = reserve_mem_notifier,
};
|
|
|
|
/*
 * Register reserve_mem_nb for memory add/remove events. Registration
 * failure is logged but not fatal: the boot-time reserve defaults stay
 * in effect.
 */
static int __meminit init_reserve_notifier(void)
{
	int err = register_hotmemory_notifier(&reserve_mem_nb);

	if (err)
		pr_err("Failed registering memory add/remove notifier for admin reserve\n");

	return 0;
}
subsys_initcall(init_reserve_notifier);
|