# By Daniel Rosenberg (98) and others
# Via Greg Kroah-Hartman (219) and others
* google/common/android-4.4-p:
ANDROID: xt_qtaguid: fix UAF race
ANDROID: Make vsock virtio packet buff size configurable
ANDROID: cuttlefish_defconfig: add missing CONFIG_BLK_CGROUP
ANDROID: xt_qtaguid: Remove tag_entry from process list on untag
ANDROID: usb: f_accessory: Don't drop NULL reference in acc_disconnect()
ANDROID: usb: f_accessory: Avoid bitfields for shared variables
ANDROID: usb: f_accessory: Cancel any pending work before teardown
ANDROID: usb: f_accessory: Don't corrupt global state on double registration
ANDROID: usb: f_accessory: Fix teardown ordering in acc_release()
ANDROID: usb: f_accessory: Add refcounting to global 'acc_dev'
UPSTREAM: locking/atomic, kref: Add KREF_INIT()
ANDROID: usb: f_accessory: Wrap '_acc_dev' in get()/put() accessors
ANDROID: usb: f_accessory: Remove useless assignment
ANDROID: usb: f_accessory: Remove useless non-debug prints
ANDROID: usb: f_accessory: Remove stale comments
ANDROID: USB: f_accessory: Check dev pointer before decoding ctrl request
ANDROID: usb: gadget: f_accessory: fix CTS test stuck
ANDROID: cuttlefish_defconfig: Disable CONFIG_KSM
UPSTREAM: arm64: SW PAN: Point saved ttbr0 at the zero page when switching to init_mm
UPSTREAM: arm64: kaslr: Fix up the kernel image alignment
UPSTREAM: sched/fair: Fix FTQ noise bench regression
UPSTREAM: dm verity fec: fix bufio leaks
UPSTREAM: arm64: kernel: restrict /dev/mem read() calls to linear region
UPSTREAM: binder: fix incorrect cmd to binder_stat_br
UPSTREAM: arm64: SW PAN: Update saved ttbr0 value on enter_lazy_tlb
UPSTREAM: KVM: arm/arm64: Check pagesize when allocating a hugepage at Stage 2
UPSTREAM: fs/proc/kcore.c: use probe_kernel_read() instead of memcpy()
UPSTREAM: arm64: fix unwind_frame() for filtered out fn for function graph tracing
UPSTREAM: arm64: kpti: Use early_param for kpti= command-line option
UPSTREAM: arm64: kaslr: ensure randomized quantities are clean to the PoC
UPSTREAM: arm64: kaslr: ensure randomized quantities are clean also when kaslr is off
UPSTREAM: staging: android: vsoc: fix copy_from_user overrun
UPSTREAM: arm64/mm: Inhibit huge-vmap with ptdump
UPSTREAM: drivers/perf: arm_pmu: Fix failure path in PM notifier
UPSTREAM: fs/posix_acl.c: fix kernel-doc warnings
UPSTREAM: ext2: fix debug reference to ext2_xattr_cache
UPSTREAM: arm64: alternative: fix build with clang integrated assembler
UPSTREAM: dm verity fec: fix hash block number in verity_fec_decode
ANDROID: Temporarily disable XFRM_USER_COMPAT filtering
BACKPORT: xfrm/compat: Translate 32-bit user_policy from sockptr
BACKPORT: xfrm/compat: Add 32=>64-bit messages translator
UPSTREAM: xfrm/compat: Attach xfrm dumps to 64=>32 bit translator
BACKPORT: xfrm/compat: Add 64=>32-bit messages translator
BACKPORT: xfrm: Provide API to register translator module
UPSTREAM: mm/sl[uo]b: export __kmalloc_track(_node)_caller
ANDROID: Publish uncompressed Image on aarch64
ANDROID: Makefile: append BUILD_NUMBER to version string when defined
UPSTREAM: binder: fix UAF when releasing todo list
ANDROID: fix a bug in quota2
UPSTREAM: binder: Prevent context manager from incrementing ref 0
BACKPORT: xtables: extend matches and targets with .usersize
UPSTREAM: ip6tables: use match, target and data copy_to_user helpers
UPSTREAM: iptables: use match, target and data copy_to_user helpers
UPSTREAM: xtables: add xt_match, xt_target and data copy_to_user functions
ANDROID: cuttlefish_defconfig: Drop built-in cmdline (except nopti)
ANDROID: cuttlefish defconfig - enable mount/net/uts namespaces.
ANDROID: hid: steam: remove BT controller matching
UPSTREAM: HID: steam: Fix input device disappearing
Revert "ext2: fix empty body warnings when -Wextra is used"
Revert "net: ipv6: Fix processing of RAs in presence of VRF"
UPSTREAM: net: socket: set sock->sk to NULL after calling proto_ops::release()
BACKPORT: xfrm: Allow Output Mark to be Updated Using UPDSA
UPSTREAM: socket: close race condition between sock_close() and sockfs_setattr()
UPSTREAM: net: ipv6: Use passed in table for nexthop lookups
ANDROID: cuttlefish_defconfig: Fix dm-verity related options
Revert "ANDROID: dm verity: add minimum prefetch size"
ANDROID: mnt: Propagate remount correctly
BACKPORT: loop: Add LOOP_SET_BLOCK_SIZE in compat ioctl
UPSTREAM: loop: drop caches if offset or block_size are changed
UPSTREAM: loop: add ioctl for changing logical block size
BACKPORT: block/loop: set hw_sectors
ANDROID: cuttlefish_defconfig: Minimally enable EFI
UPSTREAM: bpf: Explicitly memset the bpf_attr structure
FROMLIST: HID: nintendo: add nintendo switch controller driver
UPSTREAM: staging: most: net: fix buffer overflow
UPSTREAM: ALSA: pcm: Add missing copy ops check before clearing buffer
ANDROID: selinux: modify RTM_GETLINK permission
UPSTREAM: HID: input: map digitizer battery usage
UPSTREAM: HID: input: ignore the battery in OKLICK Laser BTmouse
ANDROID: cuttlefish_defconfig: Disable TRANSPARENT_HUGEPAGE
commit e82b9b0727ff ("vhost: introduce vhost_exceeds_weight()")
UPSTREAM: HID: steam: fix deadlock with input devices.
UPSTREAM: HID: steam: fix boot loop with bluetooth firmware
UPSTREAM: HID: steam: remove input device when a hid client is running.
UPSTREAM: HID: steam: use hid_device.driver_data instead of hid_set_drvdata()
UPSTREAM: HID: steam: add missing fields in client initialization
UPSTREAM: HID: steam: add battery device.
UPSTREAM: HID: add driver for Valve Steam Controller
UPSTREAM: HID: sony: Fix memory corruption issue on cleanup.
UPSTREAM: HID: sony: Fix race condition between rumble and device remove.
UPSTREAM: HID: sony: remove redundant check for -ve err
UPSTREAM: HID: sony: Make sure to unregister sensors on failure
UPSTREAM: HID: sony: Make DS4 bt poll interval adjustable
UPSTREAM: HID: sony: Set proper bit flags on DS4 output report
UPSTREAM: HID: sony: DS4 use brighter LED colors
UPSTREAM: HID: sony: Improve navigation controller axis/button mapping
UPSTREAM: HID: sony: Use DS3 MAC address as unique identifier on USB
UPSTREAM: HID: sony: Perform duplicate device check earlier on
UPSTREAM: HID: sony: Expose DS3 motion sensors through separate device
UPSTREAM: HID: sony: Print error on failure to activate DS3 / Navigation controllers
UPSTREAM: HID: sony: DS3 comply to Linux gamepad spec
UPSTREAM: HID: sony: Mark DS4 touchpad device as a pointer
UPSTREAM: HID: sony: Support motion sensor calibration on dongle
UPSTREAM: HID: sony: Make work handling more generic
UPSTREAM: HID: sony: Treat the ds4 dongle as a separate device
UPSTREAM: HID: sony: Remove report descriptor fixup for DS4
UPSTREAM: HID: sony: Report hardware timestamp for DS4 sensor values
UPSTREAM: HID: sony: Calibrate DS4 motion sensors
UPSTREAM: HID: sony: Report DS4 motion sensors through a separate device
UPSTREAM: HID: sony: Fix input device leak when connecting a DS4 twice using USB/BT
UPSTREAM: HID: sony: Use LED_CORE_SUSPENDRESUME
UPSTREAM: HID: sony: Ignore DS4 dongle reports when no device is connected
UPSTREAM: HID: sony: Use DS4 MAC address as unique identifier on USB
UPSTREAM: HID: sony: Fix error handling bug when touchpad registration fails
UPSTREAM: HID: sony: Comply to Linux gamepad spec for DS4
UPSTREAM: HID: sony: Make the DS4 touchpad a separate device
UPSTREAM: HID: sony: Fix memory issue when connecting device using both Bluetooth and USB
UPSTREAM: HID: sony: Adjust value range for motion sensors
UPSTREAM: HID: sony: Handle multiple touch events input record
UPSTREAM: HID: sony: Send ds4 output reports on output end-point
UPSTREAM: HID: sony: Perform CRC check on bluetooth input packets
UPSTREAM: HID: sony: Adjust HID report size name definitions
UPSTREAM: HID: sony: Fix race condition in sony_probe
UPSTREAM: HID: sony: Update copyright and add Dualshock 4 rate control note
UPSTREAM: HID: sony: Defer the initial USB Sixaxis output report
UPSTREAM: HID: sony: Relax duplicate checking for USB-only devices
UPSTREAM: HID: sony: underscores are unnecessary for u8, u16, s32
UPSTREAM: HID: sony: fix some warnings from scripts/checkpatch.pl
UPSTREAM: HID: sony: fix errors from scripts/checkpatch.pl
UPSTREAM: HID: sony: fix a typo in descriptors comments s/Joystik/Joystick/
UPSTREAM: HID: sony: Fixup output reports for the nyko core controller
UPSTREAM: HID: sony: Remove the size check for the Dualshock 4 HID Descriptor
UPSTREAM: HID: sony: Save and restore the controller state on suspend and resume
UPSTREAM: HID: sony: Refactor the output report sending functions
ANDROID: cpufreq: times: add /proc/uid_concurrent_{active,policy}_time
rtlwifi: Fix potential overflow on P2P code
ANDROID: clang: update to 9.0.8 based on r365631c
ANDROID: move up spin_unlock_bh() ahead of remove_proc_entry()
ANDROID: refactor build.config files to remove duplication
ANDROID: usb: gadget: Fix dependency for f_accessory
Remove taskname from lowmemorykiller kill reports
ANDROID: Fixes to locking around handle_lmk_event
Revert "ANDROID: regression introduced override_creds=off"
ANDROID: regression introduced override_creds=off
Fix fallout from changes to bootparam_utils.h
ANDROID: sched: Disallow WALT with CFS bandwidth control
ANDROID: fiq_debugger: remove
ANDROID: arm64: fix leftover RWX when using CONFIG_UNMAP_KERNEL_AT_EL0
ANDROID: fix kernelci build-break in lowmemorykiller
ANDROID: Avoid taking multiple locks in handle_lmk_event
UPSTREAM: net-ipv6-ndisc: add support for RFC7710 RA Captive Portal Identifier
ANDROID: fix binder change in merge of 4.4.183
Fix overlayfs build break
binder: fix possible UAF when freeing buffer
ANDROID: Revert "f2fs: avoid out-of-range memory access"
ANDROID: overlayfs: Fix a regression in commit b24be4acd
ANDROID: enable CONFIG_RTC_DRV_TEST on cuttlefish
ANDROID: xfrm: remove in_compat_syscall() checks
BACKPORT: binder: Set end of SG buffer area properly.
UPSTREAM: binder: check for overflow when alloc for security context
BACKPORT: binder: fix race between munmap() and direct reclaim
ANDROID: cuttlefish 4.4: enable CONFIG_CRYPTO_AES_NI_INTEL=y
ANDROID: cuttlefish_defconfig: Disable DEVTMPFS
ANDROID: cuttlefish_defconfig: Enable CONFIG_CPUSETS and CONFIG_CGROUP_SCHEDTUNE
ANDROID: cuttlefish_defconfig: Drop dead CRYPTO options
UPSTREAM: virtio: new feature to detect IOMMU device quirk
UPSTREAM: vring: Use the DMA API on Xen
UPSTREAM: virtio_ring: Support DMA APIs
UPSTREAM: vring: Introduce vring_use_dma_api()
ANDROID: cuttlefish_defconfig: L2TP/PPTP to OLAC/OPNS
ANDROID: cuttlefish_defconfig: Enable DEBUG_SET_MODULE_RONX
ANDROID: Fix cuttlefish redundant vsock connection.
ANDROID: cuttlefish_defconfig: Enable CONFIG_RTC_HCTOSYS
ANDROID: Move from clang r349610 to r353983c.
Make arm64 serial port config compatible with crosvm
UPSTREAM: virt_wifi: Remove REGULATORY_WIPHY_SELF_MANAGED
ANDROID: cuttlefish_defconfig: Add support for AC97 audio
ANDROID: Move from clang r346389b to r349610.
ANDROID: cuttlefish_defconfig: Enable vsock options
UPSTREAM: vhost/vsock: fix reset orphans race with close timeout
UPSTREAM: vhost/vsock: fix use-after-free in network stack callers
UPSTREAM: vhost: correctly check the iova range when waking virtqueue
UPSTREAM: vhost: synchronize IOTLB message with dev cleanup
UPSTREAM: vhost: fix info leak due to uninitialized memory
UPSTREAM: vhost: fix vhost_vq_access_ok() log check
UPSTREAM: vhost: validate log when IOTLB is enabled
UPSTREAM: vhost_net: add missing lock nesting notation
UPSTREAM: vhost: use mutex_lock_nested() in vhost_dev_lock_vqs()
UPSTREAM: vhost/vsock: fix uninitialized vhost_vsock->guest_cid
UPSTREAM: vhost_net: correctly check tx avail during rx busy polling
UPSTREAM: vsock: use new wait API for vsock_stream_sendmsg()
UPSTREAM: vsock: cancel packets when failing to connect
UPSTREAM: vhost-vsock: add pkt cancel capability
UPSTREAM: vsock: track pkt owner vsock
UPSTREAM: vhost: fix initialization for vq->is_le
UPSTREAM: vhost/vsock: handle vhost_vq_init_access() error
UPSTREAM: vsock: lookup and setup guest_cid inside vhost_vsock_lock
UPSTREAM: vhost-vsock: fix orphan connection reset
UPSTREAM: vsock/virtio: fix src/dst cid format
UPSTREAM: VSOCK: Don't dec ack backlog twice for rejected connections
UPSTREAM: vhost/vsock: drop space available check for TX vq
UPSTREAM: virtio-vsock: fix include guard typo
UPSTREAM: vhost/vsock: fix vhost virtio_vsock_pkt use-after-free
UPSTREAM: VSOCK: Use kvfree()
BACKPORT: vhost: split out vringh Kconfig
UPSTREAM: vhost: drop vringh dependency
UPSTREAM: vhost: drop vringh dependency
UPSTREAM: vhost: detect 32 bit integer wrap around
UPSTREAM: VSOCK: Add Makefile and Kconfig
UPSTREAM: VSOCK: Introduce vhost_vsock.ko
UPSTREAM: VSOCK: Introduce virtio_transport.ko
BACKPORT: VSOCK: Introduce virtio_vsock_common.ko
UPSTREAM: VSOCK: defer sock removal to transports
UPSTREAM: VSOCK: transport-specific vsock_transport functions
UPSTREAM: vsock: make listener child lock ordering explicit
UPSTREAM: vhost: new device IOTLB API
BACKPORT: vhost: convert pre sorted vhost memory array to interval tree
UPSTREAM: vhost: introduce vhost memory accessors
UPSTREAM: vhost_net: stop polling socket during rx processing
UPSTREAM: VSOCK: constify vsock_transport structure
UPSTREAM: vhost: lockless enqueuing
UPSTREAM: vhost: simplify work flushing
UPSTREAM: VSOCK: Only check error on skb_recv_datagram when skb is NULL
BACKPORT: AF_VSOCK: Shrink the area influenced by prepare_to_wait
UPSTREAM: vhost_net: basic polling support
UPSTREAM: vhost: introduce vhost_vq_avail_empty()
UPSTREAM: vhost: introduce vhost_has_work()
UPSTREAM: vhost: rename vhost_init_used()
UPSTREAM: vhost: rename cross-endian helpers
UPSTREAM: vhost: fix error path in vhost_init_used()
UPSTREAM: virtio: make find_vqs() checkpatch.pl-friendly
UPSTREAM: net: move napi_hash[] into read mostly section
ANDROID: cuttlefish_defconfig: Enable VIRTIO_INPUT
ANDROID: cuttlefish_defconfig: Enable VIRT_WIFI
FROMGIT, BACKPORT: mac80211-next: rtnetlink wifi simulation device
ANDROID: Move from clang r328903 to r346389b.
ANDROID: arm64 defconfig / build config for cuttlefish
ANDROID: Communicates LMK events to userland where they can be logged
Fix merge issue with 4.4.178
Fix merge issue with 4.4.177
FROMGIT: binder: create node flag to request sender's security context
ion: Disable ION_HEAP_TYPE_SYSTEM_CONTIG
ANDROID: uid_sys_stats: Copy task_struct comm field to bigger buffer
UPSTREAM: binder: fix race that allows malicious free of live buffer
Makefile: Tidy up 4.4.165 merge
ANDROID: sdcardfs: Change current->fs under lock
ANDROID: sdcardfs: Don't use OVERRIDE_CRED macro
arm64/vdso: Fix nsec handling for CLOCK_MONOTONIC_RAW
ANDROID: arm64: mm: fix 4.4.154 merge
Fix backport of "tcp: detect malicious patterns in tcp_collapse_ofo_queue()"
tcp: detect malicious patterns in tcp_collapse_ofo_queue()
tcp: avoid collapses in tcp_prune_queue() if possible
x86_64_cuttlefish_defconfig: Enable android-verity
x86_64_cuttlefish_defconfig: enable verity cert
ANDROID: Fix massive cpufreq_times memory leaks
ANDROID: Reduce use of #ifdef CONFIG_CPU_FREQ_TIMES
UPSTREAM: binder: replace "%p" with "%pK"
UPSTREAM: binder: free memory on error
UPSTREAM: binder: fix proc->files use-after-free
UPSTREAM: Revert "FROMLIST: binder: fix proc->files use-after-free"
UPSTREAM: ANDROID: binder: change down_write to down_read
UPSTREAM: ANDROID: binder: correct the cmd print for BINDER_WORK_RETURN_ERROR
UPSTREAM: ANDROID: binder: remove 32-bit binder interface.
UPSTREAM: ANDROID: binder: re-order some conditions
UPSTREAM: android: binder: use VM_ALLOC to get vm area
UPSTREAM: android: binder: Use true and false for boolean values
UPSTREAM: android: binder: Use octal permissions
UPSTREAM: android: binder: Prefer __func__ to using hardcoded function name
UPSTREAM: ANDROID: binder: make binder_alloc_new_buf_locked static and indent its arguments
UPSTREAM: android: binder: Check for errors in binder_alloc_shrinker_init().
cpufreq: Kconfig: Remove CPU_FREQ_DEFAULT_GOV_SCHED
ANDROID: Add kconfig to make dm-verity check_at_most_once default enabled
ANDROID: sdcardfs: fix potential crash when reserved_mb is not zero
ANDROID: Update arm64 ranchu64_defconfig
FROMLIST: f2fs: run fstrim asynchronously if runtime discard is on
goldfish: pipe: ANDROID: address must be written as __pa(x), not x
goldfish: pipe: ANDROID: add missing check for memory allocated
goldfish: pipe: ANDROID: remove redundant blank lines
Update arch/x86/configs/x86_64_ranchu_defconfig
ANDROID: x86_64_cuttlefish_defconfig: Enable F2FS
ANDROID: Update x86_64_cuttlefish_defconfig
FROMLIST: f2fs: early updates queued for v4.18-rc1
Revert "vti4: Don't override MTU passed on link creation via IFLA_MTU"
UPSTREAM: sched/fair: Consider RT/IRQ pressure in capacity_spare_wake
BACKPORT, FROMLIST: fscrypt: add Speck128/256 support
cgroup: Disable IRQs while holding css_set_lock
Revert "cgroup: Disable IRQs while holding css_set_lock"
cgroup: Disable IRQs while holding css_set_lock
ANDROID: proc: fix undefined behavior in proc_uid_base_readdir
x86: vdso: Fix leaky vdso linker with CC=clang.
ANDROID: build: cuttlefish: Upgrade clang to newer version.
ANDROID: build: cuttlefish: Upgrade clang to newer version.
ANDROID: build: cuttlefish: Fix path to clang.
UPSTREAM: dm bufio: avoid sleeping while holding the dm_bufio lock
ANDROID: sdcardfs: Don't d_drop in d_revalidate
goldfish: pipe: ANDROID: mark local functions static
Revert "goldfish: pipe: ANDROID: Allocate memory with GFP_KERNEL."
UPSTREAM: ANDROID: binder: prevent transactions into own process.
goldfish: pipe: ANDROID: Add DMA support
UPSTREAM: f2fs: clear PageError on writepage - part 2
UPSTREAM: f2fs: avoid fsync() failure caused by EAGAIN in writepage()
ANDROID: build.config: enforce trace_printk check
ANDROID: x86_64_cuttlefish_defconfig: Disable KPTI
UPSTREAM: mac80211: ibss: Fix channel type enum in ieee80211_sta_join_ibss()
UPSTREAM: mac80211: Fix clang warning about constant operand in logical operation
UPSTREAM: nl80211: Fix enum type of variable in nl80211_put_sta_rate()
UPSTREAM: sysfs: remove signedness from sysfs_get_dirent
UPSTREAM: tracing: Use cpumask_available() to check if cpumask variable may be used
BACKPORT: clocksource: Use GENMASK_ULL in definition of CLOCKSOURCE_MASK
UPSTREAM: netpoll: Fix device name check in netpoll_setup()
FROMLIST: staging: Fix sparse warnings in vsoc driver.
FROMLIST: staging: vsoc: Fix a i386-randconfig warning.
FROMLIST: staging: vsoc: Create wc kernel mapping for region shm.
Revert "goldfish: pipe: ANDROID: remove a redundant target"
goldfish: pipe: ANDROID: Replace writel with gf_write_ptr
goldfish: pipe: ANDROID: Use dev_ logging instead of pr_
goldfish: pipe: ANDROID: fix checkpatch warnings
goldfish: pipe: ANDROID: Update module license
goldfish: pipe: ANDROID: Allocate memory with GFP_KERNEL.
goldfish: pipe: ANDROID: Do not crash
goldfish: pipe: ANDROID: remove redundant casting
goldfish: pipe: ANDROID: Add 'pipe' to pipe functions
goldfish: pipe: ANDROID: fix whitespace
goldfish: pipe: ANDROID: rename global variables
goldfish: pipe: ANDROID: remove a redundant target
goldfish: pipe: ANDROID: add blank lines
goldfish: pipe: ANDROID: replace 'BUG_ON' with 'BUILD_BUG_ON'
goldfish: pipe: ANDROID: use the 'BIT' macro for wakeup flags
goldfish: pipe: ANDROID: fix logging format strings
UPSTREAM: tracing: always define trace_{irq,preempt}_{enable_disable}
ANDROID: staging: ion: Obey kptr_restrict
ANDROID: sdcardfs: Set s_root to NULL after putting
ANDROID: sdcardfs: d_make_root calls iput
ANDROID: sdcardfs: Check for private data earlier
f2fs: check cap_resource only for data blocks
Revert "f2fs: introduce f2fs_set_page_dirty_nobuffer"
f2fs: clear PageError on writepage
UPSTREAM: timer: Export destroy_hrtimer_on_stack()
BACKPORT: dm verity: add 'check_at_most_once' option to only validate hashes once
f2fs: call unlock_new_inode() before d_instantiate()
f2fs: refactor read path to allow multiple postprocessing steps
fscrypt: allow synchronous bio decryption
ANDROID: Add build server config for cuttlefish.
ANDROID: Add defconfig for cuttlefish.
FROMLIST: staging: Android: Add 'vsoc' driver for cuttlefish.
Revert "proc: make oom adjustment files user read-only"
Revert "fixup! proc: make oom adjustment files user read-only"
ANDROID: proc: add null check in proc_uid_init
f2fs/fscrypt: updates to v4.17-rc1
Reduce amount of casting in drivers/tty/goldfish.c.
Replace #define with enum for better compilation errors.
Add missing include to drivers/tty/goldfish.c
Fix whitespace in drivers/tty/goldfish.c
ANDROID: fuse: Add null terminator to path in canonical path to avoid issue
ANDROID: sdcardfs: Fix sdcardfs to stop creating cases-sensitive duplicate entries.
ANDROID: add missing include to pdev_bus
ANDROID: pdev_bus: replace writel with gf_write_ptr
ANDROID: Cleanup type casting in goldfish.h
ANDROID: Include missing headers in goldfish.h
ANDROID: cpufreq: times: skip printing invalid frequencies
ANDROID: xt_qtaguid: Remove unnecessary null checks to device's name
ANDROID: xt_qtaguid: Remove unnecessary null checks to ifa_label
ANDROID: cpufreq: times: allocate enough space for a uid_entry
Revert "genirq: Use irqd_get_trigger_type to compare the trigger type for shared IRQs"
UPSTREAM: drm: virtio-gpu: set atomic flag
UPSTREAM: drm: virtio-gpu: transfer dumb buffers to host on plane update
UPSTREAM: drm: virtio-gpu: ensure plane is flushed to host on atomic update
UPSTREAM: drm: virtio-gpu: get the fb from the plane state for atomic updates
ANDROID: cpufreq: times: fix proc_time_in_state_show
dtc: turn off dtc unit address warnings by default
BACKPORT, FROMLIST: crypto: arm64/speck - add NEON-accelerated implementation of Speck-XTS
ANDROID: cpufreq: times: avoid prematurely freeing uid_entry
ANDROID: Use standard logging functions in goldfish_pipe
ANDROID: Fix whitespace in goldfish
staging: android: ashmem: Fix possible deadlock in ashmem_ioctl
llist: clang: introduce member_address_is_nonnull()
ANDROID: ranchu: 32 bit framebuffer support
ANDROID: Address checkpatch warnings in goldfishfb
ANDROID: Address checkpatch.pl warnings in goldfish_pipe
ANDROID: sdcardfs: fix lock issue on 32 bit/SMP architectures
ANDROID: goldfish: Fix typo in goldfish_cmd_locked() call
ANDROID: Address checkpatch.pl warnings in goldfish_pipe_v2
FROMLIST: f2fs: don't put dentry page in pagecache into highmem
ANDROID: Delete the goldfish_nand driver.
ANDROID: Add input support for Android Wear.
ANDROID: proc: fix config & includes for /proc/uid
FROMLIST: ARM: amba: Don't read past the end of sysfs "driver_override" buffer
UPSTREAM: ANDROID: binder: remove WARN() for redundant txn error
ANDROID: cpufreq: times: Add missing includes
ANDROID: cpufreq: Add time_in_state to /proc/uid directories
ANDROID: proc: Add /proc/uid directory
ANDROID: cpufreq: times: track per-uid time in state
ANDROID: cpufreq: track per-task time in state
ANDROID: keychord: Check for write data size
Revert "binder: add missing binder_unlock()"
ANDROID: sdcardfs: Set num in extension_details during make_item
ANDROID: sdcardfs: Hold i_mutex for i_size_write
BACKPORT, FROMGIT: crypto: speck - add test vectors for Speck64-XTS
BACKPORT, FROMGIT: crypto: speck - add test vectors for Speck128-XTS
BACKPORT, FROMGIT: crypto: arm/speck - add NEON-accelerated implementation of Speck-XTS
FROMGIT: crypto: speck - export common helpers
BACKPORT, FROMGIT: crypto: speck - add support for the Speck block cipher
UPSTREAM: ANDROID: binder: synchronize_rcu() when using POLLFREE.
f2fs: updates on v4.16-rc1
BACKPORT: tee: shm: Potential NULL dereference calling tee_shm_register()
BACKPORT: tee: shm: don't put_page on null shm->pages
BACKPORT: tee: shm: make function __tee_shm_alloc static
BACKPORT: tee: optee: check type of registered shared memory
BACKPORT: tee: add start argument to shm_register callback
BACKPORT: tee: optee: fix header dependencies
BACKPORT: tee: shm: inline tee_shm_get_id()
BACKPORT: tee: use reference counting for tee_context
BACKPORT: tee: optee: enable dynamic SHM support
BACKPORT: tee: optee: add optee-specific shared pool implementation
BACKPORT: tee: optee: store OP-TEE capabilities in private data
BACKPORT: tee: optee: add registered buffers handling into RPC calls
BACKPORT: tee: optee: add registered shared parameters handling
BACKPORT: tee: optee: add shared buffer registration functions
BACKPORT: tee: optee: add page list manipulation functions
BACKPORT: tee: optee: Update protocol definitions
BACKPORT: tee: shm: add page accessor functions
BACKPORT: tee: shm: add accessors for buffer size and page offset
BACKPORT: tee: add register user memory
BACKPORT: tee: flexible shared memory pool creation
BACKPORT: optee: support asynchronous supplicant requests
BACKPORT: tee: add TEE_IOCTL_PARAM_ATTR_META
BACKPORT: tee: add tee_param_is_memref() for driver use
f2fs: fix build error with multiply defined inode_nohighmem()
BACKPORT: xfrm: Fix return value check of copy_sec_ctx.
time: Fix ktime_get_raw() incorrect base accumulation
sched/fair: prevent possible infinite loop in sched_group_energy
UPSTREAM: MIPS: Fix build of compressed image
ANDROID: qtaguid: Fix the UAF problem with tag_ref_tree
UPSTREAM: ANDROID: binder: remove waitqueue when thread exits.
UPSTREAM: arm64/efi: Make strnlen() available to the EFI namespace
UPSTREAM: ARM: boot: Add an implementation of strnlen for libfdt
ANDROID: MIPS: Add ranchu[32r5|32r6|64]_defconfig
FROMLIST: tty: goldfish: Enable 'earlycon' only if built-in
FROMLIST: MIPS: ranchu: Add Ranchu as a new generic-based board
FROMLIST: MIPS: Add noexec=on|off kernel parameter
FROMLIST: MIPS: CPC: Map registers using DT in mips_cpc_default_phys_base()
FROMLIST: dt-bindings: Document mti,mips-cpc binding
FROMLIST: MIPS: math-emu: Mark fall throughs in switch statements with a comment
FROMLIST: MIPS: math-emu: Avoid multiple assignment
FROMLIST: MIPS: math-emu: Avoid an assignment within if statement condition
FROMLIST: MIPS: math-emu: Declare function srl128() as static
FROMLIST: MIPS: math-emu: Avoid definition duplication for macro DPXMULT()
FROMLIST: MIPS: math-emu: Remove an unnecessary header inclusion
UPSTREAM: scripts/dtc: Update to upstream version 0931cea3ba20
UPSTREAM: scripts/dtc: dt_to_config - kernel config options for a devicetree
UPSTREAM: scripts/dtc: Update to upstream version 53bf130b1cdd
UPSTREAM: scripts/dtc: Update to upstream commit b06e55c88b9b
UPSTREAM: scripts/dtc: dtx_diff - add info to error message
UPSTREAM: dtc: create tool to diff device trees
UPSTREAM: config: android-base: disable CONFIG_NFSD and CONFIG_NFS_FS
UPSTREAM: config: android-base: add CGROUP_BPF
UPSTREAM: config: android-base: add CONFIG_MODULES option
UPSTREAM: config: android-base: add CONFIG_IKCONFIG option
UPSTREAM: config: android-base: disable CONFIG_USELIB and CONFIG_FHANDLE
UPSTREAM: config: android-base: enable hardened usercopy and kernel ASLR
UPSTREAM: config: android: enable CONFIG_SECCOMP
UPSTREAM: config: android: set SELinux as default security mode
UPSTREAM: config: android: move device mapper options to recommended
UPSTREAM: config/android: Remove CONFIG_IPV6_PRIVACY
UPSTREAM: config: add android config fragments
BACKPORT: MIPS: generic: Add a MAINTAINERS entry
BACKPORT: irqchip/irq-goldfish-pic: Add Goldfish PIC driver
UPSTREAM: dt-bindings/goldfish-pic: Add device tree binding for Goldfish PIC driver
UPSTREAM: MIPS: Allow storing pgd in C0_CONTEXT for MIPSr6
UPSTREAM: MIPS: CPS: Handle spurious VP starts more gracefully
UPSTREAM: MIPS: CPS: Handle cores not powering down more gracefully
UPSTREAM: MIPS: CPS: Prevent multi-core with dcache aliasing
UPSTREAM: MIPS: CPS: Select CONFIG_SYS_SUPPORTS_SCHED_SMT for MIPSr6
UPSTREAM: MIPS: CM: WARN on attempt to lock invalid VP, not BUG
UPSTREAM: MIPS: CM: Avoid per-core locking with CM3 & higher
UPSTREAM: MIPS: smp-cps: Avoid BUG() when offlining pre-r6 CPUs
UPSTREAM: MIPS: smp-cps: Add support for CPU hotplug of MIPSr6 processors
UPSTREAM: MIPS: generic: Bump default NR_CPUS to 16
UPSTREAM: MIPS: pm-cps: Change FSB workaround to CPU blacklist
UPSTREAM: MIPS: Fix early CM probing
UPSTREAM: MIPS: smp-cps: Stop printing EJTAG exceptions to UART
UPSTREAM: MIPS: smp-cps: Add nothreads kernel parameter
UPSTREAM: MIPS: smp-cps: Support MIPSr6 Virtual Processors
UPSTREAM: MIPS: smp-cps: Skip core setup if coherent
UPSTREAM: MIPS: smp-cps: Pull boot config retrieval out of mips_cps_boot_vpes
UPSTREAM: MIPS: smp-cps: Pull cache init into a function
UPSTREAM: MIPS: smp-cps: Ensure our VP ident calculation is correct
UPSTREAM: irqchip: mips-gic: Provide VP ID accessor
UPSTREAM: irqchip: mips-gic: Use HW IDs for VPE_OTHER_ADDR
UPSTREAM: MIPS: CM: Fix mips_cm_max_vp_width for UP kernels
UPSTREAM: MIPS: CM: Add CM GCR_BEV_BASE accessors
UPSTREAM: MIPS: CPC: Add start, stop and running CM3 CPC registers
UPSTREAM: MIPS: pm-cps: Avoid offset overflow on MIPSr6
UPSTREAM: MIPS: traps: Make sure secondary cores have a sane ebase register
UPSTREAM: MIPS: Detect MIPSr6 Virtual Processor support
UPSTREAM: Documentation: Add device tree binding for Goldfish FB driver
UPSTREAM: MIPS: math-emu: Use preferred flavor of unsigned integer declarations
UPSTREAM: MIPS: math-emu: <MADDF|MSUBF>.D: Fix accuracy (64-bit case)
UPSTREAM: MIPS: math-emu: <MADDF|MSUBF>.S: Fix accuracy (32-bit case)
UPSTREAM: MIPS: Update Goldfish RTC driver maintainer email address
UPSTREAM: MIPS: Update RINT emulation maintainer email address
UPSTREAM: MIPS: math-emu: do not use bools for arithmetic
UPSTREAM: rtc: goldfish: Add RTC driver for Android emulator
BACKPORT: dt-bindings: Add device tree binding for Goldfish RTC driver
UPSTREAM: tty: goldfish: Implement support for kernel 'earlycon' parameter
UPSTREAM: tty: goldfish: Use streaming DMA for r/w operations on Ranchu platforms
UPSTREAM: tty: goldfish: Refactor constants to better reflect their nature
UPSTREAM: MIPS: math-emu: Add FP emu debugfs stats for individual instructions
UPSTREAM: MIPS: math-emu: Add FP emu debugfs clear functionality
UPSTREAM: MIPS: math-emu: Add FP emu debugfs statistics for branches
BACKPORT: MIPS: math-emu: CLASS.D: Zero bits 32-63 of the result
BACKPORT: MIPS: math-emu: RINT.<D|S>: Fix several problems by reimplementation
UPSTREAM: MIPS: math-emu: CMP.Sxxx.<D|S>: Prevent occurrences of SIGILL crashes
UPSTREAM: MIPS: math-emu: <MADDF|MSUBF>.<D|S>: Clean up "maddf_flags" enumeration
UPSTREAM: MIPS: math-emu: <MADDF|MSUBF>.<D|S>: Fix some cases of zero inputs
UPSTREAM: MIPS: math-emu: <MADDF|MSUBF>.<D|S>: Fix some cases of infinite inputs
UPSTREAM: MIPS: math-emu: <MADDF|MSUBF>.<D|S>: Fix NaN propagation
UPSTREAM: tty: goldfish: Fix a parameter of a call to free_irq
UPSTREAM: MIPS: VDSO: Fix clobber lists in fallback code paths
UPSTREAM: MIPS: VDSO: Fix a mismatch between comment and preprocessor constant
UPSTREAM: MIPS: VDSO: Add implementation of gettimeofday() fallback
UPSTREAM: MIPS: VDSO: Add implementation of clock_gettime() fallback
UPSTREAM: MIPS: VDSO: Fix conversions in do_monotonic()/do_monotonic_coarse()
UPSTREAM: MIPS: unaligned: Add DSP lwx & lhx misaligned access support
UPSTREAM: MIPS: build: Fix "-modd-spreg" switch usage when compiling for mips32r6
UPSTREAM: MIPS: cmdline: Add support for 'memmap' parameter
UPSTREAM: MIPS: math-emu: Handle zero accumulator case in MADDF and MSUBF separately
UPSTREAM: MIPS: Support per-device DMA coherence
UPSTREAM: MIPS: dma-default: Don't check hw_coherentio if device is non-coherent
UPSTREAM: MIPS: Sanitise coherentio semantics
UPSTREAM: MIPS: CPC: Provide default mips_cpc_default_phys_base to ignore CPC
UPSTREAM: MIPS: generic: Introduce generic DT-based board support
UPSTREAM: MIPS: Support generating Flattened Image Trees (.itb)
UPSTREAM: MIPS: Allow emulation for unaligned [LS]DXC1 instructions
UPSTREAM: MIPS: math-emu: Fix BC1EQZ and BC1NEZ condition handling
UPSTREAM: MIPS: r2-on-r6-emu: Clear BLTZALL and BGEZALL debugfs counters
UPSTREAM: MIPS: r2-on-r6-emu: Fix BLEZL and BGTZL identification
UPSTREAM: MIPS: remove aliasing alignment if HW has antialiasing support
BACKPORT: MIPS: store the appended dtb address in a variable
UPSTREAM: MIPS: Fix FCSR Cause bit handling for correct SIGFPE issue
UPSTREAM: MIPS: kernel: Audit and remove any unnecessary uses of module.h
UPSTREAM: MIPS: c-r4k: Fix sigtramp SMP call to use kmap
UPSTREAM: MIPS: c-r4k: Fix protected_writeback_scache_line for EVA
UPSTREAM: MIPS: Spelling fix lets -> let's
UPSTREAM: MIPS: R6: Fix typo
UPSTREAM: MIPS: traps: Correct the SIGTRAP debug ABI in `do_watch' and `do_trap_or_bp'
UPSTREAM: MIPS: inst.h: Rename cbcond{0,1}_op to pop{1,3}0_op
UPSTREAM: MIPS: inst.h: Rename b{eq,ne}zcji[al]c_op to pop{6,7}6_op
UPSTREAM: MIPS: math-emu: Fix m{add,sub}.s shifts
UPSTREAM: MIPS: inst: Declare fsel_op for sel.fmt instruction
UPSTREAM: MIPS: math-emu: Fix code indentation
UPSTREAM: MIPS: math-emu: Fix bit-width in ieee754dp_{mul, maddf, msubf} comments
UPSTREAM: MIPS: math-emu: Add z argument macros
UPSTREAM: MIPS: math-emu: Unify ieee754dp_m{add,sub}f
UPSTREAM: MIPS: math-emu: Unify ieee754sp_m{add,sub}f
UPSTREAM: MIPS: math-emu: Emulate MIPSr6 sel.fmt instruction
UPSTREAM: MIPS: math-emu: Fix BC1{EQ,NE}Z emulation
UPSTREAM: MIPS: math-emu: Always propagate sNaN payload in quieting
UPSTREAM: MIPS: Fix misspellings in comments.
UPSTREAM: MIPS: math-emu: Add IEEE Std 754-2008 NaN encoding emulation
UPSTREAM: MIPS: math-emu: Add IEEE Std 754-2008 ABS.fmt and NEG.fmt emulation
UPSTREAM: MIPS: non-exec stack & heap when non-exec PT_GNU_STACK is present
UPSTREAM: MIPS: Add IEEE Std 754 conformance mode selection
UPSTREAM: MIPS: Determine the presence of IEEE Std 754-2008 features
UPSTREAM: MIPS: Define the legacy-NaN and 2008-NaN features
UPSTREAM: MIPS: ELF: Interpret the NAN2008 file header flag
UPSTREAM: ELF: Also pass any interpreter's file header to `arch_check_elf'
UPSTREAM: MIPS: Use a union to access the ELF file header
UPSTREAM: MIPS: Fix delay slot emulation count in debugfs
BACKPORT: exit_thread: accept a task parameter to be exited
UPSTREAM: mn10300: let exit_fpu accept a task
UPSTREAM: MIPS: Use per-mm page to execute branch delay slot instructions
BACKPORT: s390: get rid of exit_thread()
BACKPORT: exit_thread: remove empty bodies
UPSTREAM: MIPS: Make flush_thread
UPSTREAM: MIPS: Properly disable FPU in start_thread()
UPSTREAM: MIPS: Select CONFIG_HANDLE_DOMAIN_IRQ and make it work.
UPSTREAM: MIPS: math-emu: Fix typo
UPSTREAM: MIPS: math-emu: dsemul: Remove an unused bit in ADDIUPC emulation
UPSTREAM: MIPS: math-emu: dsemul: Reduce `get_isa16_mode' clutter
UPSTREAM: MIPS: math-emu: dsemul: Correct description of the emulation frame
UPSTREAM: MIPS: math-emu: Correct the emulation of microMIPS ADDIUPC instruction
UPSTREAM: MIPS: math-emu: Make microMIPS branch delay slot emulation work
UPSTREAM: MIPS: math-emu: dsemul: Fix ill formatting of microMIPS part
UPSTREAM: MIPS: math-emu: Correctly handle NOP emulation
ANDROID: sdcardfs: Protect set_top
ANDROID: fsnotify: Notify lower fs of open
Revert "ANDROID: sdcardfs: notify lower file of opens"
ANDROID: sdcardfs: Use lower getattr times/size
ANDROID: sched/rt: schedtune: Add boost retention to RT
ANDROID: sched: EAS: check energy_aware() before calling select_energy_cpu_brute() in up-migrate path
UPSTREAM: eventpoll.h: add missing epoll event masks
ANDROID: xattr: Pass EOPNOTSUPP to permission2
ANDROID: sdcardfs: Move default_normal to superblock
blkdev: Refactoring block io latency histogram codes
FROMLIST: arm64: kpti: Fix the interaction between ASID switching and software PAN
FROMLIST: arm64: Move post_ttbr_update_workaround to C code
FROMLIST: arm64: mm: Rename post_ttbr0_update_workaround
sched: EAS: Initialize push_task as NULL to avoid direct reference on out_unlock path
fscrypt: updates on 4.15-rc4
ANDROID: uid_sys_stats: fix the comment
BACKPORT: optee: fix invalid of_node_put() in optee_driver_init()
BACKPORT: tee: optee: sync with new naming of interrupts
BACKPORT: tee: indicate privileged dev in gen_caps
BACKPORT: tee: optee: interruptible RPC sleep
BACKPORT: tee: optee: add const to tee_driver_ops and tee_desc structures
BACKPORT: tee: tee_shm: Constify dma_buf_ops structures.
BACKPORT: tee: add forward declaration for struct device
BACKPORT: tee: optee: fix uninitialized symbol 'parg'
BACKPORT: tee.txt: standardize document format
BACKPORT: tee: add ARM_SMCCC dependency
BACKPORT: selinux: nlmsgtab: add SOCK_DESTROY to the netlink mapping tables
clocksource: arch_timer: make virtual counter access configurable
arm64: issue isb when trapping CNTVCT_EL0 access
BACKPORT: arm64: Add CNTFRQ_EL0 trap handler
BACKPORT: arm64: Add CNTVCT_EL0 trap handler
ANDROID: sdcardfs: Fix missing break on default_normal
ANDROID: usb: f_fs: Prevent gadget unbind if it is already unbound
arm64: Kconfig: Reword UNMAP_KERNEL_AT_EL0 kconfig entry
arm64: use RET instruction for exiting the trampoline
FROMLIST: arm64: kaslr: Put kernel vectors address in separate data page
FROMLIST: arm64: mm: Introduce TTBR_ASID_MASK for getting at the ASID in the TTBR
FROMLIST: arm64: Kconfig: Add CONFIG_UNMAP_KERNEL_AT_EL0
FROMLIST: arm64: entry: Add fake CPU feature for unmapping the kernel at EL0
FROMLIST: arm64: tls: Avoid unconditional zeroing of tpidrro_el0 for native tasks
FROMLIST: arm64: erratum: Work around Falkor erratum #E1003 in trampoline code
FROMLIST: arm64: entry: Hook up entry trampoline to exception vectors
FROMLIST: arm64: entry: Explicitly pass exception level to kernel_ventry macro
FROMLIST: arm64: mm: Map entry trampoline into trampoline and kernel page tables
FROMLIST: arm64: entry: Add exception trampoline page for exceptions from EL0
FROMLIST: arm64: mm: Invalidate both kernel and user ASIDs when performing TLBI
FROMLIST: arm64: mm: Add arm64_kernel_unmapped_at_el0 helper
FROMLIST: arm64: mm: Allocate ASIDs in pairs
FROMLIST: arm64: mm: Fix and re-enable ARM64_SW_TTBR0_PAN
FROMLIST: arm64: mm: Move ASID from TTBR0 to TTBR1
FROMLIST: arm64: mm: Temporarily disable ARM64_SW_TTBR0_PAN
FROMLIST: arm64: mm: Use non-global mappings for kernel space
UPSTREAM: arm64: factor out entry stack manipulation
UPSTREAM: arm64: tlbflush.h: add __tlbi() macro
ANDROID: sdcardfs: Add default_normal option
ANDROID: sdcardfs: notify lower file of opens
ANDROID: binder: Remove obsolete proc waitqueue.
UPSTREAM: arm64: setup: introduce kaslr_offset()
UPSTREAM: kcov: fix comparison callback signature
UPSTREAM: kcov: support comparison operands collection
UPSTREAM: kcov: remove pointless current != NULL check
UPSTREAM: kcov: support compat processes
UPSTREAM: kcov: simplify interrupt check
UPSTREAM: kcov: make kcov work properly with KASLR enabled
UPSTREAM: kcov: add more missing includes
UPSTREAM: kcov: add missing #include <linux/sched.h>
UPSTREAM: kcov: properly check if we are in an interrupt
UPSTREAM: kcov: don't profile branches in kcov
UPSTREAM: kcov: don't trace the code coverage code
BACKPORT: kernel: add kcov code coverage
BACKPORT: irq: Make the irqentry text section unconditional
UPSTREAM: arch, ftrace: for KASAN put hard/soft IRQ entries into separate sections
UPSTREAM: x86, kasan, ftrace: Put APIC interrupt handlers into .irqentry.text
UPSTREAM: kasan: make get_wild_bug_type() static
UPSTREAM: kasan: separate report parts by empty lines
UPSTREAM: kasan: improve double-free report format
UPSTREAM: kasan: print page description after stacks
UPSTREAM: kasan: improve slab object description
UPSTREAM: kasan: change report header
UPSTREAM: kasan: simplify address description logic
UPSTREAM: kasan: change allocation and freeing stack traces headers
UPSTREAM: kasan: unify report headers
UPSTREAM: kasan: introduce helper functions for determining bug type
BACKPORT: kasan: report only the first error by default
UPSTREAM: kasan: fix races in quarantine_remove_cache()
UPSTREAM: kasan: resched in quarantine_remove_cache()
BACKPORT: kasan, sched/headers: Uninline kasan_enable/disable_current()
BACKPORT: kasan: drain quarantine of memcg slab objects
UPSTREAM: kasan: eliminate long stalls during quarantine reduction
UPSTREAM: kasan: support panic_on_warn
UPSTREAM: x86/suspend: fix false positive KASAN warning on suspend/resume
UPSTREAM: kasan: support use-after-scope detection
UPSTREAM: kasan/tests: add tests for user memory access functions
UPSTREAM: mm, kasan: add a ksize() test
UPSTREAM: kasan: test fix: warn if the UAF could not be detected in kmalloc_uaf2
UPSTREAM: kasan: modify kmalloc_large_oob_right(), add kmalloc_pagealloc_oob_right()
UPSTREAM: lib/stackdepot: export save/fetch stack for drivers
UPSTREAM: lib/stackdepot.c: bump stackdepot capacity from 16MB to 128MB
BACKPORT: kprobes: Unpoison stack in jprobe_return() for KASAN
UPSTREAM: kasan: remove the unnecessary WARN_ONCE from quarantine.c
UPSTREAM: kasan: avoid overflowing quarantine size on low memory systems
UPSTREAM: kasan: improve double-free reports
BACKPORT: mm: coalesce split strings
BACKPORT: mm/kasan: get rid of ->state in struct kasan_alloc_meta
UPSTREAM: mm/kasan: get rid of ->alloc_size in struct kasan_alloc_meta
UPSTREAM: mm: kasan: remove unused 'reserved' field from struct kasan_alloc_meta
UPSTREAM: mm/kasan, slub: don't disable interrupts when object leaves quarantine
UPSTREAM: mm/kasan: don't reduce quarantine in atomic contexts
UPSTREAM: mm/kasan: fix corruptions and false positive reports
UPSTREAM: lib/stackdepot.c: use __GFP_NOWARN for stack allocations
BACKPORT: mm, kasan: switch SLUB to stackdepot, enable memory quarantine for SLUB
UPSTREAM: kasan/quarantine: fix bugs on qlist_move_cache()
UPSTREAM: mm: mempool: kasan: don't put mempool objects in quarantine
UPSTREAM: kasan: change memory hot-add error messages to info messages
BACKPORT: mm/kasan: add API to check memory regions
UPSTREAM: mm/kasan: print name of mem[set,cpy,move]() caller in report
UPSTREAM: mm: kasan: initial memory quarantine implementation
UPSTREAM: lib/stackdepot: avoid to return 0 handle
UPSTREAM: lib/stackdepot.c: allow the stack trace hash to be zero
UPSTREAM: mm, kasan: fix compilation for CONFIG_SLAB
BACKPORT: mm, kasan: stackdepot implementation. Enable stackdepot for SLAB
BACKPORT: mm, kasan: add GFP flags to KASAN API
UPSTREAM: mm, kasan: SLAB support
UPSTREAM: mm/slab: align cache size first before determination of OFF_SLAB candidate
UPSTREAM: mm/slab: use more appropriate condition check for debug_pagealloc
UPSTREAM: mm/slab: factor out debugging initialization in cache_init_objs()
UPSTREAM: mm/slab: remove object status buffer for DEBUG_SLAB_LEAK
UPSTREAM: mm/slab: alternative implementation for DEBUG_SLAB_LEAK
UPSTREAM: mm/slab: clean up DEBUG_PAGEALLOC processing code
UPSTREAM: mm/slab: activate debug_pagealloc in SLAB when it is actually enabled
sched: EAS/WALT: Don't take into account of running task's util
BACKPORT: schedutil: Reset cached freq if it is not in sync with next_freq
UPSTREAM: kasan: add functions to clear stack poison
ANDROID: initramfs: call free_initrd() when skipping init
Documentation: tee subsystem and op-tee driver
tee: add OP-TEE driver
tee: generic TEE subsystem
dt/bindings: add bindings for optee
kernel.h: add u64_to_user_ptr()
ARM: 8481/2: drivers: psci: replace psci firmware calls
ARM: 8480/2: arm64: add implementation for arm-smccc
ARM: 8479/2: add implementation for arm-smccc
ARM: 8478/2: arm/arm64: add arm-smccc
UPSTREAM: net: xfrm: allow clearing socket xfrm policies.
BACKPORT: time: Clean up CLOCK_MONOTONIC_RAW time handling
BACKPORT: time: Fix CLOCK_MONOTONIC_RAW sub-nanosecond accounting
UPSTREAM: arm64: vdso: fix clock_getres for 4GiB-aligned res
f2fs: updates on 4.15-rc1
UPSTREAM: android: binder: fix type mismatch warning
BACKPORT: arm64: Use __pa_symbol for empty_zero_page
BACKPORT: arm64: Use __pa_symbol for kernel symbols
UPSTREAM: mm: Introduce lm_alias
FROMLIST: binder: fix proc->files use-after-free
ANDROID: binder: clarify deferred thread work.
FROMLIST: arm64: Avoid aligning normal memory pointers in __memcpy_{to,from}io
BACKPORT: xfrm: Clear sk_dst_cache when applying per-socket policy.
Revert "ANDROID: sched/rt: schedtune: Add boost retention to RT"
cpufreq: Drop schedfreq governor
ANDROID: sched/rt: schedtune: Add boost retention to RT
ANDROID: sched/rt: add schedtune accounting
ANDROID: Revert "arm64: move ELF_ET_DYN_BASE to 4GB / 4MB"
ANDROID: Revert "arm: move ELF_ET_DYN_BASE to 4MB"
sched: EAS: Fix the calculation of group util in group_idle_state()
sched: EAS: update trg_cpu to backup_cpu if no energy saving for target_cpu
sched: EAS: Fix the condition to distinguish energy before/after
sched: EAS: upmigrate misfit current task
sched: avoid pushing tasks to an offline CPU
sched: Extend active balance to accept 'push_task' argument
Revert "sched/core: Warn if ENERGY_AWARE is enabled but data is missing"
Revert "sched/core: fix have_sched_energy_data build warning"
FROMLIST: kbuild: clang: fix build failures with sparse check
Revert "Revert "UPSTREAM: efi/libstub/arm64: Set -fpie when building the EFI stub""
BACKPORT: efi/libstub: Unify command line param parsing
BACKPORT: arm64: relocatable: suppress R_AARCH64_ABS64 relocations in vmlinux
sched/core: fix have_sched_energy_data build warning
sched/core: Warn if ENERGY_AWARE is enabled but data is missing
sched: walt: Correct WALT window size initialization
FROMLIST: sched/fair: Use wake_q length as a hint for wake_wide
sched: WALT: account cumulative window demand
sched/fair: remove useless variable in find_best_target
sched/tune: access schedtune_initialized under CGROUP_SCHEDTUNE
sched/fair: consider task utilization in group_max_util()
sched/fair: consider task utilization in group_norm_util()
sched/fair: enforce EAS mode
sched/fair: ignore backup CPU when not valid
sched/fair: trace energy_diff for non boosted tasks
UPSTREAM: sched/fair: Sync task util before slow-path wakeup
UPSTREAM: sched/fair: Fix usage of find_idlest_group() when the local group is idlest
UPSTREAM: sched/fair: Fix usage of find_idlest_group() when no groups are allowed
BACKPORT: sched/fair: Fix find_idlest_group when local group is not allowed
UPSTREAM: sched/fair: Remove unnecessary comparison with -1
BACKPORT: sched/fair: Move select_task_rq_fair slow-path into its own function
UPSTREAM: sched/fair: Force balancing on nohz balance if local group has capacity
UPSTREAM: sched/core: Add missing update_rq_clock() call in set_user_nice()
UPSTREAM: sched/core: Add missing update_rq_clock() call for task_hot()
UPSTREAM: sched/core: Add missing update_rq_clock() in detach_task_cfs_rq()
UPSTREAM: sched/core: Add missing update_rq_clock() in post_init_entity_util_avg()
UPSTREAM: sched/core: Fix find_idlest_group() for fork
BACKPORT: sched/fair: Fix PELT integrity for new tasks
BACKPORT: sched/cgroup: Fix cpu_cgroup_fork() handling
UPSTREAM: sched/fair: Fix and optimize the fork() path
BACKPORT: sched/fair: Make it possible to account fair load avg consistently
cpufreq/sched: Consider max cpu capacity when choosing frequencies
ANDROID: binder: show high watermark of alloc->pages.
ANDROID: binder: Add thread->process_todo flag.
UPSTREAM: arm64: compat: Remove leftover variable declaration
ANDROID: sched/fair: Select correct capacity state for energy_diff
Revert "UPSTREAM: efi/libstub/arm64: Set -fpie when building the EFI stub"
cpufreq: schedutil: clamp util to CPU maximum capacity
FROMLIST: android: binder: Fix null ptr dereference in debug msg
FROMLIST: android: binder: Change binder_shrinker to static
cpufreq/sched: Use cpu max freq rather than policy max
trace: sched: Fix util_avg_walt in sched_load_avg_cpu trace
sched/fair: remove erroneous RCU_LOCKDEP_WARN from start_cpu()
sched: EAS/WALT: finish accounting prior to task_tick
cpufreq: sched: update capacity request upon tick always
sched/fair: prevent meaningless active migration
sched: walt: Leverage existing helper APIs to apply invariance
ANDROID: HACK: arm64: use -mno-implicit-float instead of -mgeneral-regs-only
sched: Update task->on_rq when tasks are moving between runqueues
FROMLIST: f2fs: expose some sectors to user in inline data or dentry case
crypto: Work around deallocated stack frame reference gcc bug on sparc.
UPSTREAM: f2fs: fix potential panic during fstrim
ANDROID: fscrypt: remove unnecessary fscrypto.h
ANDROID: binder: fix node sched policy calculation
ANDROID: binder: init desired_prio.sched_policy before use it
BACKPORT: net: xfrm: support setting an output mark.
UPSTREAM: xfrm: Only add l3mdev oif to dst lookups
UPSTREAM: net: l3mdev: Add master device lookup by index
ANDROID: Kbuild, LLVMLinux: allow overriding clang target triple
CHROMIUM: arm64: Disable asm-operand-width warning for clang
CHROMIUM: kbuild: clang: Disable the 'duplicate-decl-specifier' warning
UPSTREAM: x86/build: Use cc-option to validate stack alignment parameter
UPSTREAM: x86/build: Fix stack alignment for CLang
UPSTREAM: efi/libstub/arm64: Set -fpie when building the EFI stub
BACKPORT: efi/libstub/arm64: Force 'hidden' visibility for section markers
UPSTREAM: compiler, clang: always inline when CONFIG_OPTIMIZE_INLINING is disabled
UPSTREAM: x86/boot: #undef memcpy() et al in string.c
UPSTREAM: crypto: arm64/sha - avoid non-standard inline asm tricks
UPSTREAM: kbuild: clang: Disable 'address-of-packed-member' warning
UPSTREAM: x86/build: Specify stack alignment for clang
UPSTREAM: x86/build: Use __cc-option for boot code compiler options
BACKPORT: kbuild: Add __cc-option macro
UPSTREAM: x86/hweight: Don't clobber %rdi
BACKPORT: x86/hweight: Get rid of the special calling convention
BACKPORT: x86/mm/kaslr: Use the _ASM_MUL macro for multiplication to work around Clang incompatibility
UPSTREAM: crypto, x86: aesni - fix token pasting for clang
UPSTREAM: x86/kbuild: Use cc-option to enable -falign-{jumps/loops}
UPSTREAM: compiler, clang: properly override 'inline' for clang
UPSTREAM: compiler, clang: suppress warning for unused static inline functions
UPSTREAM: Kbuild: provide a __UNIQUE_ID for clang
UPSTREAM: modules: mark __inittest/__exittest as __maybe_unused
BACKPORT: kbuild: Add support to generate LLVM assembly files
UPSTREAM: kbuild: use -Oz instead of -Os when using clang
BACKPORT: kbuild, LLVMLinux: Add -Werror to cc-option to support clang
UPSTREAM: kbuild: drop -Wno-unknown-warning-option from clang options
UPSTREAM: kbuild: fix asm-offset generation to work with clang
UPSTREAM: kbuild: consolidate redundant sed script ASM offset generation
UPSTREAM: kbuild: Consolidate header generation from ASM offset information
UPSTREAM: kbuild: clang: add -no-integrated-as to KBUILD_[AC]FLAGS
UPSTREAM: kbuild: Add better clang cross build support
FROMLIST: tracing: Add support for preempt and irq enable/disable events
FROMLIST: tracing: Prepare to add preempt and irq trace events
ANDROID: binder: fix transaction leak.
ANDROID: binder: Add tracing for binder priority inheritance.
f2fs: catch up to v4.14-rc1
UPSTREAM: cpufreq: schedutil: use now as reference when aggregating shared policy requests
ANDROID: add script to fetch android kernel config fragments
f2fs: reorganize stat information
f2fs: clean up flush/discard command namings
f2fs: check in-memory sit version bitmap
f2fs: check in-memory nat version bitmap
f2fs: check in-memory block bitmap
f2fs: introduce FI_ATOMIC_COMMIT
f2fs: clean up with list_{first, last}_entry
f2fs: return fs_trim if there is no candidate
f2fs: avoid needless checkpoint in f2fs_trim_fs
f2fs: relax async discard commands more
f2fs: drop exist_data for inline_data when truncated to 0
f2fs: don't allow encrypted operations without keys
f2fs: show the max number of atomic operations
f2fs: get io size bit from mount option
f2fs: support IO alignment for DATA and NODE writes
f2fs: add submit_bio tracepoint
f2fs: reassign new segment for mode=lfs
f2fs: fix a missing discard prefree segments
f2fs: use rb_entry_safe
f2fs: add a case of no need to read a page in write begin
f2fs: fix a problem of using memory after free
f2fs: remove unneeded condition
f2fs: don't cache nat entry if out of memory
f2fs: remove unused values in recover_fsync_data
f2fs: support async discard based on v4.9
f2fs: resolve op and op_flags conflicts
f2fs: remove wrong backported codes
FROMLIST: binder: fix use-after-free in binder_transaction()
UPSTREAM: ipv6: fib: Unlink replaced routes from their nodes
f2fs: fix a missing size change in f2fs_setattr
f2fs: fix to access nullified flush_cmd_control pointer
f2fs: free meta pages if sanity check for ckpt is failed
f2fs: detect wrong layout
f2fs: call sync_fs when f2fs is idle
Revert "f2fs: use percpu_counter for # of dirty pages in inode"
f2fs: return AOP_WRITEPAGE_ACTIVATE for writepage
f2fs: do not activate auto_recovery for fallocated i_size
f2fs: fix 32-bit build
f2fs: fix incorrect free inode count in ->statfs
f2fs: drop duplicate header timer.h
f2fs: fix wrong AUTO_RECOVER condition
f2fs: do not recover i_size if it's valid
f2fs: fix fdatasync
f2fs: fix to account total free nid correctly
f2fs: fix an infinite loop when flush nodes in cp
f2fs: don't wait writeback for datas during checkpoint
f2fs: fix wrong written_valid_blocks counting
f2fs: avoid BG_GC in f2fs_balance_fs
f2fs: fix redundant block allocation
f2fs: use err for f2fs_preallocate_blocks
f2fs: support multiple devices
f2fs: allow dio read for LFS mode
f2fs: revert segment allocation for direct IO
f2fs: return directly if block has been removed from the victim
Revert "f2fs: do not recover from previous remained wrong dnodes"
f2fs: remove checkpoint in f2fs_freeze
f2fs: assign segments correctly for direct_io
f2fs: fix wrong i_atime recovery
f2fs: record inode updating status correctly
f2fs: Trace reset zone events
f2fs: Reset sequential zones on zoned block devices
f2fs: Cache zoned block devices zone type
f2fs: Do not allow adaptive mode for host-managed zoned block devices
f2fs: Always enable discard for zoned blocks devices
f2fs: Suppress discard warning message for zoned block devices
f2fs: Check zoned block feature for host-managed zoned block devices
f2fs: Use generic zoned block device terminology
f2fs: Add missing break in switch-case
f2fs: avoid infinite loop in the EIO case on recover_orphan_inodes
f2fs: report error of f2fs_fill_dentries
fs/crypto: catch up 4.9-rc6
f2fs: hide a maybe-uninitialized warning
f2fs: remove percpu_count due to performance regression
f2fs: make clean inodes when flushing inode page
f2fs: keep dirty inodes selectively for checkpoint
f2fs: Replace CURRENT_TIME_SEC with current_time() for inode timestamps
f2fs: use BIO_MAX_PAGES for bio allocation
f2fs: declare static function for __build_free_nids
f2fs: call f2fs_balance_fs for setattr
f2fs: count dirty inodes to flush node pages during checkpoint
f2fs: avoid casted negative value as shrink count
f2fs: don't interrupt free nids building during nid allocation
f2fs: clean up free nid list operations
f2fs: split free nid list
f2fs: clear nlink if fail to add_link
f2fs: fix sparse warnings
f2fs: fix error handling in fsync_node_pages
f2fs: fix to update largest extent under lock
f2fs: be aware of extent beyond EOF in fiemap
f2fs: don't miss any f2fs_balance_fs cases
f2fs: add missing f2fs_balance_fs in f2fs_zero_range
f2fs: give a chance to detach from dirty list
f2fs: fix to release discard entries during checkpoint
f2fs: exclude free nids building and allocation
f2fs: fix to determine start_cp_addr by sbi->cur_cp_pack
f2fs: fix overflow due to condition check order
posix_acl: Clear SGID bit when setting file permissions
f2fs: fix wrong sum_page pointer in f2fs_gc
f2fs: backport from (4c1fad64 - Merge tag 'for-f2fs-4.9' of git://git.kernel.org/pub/scm/linux/kernel/git/jaegeuk/f2fs)
Revert "ANDROID: sched/tune: Initialize raw_spin_lock in boosted_groups"
BACKPORT: partial: mm, oom_reaper: do not mmput synchronously from the oom reaper context
FROMLIST: android: binder: Don't get mm from task
FROMLIST: android: binder: Remove unused vma argument
FROMLIST: android: binder: Drop lru lock in isolate callback
ANDROID: configs: remove config fragments
drivers: cpufreq_interactive: handle error for module load fail
UPSTREAM: Fix build break in fork.c when THREAD_SIZE < PAGE_SIZE
ANDROID: sdcardfs: Add missing break
ANDROID: Sdcardfs: Move gid derivation under flag
ANDROID: mnt: Fix freeing of mount data
drivers: cpufreq: checks to avoid kernel crash in cpufreq_interactive
ANDROID: Use sk_uid to replace uid get from socket file
ANDROID: nf: xt_qtaguid: fix handling for cases where tunnels are used.
Revert "ANDROID: Use sk_uid to replace uid get from socket file"
ANDROID: fiq_debugger: Fix minor bug in code
ANDROID: cpufreq-dt: Set sane defaults for schedutil rate limits
BACKPORT: cpufreq: schedutil: Use policy-dependent transition delays
FROMLIST: binder: fix an ret value override
FROMLIST: binder: fix memory corruption in binder_transaction binder
sched: WALT: fix window mis-alignment
sched: EAS: kill incorrect nohz idle cpu kick
sched: EAS: fix incorrect energy delta calculation due to rounding error
sched: EAS/WALT: take into account of waking task's load
cpufreq: sched: WALT: don't apply capacity margin twice
sched: WALT: fix potential overflow
sched: EAS: schedfreq: fix CPU util over estimation
sched: EAS/WALT: use cr_avg instead of prev_runnable_sum
sched: WALT: fix broken cumulative runnable average accounting
sched: deadline: WALT: account cumulative runnable avg
FROMLIST: android: binder: Add page usage in binder stats
FROMLIST: android: binder: Add shrinker tracepoints
FROMLIST: android: binder: Add global lru shrinker to binder
FROMLIST: android: binder: Move buffer out of area shared with user space
FROMLIST: android: binder: Add allocator selftest
FROMLIST: android: binder: Refactor prev and next buffer into a helper function
android: android-base.config: enable IP6_NF_MATCH_RPFILTER
UPSTREAM: cpufreq: schedutil: Use unsigned int for iowait boost
UPSTREAM: cpufreq: schedutil: Make iowait boost more energy efficient
ANDROID: NFC: st21nfca: Fix memory OOB and leak issues in connectivity events handler
ANDROID: check dir value of xfrm_userpolicy_id
ANDROID: NFC: Fix possible memory corruption when handling SHDLC I-Frame commands
ANDROID: nfc: fdp: Fix possible buffer overflow in WCS4000 NFC driver
ANDROID: NFC: st21nfca: Fix out of bounds kernel access when handling ATR_REQ
UPSTREAM: usb: dwc3: gadget: don't send extra ZLP
BACKPORT: usb: dwc3: gadget: handle request->zero
ANDROID: usb: gadget: assign no-op request complete callbacks
ANDROID: usb: gadget: configfs: fix null ptr in android_disconnect
ANDROID: uid_sys_stats: Fix implicit declaration of get_cmdline()
uid_sys_stats: log task io with a debug flag
ANDROID: Use sk_uid to replace uid get from socket file
UPSTREAM: arm64: smp: Prevent raw_smp_processor_id() recursion
UPSTREAM: arm64: restore get_current() optimisation
ANDROID: arm64: Fix a copy-paste error in prior init_thread_info build fix
UPSTREAM: locking: avoid passing around 'thread_info' in mutex debugging code
ANDROID: arm64: fix undeclared 'init_thread_info' error
UPSTREAM: kdb: use task_cpu() instead of task_thread_info()->cpu
ANDROID: keychord: Fix for a memory leak in keychord.
ANDROID: keychord: Fix races in keychord_write.
Use %zu to print resid (size_t).
ANDROID: keychord: Fix a slab out-of-bounds read.
ANDROID: binder: don't queue async transactions to thread.
ANDROID: binder: don't enqueue death notifications to thread todo.
ANDROID: binder: call poll_wait() unconditionally.
android: configs: move quota-related configs to recommended
BACKPORT: arm64: split thread_info from task stack
UPSTREAM: arm64: assembler: introduce ldr_this_cpu
UPSTREAM: arm64: make cpu number a percpu variable
UPSTREAM: arm64: smp: prepare for smp_processor_id() rework
BACKPORT: arm64: move sp_el0 and tpidr_el1 into cpu_suspend_ctx
UPSTREAM: arm64: prep stack walkers for THREAD_INFO_IN_TASK
UPSTREAM: arm64: unexport walk_stackframe
UPSTREAM: arm64: traps: simplify die() and __die()
UPSTREAM: arm64: factor out current_stack_pointer
BACKPORT: arm64: asm-offsets: remove unused definitions
UPSTREAM: arm64: thread_info remove stale items
UPSTREAM: thread_info: include <current.h> for THREAD_INFO_IN_TASK
UPSTREAM: thread_info: factor out restart_block
UPSTREAM: kthread: Pin the stack via try_get_task_stack()/put_task_stack() in to_live_kthread() function
UPSTREAM: sched/core: Add try_get_task_stack() and put_task_stack()
UPSTREAM: sched/core: Allow putting thread_info into task_struct
UPSTREAM: printk: when dumping regs, show the stack, not thread_info
UPSTREAM: fix up initial thread stack pointer vs thread_info confusion
UPSTREAM: Clarify naming of thread info/stack allocators
ANDROID: sdcardfs: override credential for ioctl to lower fs
ANDROID: binder: Don't BUG_ON(!spin_is_locked()).
sched/fair: Add a backup_cpu to find_best_target
sched/fair: Try to estimate possible idle states.
sched/fair: Sync task util before EAS wakeup
Revert "sched/fair: ensure utilization signals are synchronized before use"
sched/fair: kick nohz idle balance for misfit task
sched/fair: Update signals of nohz cpus if we are going idle
events: add tracepoint for find_best_target
sched/fair: streamline find_best_target heuristics
UPSTREAM: af_key: Fix sadb_x_ipsecrequest parsing
ANDROID: lowmemorykiller: Add tgid to kill message
Revert "proc: smaps: Allow smaps access for CAP_SYS_RESOURCE"
ANDROID: android-verity: mark dev as rw for linear target
ANDROID: sdcardfs: Remove unnecessary lock
ANDROID: binder: don't check prio permissions on restore.
Add BINDER_GET_NODE_DEBUG_INFO ioctl
UPSTREAM: cpufreq: schedutil: Trace frequency only if it has changed
UPSTREAM: cpufreq: schedutil: Avoid reducing frequency of busy CPUs prematurely
UPSTREAM: cpufreq: schedutil: Refactor sugov_next_freq_shared()
UPSTREAM: cpufreq: schedutil: Fix per-CPU structure initialization in sugov_start()
UPSTREAM: cpufreq: schedutil: Pass sg_policy to get_next_freq()
UPSTREAM: cpufreq: schedutil: move cached_raw_freq to struct sugov_policy
UPSTREAM: cpufreq: schedutil: Rectify comment in sugov_irq_work() function
UPSTREAM: cpufreq: schedutil: irq-work and mutex are only used in slow path
UPSTREAM: cpufreq: schedutil: enable fast switch earlier
UPSTREAM: cpufreq: schedutil: Avoid indented labels
ANDROID: binder: add RT inheritance flag to node.
ANDROID: binder: improve priority inheritance.
ANDROID: binder: add min sched_policy to node.
ANDROID: binder: add support for RT prio inheritance.
ANDROID: binder: push new transactions to waiting threads.
ANDROID: binder: remove proc waitqueue
FROMLIST: binder: remove global binder lock
FROMLIST: binder: fix death race conditions
FROMLIST: binder: protect against stale pointers in print_binder_transaction
FROMLIST: binder: protect binder_ref with outer lock
FROMLIST: binder: use inner lock to protect thread accounting
FROMLIST: binder: protect transaction_stack with inner lock.
FROMLIST: binder: protect proc->threads with inner_lock
FROMLIST: binder: protect proc->nodes with inner lock
FROMLIST: binder: add spinlock to protect binder_node
FROMLIST: binder: add spinlocks to protect todo lists
FROMLIST: binder: use inner lock to sync work dq and node counts
FROMLIST: binder: introduce locking helper functions
FROMLIST: binder: use node->tmp_refs to ensure node safety
FROMLIST: binder: refactor binder ref inc/dec for thread safety
FROMLIST: binder: make sure accesses to proc/thread are safe
FROMLIST: binder: make sure target_node has strong ref
FROMLIST: binder: guarantee txn complete / errors delivered in-order
FROMLIST: binder: refactor binder_pop_transaction
FROMLIST: binder: use atomic for transaction_log index
FROMLIST: binder: add more debug info when allocation fails.
FROMLIST: binder: protect against two threads freeing buffer
FROMLIST: binder: remove dead code in binder_get_ref_for_node
FROMLIST: binder: don't modify thread->looper from other threads
FROMLIST: binder: avoid race conditions when enqueuing txn
FROMLIST: binder: refactor queue management in binder_thread_read
FROMLIST: binder: add log information for binder transaction failures
FROMLIST: binder: make binder_last_id an atomic
FROMLIST: binder: change binder_stats to atomics
FROMLIST: binder: add protection for non-perf cases
FROMLIST: binder: remove binder_debug_no_lock mechanism
FROMLIST: binder: move binder_alloc to separate file
FROMLIST: binder: separate out binder_alloc functions
FROMLIST: binder: remove unneeded cleanup code
FROMLIST: binder: separate binder allocator structure from binder proc
FROMLIST: binder: Use wake up hint for synchronous transactions.
Revert "android: binder: move global binder state into context struct."
sched: walt: fix window misalignment when HZ=300
ANDROID: android-base.cfg: remove CONFIG_CGROUP_DEBUG
ANDROID: sdcardfs: use mount_nodev and fix a issue in sdcardfs_kill_sb
UPSTREAM: selinux: enable genfscon labeling for tracefs
UPSTREAM: drivers/perf: arm-pmu: fix RCU usage on pmu resume from low-power
UPSTREAM: drivers/perf: arm_pmu: implement CPU_PM notifier
ANDROID: squashfs: Fix endianness issue
ANDROID: squashfs: Fix signed division issue
UPSTREAM: usb: gadget: f_fs: avoid out of bounds access on comp_desc
UPSTREAM: bpf: don't let ldimm64 leak map addresses on unprivileged
BACKPORT: ext4: fix data exposure after a crash
ANDROID: sdcardfs: remove dead function open_flags_to_access_mode()
ANDROID: android-base.cfg: split out arm64-specific configs
usb: gadget: f_fs: Fix possibe deadlock
ANDROID: uid_sys_stats: check previous uid_entry before call find_or_register_uid
ANDROID: sdcardfs: d_splice_alias can return error values
android: base-cfg: disable CONFIG_NFS_FS and CONFIG_NFSD
schedstats/eas: guard properly to avoid breaking non-smp schedstats users
BACKPORT: f2fs: sanity check size of nat and sit cache
FROMLIST: f2fs: sanity check checkpoint segno and blkoff
sched/tune: don't use schedtune before it is ready
sched/fair: use SCHED_CAPACITY_SCALE for energy normalization
sched/{fair,tune}: use reciprocal_value to compute boost margin
sched/tune: Initialize raw_spin_lock in boosted_groups
sched/tune: report when SchedTune has not been initialized
sched/tune: fix sched_energy_diff tracepoint
sched/tune: increase group count to 5
cpufreq/schedutil: use boosted_cpu_util for PELT to match WALT
sched/fair: Fix sched_group_energy() to support per-cpu capacity states
sched/fair: discount task contribution to find CPU with lowest utilization
sched/fair: ensure utilization signals are synchronized before use
sched/fair: remove task util from own cpu when placing waking task
trace:sched: Make util_avg in load_avg trace reflect PELT/WALT as used
sched/fair: Add eas (& cas) specific rq, sd and task stats
sched/core: Fix PELT jump to max OPP upon util increase
sched: EAS & 'single cpu per cluster'/cpu hotplug interoperability
UPSTREAM: sched/core: Fix group_entity's share update
UPSTREAM: sched/fair: Fix calc_cfs_shares() fixed point arithmetics width confusion
UPSTREAM: sched/fair: Fix incorrect task group ->load_avg
UPSTREAM: sched/fair: Fix effective_load() to consistently use smoothed load
UPSTREAM: sched/fair: Propagate asynchrous detach
UPSTREAM: sched/fair: Propagate load during synchronous attach/detach
UPSTREAM: sched/fair: Fix hierarchical order in rq->leaf_cfs_rq_list
BACKPORT: sched/fair: Factorize PELT update
UPSTREAM: sched/fair: Factorize attach/detach entity
UPSTREAM: sched/fair: Improve PELT stuff some more
UPSTREAM: sched/fair: Apply more PELT fixes
UPSTREAM: sched/fair: Fix post_init_entity_util_avg() serialization
BACKPORT: sched/fair: Initiate a new task's util avg to a bounded value
sched/fair: Simplify idle_idx handling in select_idle_sibling()
sched/fair: refactor find_best_target() for simplicity
sched/fair: Change cpu iteration order in find_best_target()
sched/core: Add first cpu w/ max/min orig capacity to root domain
sched/core: Remove remnants of commit fd5c98da1a42
sched: Remove sysctl_sched_is_big_little
sched/fair: Code !is_big_little path into select_energy_cpu_brute()
EAS: sched/fair: Re-integrate 'honor sync wakeups' into wakeup path
Fixup!: sched/fair.c: Set SchedTune specific struct energy_env.task
sched/fair: Energy-aware wake-up task placement
sched/fair: Add energy_diff dead-zone margin
sched/fair: Decommission energy_aware_wake_cpu()
sched/fair: Do not force want_affine eq. true if EAS is enabled
arm64: Set SD_ASYM_CPUCAPACITY sched_domain flag on DIE level
UPSTREAM: sched/fair: Fix incorrect comment for capacity_margin
UPSTREAM: sched/fair: Avoid pulling tasks from non-overloaded higher capacity groups
UPSTREAM: sched/fair: Add per-CPU min capacity to sched_group_capacity
UPSTREAM: sched/fair: Consider spare capacity in find_idlest_group()
UPSTREAM: sched/fair: Compute task/cpu utilization at wake-up correctly
UPSTREAM: sched/fair: Let asymmetric CPU configurations balance at wake-up
UPSTREAM: sched/core: Enable SD_BALANCE_WAKE for asymmetric capacity systems
UPSTREAM: sched/core: Pass child domain into sd_init()
UPSTREAM: sched/core: Introduce SD_ASYM_CPUCAPACITY sched_domain topology flag
UPSTREAM: sched/core: Remove unnecessary NULL-pointer check
UPSTREAM: sched/fair: Optimize find_idlest_cpu() when there is no choice
BACKPORT: sched/fair: Make the use of prev_cpu consistent in the wakeup path
UPSTREAM: sched/core: Fix power to capacity renaming in comment
Partial Revert: "WIP: sched: Add cpu capacity awareness to wakeup balancing"
Revert "WIP: sched: Consider spare cpu capacity at task wake-up"
FROM-LIST: cpufreq: schedutil: Redefine the rate_limit_us tunable
cpufreq: schedutil: add up/down frequency transition rate limits
trace/sched: add rq utilization signal for WALT
sched/cpufreq: make schedutil use WALT signal
sched: cpufreq: use rt_avg as estimate of required RT CPU capacity
cpufreq: schedutil: move slow path from workqueue to SCHED_FIFO task
BACKPORT: kthread: allow to cancel kthread work
sched/cpufreq: fix tunables for schedfreq governor
BACKPORT: cpufreq: schedutil: New governor based on scheduler utilization data
sched: backport cpufreq hooks from 4.9-rc4
ANDROID: Kconfig: add depends for UID_SYS_STATS
ANDROID: hid: uhid: implement refcount for open and close
Revert "ext4: require encryption feature for EXT4_IOC_SET_ENCRYPTION_POLICY"
ANDROID: mnt: Fix next_descendent
ANDROID: uid_sys_stats: defer io stats calulation for dead tasks
ANDROID: AVB: Fix linter errors.
ANDROID: AVB: Fix invalidate_vbmeta_submit().
ANDROID: sdcardfs: Check for NULL in revalidate
ANDROID: AVB: Only invalidate vbmeta when told to do so.
ANDROID: sdcardfs: Move top to its own struct
ANDROID: lowmemorykiller: account for unevictable pages
ANDROID: usb: gadget: fix NULL pointer issue in mtp_read()
ANDROID: usb: f_mtp: return error code if transfer error in receive_file_work function
ANDROID: android-base.cfg: remove spurious CONFIG_MODULES line
ANDROID: memory_state_time: fix undefined behavior with missing DT properties
ANDROID: rfkill: fix unused function warning
ANDROID: make PF_KEY SHA256 use RFC-compliant truncation.
ANDROID: sdcardfs: fix sdcardfs_destroy_inode for the inode RCU approach
ANDROID: android-base.cfg: remove NETFILTER_XT_MATCH_QUOTA2_LOG
ANDROID: sdcardfs: Don't iput if we didn't igrab
ANDROID: Add untag hacks to inet_release function
ANDROID: android-base.cfg: remove USB_OTG_WAKELOCK
ANDROID: android-base.cfg: remove defunct options
ANDROID: arm64: suspend: Restore the UAO state
ANDROID: usb: gadget: f_audio_source: disable the CPU C-states upon playback
ANDROID: usb: gadget: f_mtp: Set 0xFFFFFFFF in mtp header ContainerLength field
net: pppolac/pppopns: Add back the msg_flags
ANDROID: uid_sys_stats: fix access of task_uid(task)
BACKPORT: f2fs: sanity check log_blocks_per_seg
ANDROID: sdcardfs: Call lower fs's revalidate
ANDROID: sdcardfs: Avoid setting GIDs outside of valid ranges
ANDROID: sdcardfs: Copy meta-data from lower inode
Revert "Revert "Android: sdcardfs: Don't do d_add for lower fs""
ANDROID: sdcardfs: Use filesystem specific hash
ANDROID: AVB error handler to invalidate vbmeta partition.
ANDROID: Update init/do_mounts_dm.c to the latest ChromiumOS version.
Revert "[RFC]cgroup: Change from CAP_SYS_NICE to CAP_SYS_RESOURCE for cgroup migration permissions"
Revert "USB: gadget: u_ether: Fix data stall issue in RNDIS tethering mode"
ANDROID: uid_sys_stats: reduce update_io_stats overhead
UPSTREAM: char: lack of bool string made CONFIG_DEVPORT always on
UPSTREAM: char: Drop bogus dependency of DEVPORT on !M68K
Revert "Android: sdcardfs: Don't do d_add for lower fs"
ANDROID: usb: gadget: fix MTP enumeration issue under super speed mode
Android: sdcardfs: Don't complain in fixup_lower_ownership
Android: sdcardfs: Don't do d_add for lower fs
ANDROID: sdcardfs: ->iget fixes
Android: sdcardfs: Change cache GID value
BACKPORT: [UPSTREAM] ext2: convert to mbcache2
BACKPORT [UPSTREAM] ext4: convert to mbcache2
BACKPORT: [UPSTREAM] mbcache2: reimplement mbcache
UPSTREAM: net: socket: Make unnecessarily global sockfs_setattr() static
UPSTREAM: net: ipv4: Don't crash if passing a null sk to ip_do_redirect.
UPSTREAM: net/packet: fix overflow in check for priv area size
Revert "Revert "Revert "CHROMIUM: android: binder: Fix potential scheduling-while-atomic"""
ANDROID: sdcardfs: Directly pass lower file for mmap
UPSTREAM: checkpatch: special audit for revert commit line
UPSTREAM: PM / sleep: make PM notifiers called symmetrically
Revert "Revert "CHROMIUM: android: binder: Fix potential scheduling-while-atomic""
BACKPORT: arm64: dts: juno: fix cluster sleep state entry latency on all SoC versions
staging: android: ashmem: lseek failed due to no FMODE_LSEEK.
ANDROID: sdcardfs: update module info
ANDROID: sdcardfs: use d_splice_alias
ANDROID: sdcardfs: add read_iter/write_iter opeations
ANDROID: sdcardfs: fix ->llseek to update upper and lower offset
ANDROID: sdcardfs: copy lower inode attributes in ->ioctl
ANDROID: sdcardfs: remove unnecessary call to do_munmap
Merge 4.4.59 into android-4.4
UPSTREAM: ipv6 addrconf: implement RFC7559 router solicitation backoff
android: base-cfg: enable CONFIG_INET_DIAG_DESTROY
ANDROID: android-base.cfg: add CONFIG_MODULES option
ANDROID: android-base.cfg: add CONFIG_IKCONFIG option
ANDROID: android-base.cfg: properly sort the file
ANDROID: binder: add hwbinder,vndbinder to BINDER_DEVICES.
ANDROID: sort android-recommended.cfg
UPSTREAM: config/android: Remove CONFIG_IPV6_PRIVACY
UPSTREAM: config: android: set SELinux as default security mode
config: android: move device mapper options to recommended
ANDROID: ARM64: Allow to choose appended kernel image
UPSTREAM: arm64: vdso: constify vm_special_mapping used for aarch32 vectors page
UPSTREAM: arm64: vdso: add __init section marker to alloc_vectors_page
UPSTREAM: ARM: 8597/1: VDSO: put RO and RO after init objects into proper sections
UPSTREAM: arm64: Add support for CLOCK_MONOTONIC_RAW in clock_gettime() vDSO
UPSTREAM: arm64: Refactor vDSO time functions
UPSTREAM: arm64: fix vdso-offsets.h dependency
UPSTREAM: kbuild: drop FORCE from PHONY targets
UPSTREAM: mm: add PHYS_PFN, use it in __phys_to_pfn()
UPSTREAM: ARM: 8476/1: VDSO: use PTR_ERR_OR_ZERO for vma check
ANDROID: sdcardfs: Fix style issues in macros
ANDROID: sdcardfs: Use seq_puts over seq_printf
ANDROID: sdcardfs: Use to kstrout
ANDROID: sdcardfs: Use pr_[...] instead of printk
ANDROID: sdcardfs: remove unneeded null check
ANDROID: sdcardfs: Fix style issues with comments
ANDROID: sdcardfs: Fix formatting
ANDROID: sdcardfs: correct order of descriptors
fix the deadlock in xt_qtaguid when enable DDEBUG
net: ipv6: Add sysctl for minimum prefix len acceptable in RIOs.
ANDROID: mmc: core: export emmc revision
BACKPORT: mmc: core: Export device lifetime information through sysfs
ANDROID: android-verity: do not compile as independent module
ANDROID: sched: fix duplicate sched_group_energy const specifiers
config: disable CONFIG_USELIB and CONFIG_FHANDLE
ANDROID: power: align wakeup_sources format
ANDROID: dm: android-verity: allow disable dm-verity for Treble VTS
uid_sys_stats: change to use rt_mutex
ANDROID: vfs: user permission2 in notify_change2
ANDROID: sdcardfs: Fix gid issue
ANDROID: sdcardfs: Use tabs instead of spaces in multiuser.h
ANDROID: sdcardfs: Remove uninformative prints
ANDROID: sdcardfs: move path_put outside of spinlock
ANDROID: sdcardfs: Use case insensitive hash function
ANDROID: sdcardfs: declare MODULE_ALIAS_FS
ANDROID: sdcardfs: Get the blocksize from the lower fs
ANDROID: sdcardfs: Use d_invalidate instead of drop_recurisve
ANDROID: sdcardfs: Switch to internal case insensitive compare
ANDROID: sdcardfs: Use spin_lock_nested
ANDROID: sdcardfs: Replace get/put with d_lock
ANDROID: sdcardfs: rate limit warning print
ANDROID: sdcardfs: Fix case insensitive lookup
ANDROID: uid_sys_stats: account for fsync syscalls
ANDROID: sched: add a counter to track fsync
ANDROID: uid_sys_stats: fix negative write bytes.
ANDROID: uid_sys_stats: allow writing same state
ANDROID: uid_sys_stats: rename uid_cputime.c to uid_sys_stats.c
ANDROID: uid_cputime: add per-uid IO usage accounting
DTB: Add EAS compatible Juno Energy model to 'juno.dts'
arm64: dts: juno: Add idle-states to device tree
ANDROID: Replace spaces by '_' for some android filesystem tracepoints.
usb: gadget: f_accessory: Fix for UsbAccessory clean unbind.
android: binder: move global binder state into context struct.
android: binder: add padding to binder_fd_array_object.
binder: use group leader instead of open thread
nf: IDLETIMER: Use fullsock when querying uid
nf: IDLETIMER: Fix use after free condition during work
ANDROID: dm: android-verity: fix table_make_digest() error handling
ANDROID: usb: gadget: function: Fix commenting style
cpufreq: interactive governor drops bits in time calculation
ANDROID: sdcardfs: support direct-IO (DIO) operations
ANDROID: sdcardfs: implement vm_ops->page_mkwrite
ANDROID: sdcardfs: Don't bother deleting freelist
ANDROID: sdcardfs: Add missing path_put
ANDROID: sdcardfs: Fix incorrect hash
ANDROID: ext4 crypto: Disables zeroing on truncation when there's no key
ANDROID: ext4: add a non-reversible key derivation method
ANDROID: ext4: allow encrypting filenames using HEH algorithm
ANDROID: arm64/crypto: add ARMv8-CE optimized poly_hash algorithm
ANDROID: crypto: heh - factor out poly_hash algorithm
ANDROID: crypto: heh - Add Hash-Encrypt-Hash (HEH) algorithm
ANDROID: crypto: gf128mul - Add ble multiplication functions
ANDROID: crypto: gf128mul - Refactor gf128 overflow macros and tables
UPSTREAM: crypto: gf128mul - Zero memory when freeing multiplication table
ANDROID: crypto: shash - Add crypto_grab_shash() and crypto_spawn_shash_alg()
ANDROID: crypto: allow blkcipher walks over ablkcipher data
UPSTREAM: arm/arm64: crypto: assure that ECB modes don't require an IV
ANDROID: Refactor fs readpage/write tracepoints.
ANDROID: export security_path_chown
Squashfs: optimize reading uncompressed data
Squashfs: implement .readpages()
Squashfs: replace buffer_head with BIO
Squashfs: refactor page_actor
Squashfs: remove the FILE_CACHE option
ANDROID: android-recommended.cfg: CONFIG_CPU_SW_DOMAIN_PAN=y
FROMLIST: 9p: fix a potential acl leak
BACKPORT: posix_acl: Clear SGID bit when setting file permissions
UPSTREAM: udp: properly support MSG_PEEK with truncated buffers
UPSTREAM: arm64: Allow hw watchpoint of length 3,5,6 and 7
BACKPORT: arm64: hw_breakpoint: Handle inexact watchpoint addresses
UPSTREAM: arm64: Allow hw watchpoint at varied offset from base address
BACKPORT: hw_breakpoint: Allow watchpoint of length 3,5,6 and 7
ANDROID: sdcardfs: Switch strcasecmp for internal call
ANDROID: sdcardfs: switch to full_name_hash and qstr
ANDROID: sdcardfs: Add GID Derivation to sdcardfs
ANDROID: sdcardfs: Remove redundant operation
ANDROID: sdcardfs: add support for user permission isolation
ANDROID: sdcardfs: Refactor configfs interface
ANDROID: sdcardfs: Allow non-owners to touch
ANDROID: binder: fix format specifier for type binder_size_t
ANDROID: fs: Export vfs_rmdir2
ANDROID: fs: Export free_fs_struct and set_fs_pwd
BACKPORT: Input: xpad - validate USB endpoint count during probe
BACKPORT: Input: xpad - fix oops when attaching an unknown Xbox One gamepad
ANDROID: mnt: remount should propagate to slaves of slaves
ANDROID: sdcardfs: Switch ->d_inode to d_inode()
ANDROID: sdcardfs: Fix locking issue with permision fix up
ANDROID: sdcardfs: Change magic value
ANDROID: sdcardfs: Use per mount permissions
ANDROID: sdcardfs: Add gid and mask to private mount data
ANDROID: sdcardfs: User new permission2 functions
ANDROID: vfs: Add setattr2 for filesystems with per mount permissions
ANDROID: vfs: Add permission2 for filesystems with per mount permissions
ANDROID: vfs: Allow filesystems to access their private mount data
ANDROID: mnt: Add filesystem private data to mount points
ANDROID: sdcardfs: Move directory unlock before touch
ANDROID: sdcardfs: fix external storage exporting incorrect uid
ANDROID: sdcardfs: Added top to sdcardfs_inode_info
ANDROID: sdcardfs: Switch package list to RCU
ANDROID: sdcardfs: Fix locking for permission fix up
ANDROID: sdcardfs: Check for other cases on path lookup
ANDROID: sdcardfs: override umask on mkdir and create
arm64: kernel: Fix build warning
DEBUG: sched/fair: Fix sched_load_avg_cpu events for task_groups
DEBUG: sched/fair: Fix missing sched_load_avg_cpu events
UPSTREAM: l2tp: fix racy SOCK_ZAPPED flag check in l2tp_ip{,6}_bind()
UPSTREAM: packet: fix race condition in packet_set_ring
UPSTREAM: netlink: Fix dump skb leak/double free
UPSTREAM: net: avoid signed overflows for SO_{SND|RCV}BUFFORCE
MIPS: Prevent "restoration" of MSA context in non-MSA kernels
net: socket: don't set sk_uid to garbage value in ->setattr()
ANDROID: configs: CONFIG_ARM64_SW_TTBR0_PAN=y
UPSTREAM: arm64: Disable PAN on uaccess_enable()
UPSTREAM: arm64: Enable CONFIG_ARM64_SW_TTBR0_PAN
UPSTREAM: arm64: xen: Enable user access before a privcmd hvc call
UPSTREAM: arm64: Handle faults caused by inadvertent user access with PAN enabled
BACKPORT: arm64: Disable TTBR0_EL1 during normal kernel execution
BACKPORT: arm64: Introduce uaccess_{disable,enable} functionality based on TTBR0_EL1
BACKPORT: arm64: Factor out TTBR0_EL1 post-update workaround into a specific asm macro
BACKPORT: arm64: Factor out PAN enabling/disabling into separate uaccess_* macros
UPSTREAM: arm64: alternative: add auto-nop infrastructure
UPSTREAM: arm64: barriers: introduce nops and __nops macros for NOP sequences
Revert "FROMLIST: arm64: Factor out PAN enabling/disabling into separate uaccess_* macros"
Revert "FROMLIST: arm64: Factor out TTBR0_EL1 post-update workaround into a specific asm macro"
Revert "FROMLIST: arm64: Introduce uaccess_{disable,enable} functionality based on TTBR0_EL1"
Revert "FROMLIST: arm64: Disable TTBR0_EL1 during normal kernel execution"
Revert "FROMLIST: arm64: Handle faults caused by inadvertent user access with PAN enabled"
Revert "FROMLIST: arm64: xen: Enable user access before a privcmd hvc call"
Revert "FROMLIST: arm64: Enable CONFIG_ARM64_SW_TTBR0_PAN"
ANDROID: sched/walt: fix build failure if FAIR_GROUP_SCHED=n
ANDROID: trace: net: use %pK for kernel pointers
ANDROID: android-base: Enable QUOTA related configs
net: ipv4: Don't crash if passing a null sk to ip_rt_update_pmtu.
net: inet: Support UID-based routing in IP protocols.
net: core: add UID to flows, rules, and routes
net: core: Add a UID field to struct sock.
Revert "net: core: Support UID-based routing."
UPSTREAM: efi/arm64: Don't apply MEMBLOCK_NOMAP to UEFI memory map mapping
UPSTREAM: arm64: mm: always take dirty state from new pte in ptep_set_access_flags
UPSTREAM: arm64: Implement pmdp_set_access_flags() for hardware AF/DBM
UPSTREAM: arm64: Fix typo in the pmdp_huge_get_and_clear() definition
UPSTREAM: arm64: enable CONFIG_DEBUG_RODATA by default
goldfish: enable CONFIG_INET_DIAG_DESTROY
sched/walt: kill {min,max}_capacity
sched: fix wrong truncation of walt_avg
build: fix build config kernel_dir
ANDROID: dm verity: add minimum prefetch size
build: add build server configs for goldfish
usb: gadget: Fix compilation problem with tx_qlen field
Conflicts:
Documentation/kasan.txt
Makefile
arch/arm64/Makefile
arch/arm64/boot/Makefile
arch/arm64/include/asm/assembler.h
arch/arm64/include/asm/cputype.h
arch/arm64/include/asm/efi.h
arch/arm64/include/asm/mmu_context.h
arch/arm64/include/asm/signal32.h
arch/arm64/include/asm/suspend.h
arch/arm64/include/asm/vdso_datapage.h
arch/arm64/kernel/asm-offsets.c
arch/arm64/kernel/cpufeature.c
arch/arm64/kernel/entry.S
arch/arm64/kernel/head.S
arch/arm64/kernel/insn.c
arch/arm64/kernel/io.c
arch/arm64/kernel/psci.c
arch/arm64/kernel/setup.c
arch/arm64/kernel/smp.c
arch/arm64/kernel/suspend.c
arch/arm64/kernel/traps.c
arch/arm64/kernel/vdso.c
arch/arm64/kernel/vdso/gettimeofday.S
arch/arm64/kernel/vmlinux.lds.S
arch/arm64/mm/cache.S
arch/arm64/mm/fault.c
arch/arm64/mm/kasan_init.c
arch/arm64/mm/mmu.c
arch/arm64/mm/proc.S
arch/x86/include/asm/thread_info.h
arch/x86/kernel/Makefile
arch/x86/kernel/kprobes/core.c
block/blk-core.c
build.config.common
drivers/Kconfig
drivers/Makefile
drivers/android/Makefile
drivers/android/binder.c
drivers/base/power/main.c
drivers/block/loop.c
drivers/clocksource/Kconfig
drivers/cpufreq/Kconfig
drivers/cpufreq/cpufreq-dt.c
drivers/gpu/drm/msm/msm_gem_submit.c
drivers/irqchip/Kconfig
drivers/irqchip/Makefile
drivers/md/Kconfig
drivers/md/Makefile
drivers/misc/Kconfig
drivers/misc/Makefile
drivers/misc/uid_sys_stats.c
drivers/scsi/ufs/ufshcd.c
drivers/scsi/ufs/ufshcd.h
drivers/staging/android/Kconfig
drivers/staging/android/Makefile
drivers/staging/android/fiq_debugger/fiq_watchdog.h
drivers/staging/android/ion/ion.c
drivers/staging/android/ion/ion_cma_heap.c
drivers/tee/optee/shm_pool.h
drivers/usb/dwc3/gadget.c
drivers/usb/gadget/configfs.c
drivers/usb/gadget/function/f_accessory.c
fs/exec.c
fs/ext4/crypto.c
fs/ext4/crypto_fname.c
fs/ext4/crypto_key.c
fs/ext4/ext4.h
fs/ext4/ext4_crypto.h
fs/f2fs/crypto_policy.c
fs/f2fs/data.c
fs/f2fs/f2fs.h
fs/f2fs/super.c
fs/proc/task_mmu.c
fs/sdcardfs/derived_perm.c
fs/sdcardfs/inode.c
fs/sdcardfs/main.c
fs/sdcardfs/sdcardfs.h
fs/squashfs/lz4_wrapper.c
include/linux/blkdev.h
include/linux/cpufreq.h
include/linux/dcache.h
include/linux/mmc/card.h
include/linux/mmc/mmc.h
include/linux/msm_mdp.h
include/linux/sched.h
include/linux/slab_def.h
include/linux/slub_def.h
include/linux/thread_info.h
include/trace/events/android_fs.h
include/trace/events/sched.h
include/uapi/linux/android/binder.h
include/uapi/linux/ipv6.h
include/uapi/linux/prctl.h
kernel/configs/android-base.config
kernel/configs/android-recommended.config
kernel/cpu.c
kernel/fork.c
kernel/sched/Makefile
kernel/sched/core.c
kernel/sched/cpufreq_sched.c
kernel/sched/cpufreq_schedutil.c
kernel/sched/fair.c
kernel/sched/rt.c
kernel/sched/sched.h
kernel/sched/stats.c
kernel/sched/walt.c
kernel/sys.c
kernel/sysctl.c
kernel/time/timekeeping.c
lib/Kconfig
lib/test_kasan.c
mm/kasan/Makefile
mm/kasan/kasan.c
mm/kasan/kasan.h
mm/kasan/report.c
mm/slab.c
mm/slab.h
mm/slub.c
net/ipv4/route.c
net/ipv4/tcp_ipv4.c
net/ipv4/xfrm4_policy.c
net/ipv6/route.c
net/netfilter/xt_IDLETIMER.c
net/netfilter/xt_qtaguid.c
net/netfilter/xt_quota2.c
net/socket.c
net/wireless/scan.c
scripts/Makefile.lib
scripts/checkpatch.pl
security/selinux/nlmsgtab.c
Skipped commit:
a08cafa7e0 ANDROID: ARM64: Allow to choose appended kernel image
Change-Id: I306e14a74d75f56cd39b5ad344f0f4440c26b52a
/*
|
|
* linux/mm/vmalloc.c
|
|
*
|
|
* Copyright (C) 1993 Linus Torvalds
|
|
* Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
|
|
* SMP-safe vmalloc/vfree/ioremap, Tigran Aivazian <tigran@veritas.com>, May 2000
|
|
* Major rework to support vmap/vunmap, Christoph Hellwig, SGI, August 2002
|
|
* Numa awareness, Christoph Lameter, SGI, June 2005
|
|
*/
|
|
|
|
#include <linux/vmalloc.h>
|
|
#include <linux/mm.h>
|
|
#include <linux/module.h>
|
|
#include <linux/highmem.h>
|
|
#include <linux/sched.h>
|
|
#include <linux/slab.h>
|
|
#include <linux/spinlock.h>
|
|
#include <linux/interrupt.h>
|
|
#include <linux/proc_fs.h>
|
|
#include <linux/seq_file.h>
|
|
#include <linux/debugobjects.h>
|
|
#include <linux/kallsyms.h>
|
|
#include <linux/list.h>
|
|
#include <linux/rbtree.h>
|
|
#include <linux/radix-tree.h>
|
|
#include <linux/rcupdate.h>
|
|
#include <linux/pfn.h>
|
|
#include <linux/kmemleak.h>
|
|
#include <linux/atomic.h>
|
|
#include <linux/compiler.h>
|
|
#include <linux/llist.h>
|
|
#include <linux/bitops.h>
|
|
|
|
#include <asm/uaccess.h>
|
|
#include <asm/tlbflush.h>
|
|
#include <asm/shmparam.h>
|
|
|
|
#include "internal.h"
|
|
|
|
struct vfree_deferred {
|
|
struct llist_head list;
|
|
struct work_struct wq;
|
|
};
|
|
static DEFINE_PER_CPU(struct vfree_deferred, vfree_deferred);
|
|
|
|
static void __vunmap(const void *, int);
|
|
|
|
static void free_work(struct work_struct *w)
|
|
{
|
|
struct vfree_deferred *p = container_of(w, struct vfree_deferred, wq);
|
|
struct llist_node *llnode = llist_del_all(&p->list);
|
|
while (llnode) {
|
|
void *p = llnode;
|
|
llnode = llist_next(llnode);
|
|
__vunmap(p, 1);
|
|
}
|
|
}
|
|
|
|
/*** Page table manipulation functions ***/
|
|
|
|
static void vunmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end)
|
|
{
|
|
pte_t *pte;
|
|
|
|
pte = pte_offset_kernel(pmd, addr);
|
|
do {
|
|
pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte);
|
|
WARN_ON(!pte_none(ptent) && !pte_present(ptent));
|
|
} while (pte++, addr += PAGE_SIZE, addr != end);
|
|
}
|
|
|
|
static void vunmap_pmd_range(pud_t *pud, unsigned long addr, unsigned long end)
|
|
{
|
|
pmd_t *pmd;
|
|
unsigned long next;
|
|
|
|
pmd = pmd_offset(pud, addr);
|
|
do {
|
|
next = pmd_addr_end(addr, end);
|
|
if (pmd_clear_huge(pmd))
|
|
continue;
|
|
if (pmd_none_or_clear_bad(pmd))
|
|
continue;
|
|
vunmap_pte_range(pmd, addr, next);
|
|
} while (pmd++, addr = next, addr != end);
|
|
}
|
|
|
|
static void vunmap_pud_range(pgd_t *pgd, unsigned long addr, unsigned long end)
|
|
{
|
|
pud_t *pud;
|
|
unsigned long next;
|
|
|
|
pud = pud_offset(pgd, addr);
|
|
do {
|
|
next = pud_addr_end(addr, end);
|
|
if (pud_clear_huge(pud))
|
|
continue;
|
|
if (pud_none_or_clear_bad(pud))
|
|
continue;
|
|
vunmap_pmd_range(pud, addr, next);
|
|
} while (pud++, addr = next, addr != end);
|
|
}
|
|
|
|
static void vunmap_page_range(unsigned long addr, unsigned long end)
|
|
{
|
|
pgd_t *pgd;
|
|
unsigned long next;
|
|
|
|
BUG_ON(addr >= end);
|
|
pgd = pgd_offset_k(addr);
|
|
do {
|
|
next = pgd_addr_end(addr, end);
|
|
if (pgd_none_or_clear_bad(pgd))
|
|
continue;
|
|
vunmap_pud_range(pgd, addr, next);
|
|
} while (pgd++, addr = next, addr != end);
|
|
}
|
|
|
|
static int vmap_pte_range(pmd_t *pmd, unsigned long addr,
|
|
unsigned long end, pgprot_t prot, struct page **pages, int *nr)
|
|
{
|
|
pte_t *pte;
|
|
|
|
/*
|
|
* nr is a running index into the array which helps higher level
|
|
* callers keep track of where we're up to.
|
|
*/
|
|
|
|
pte = pte_alloc_kernel(pmd, addr);
|
|
if (!pte)
|
|
return -ENOMEM;
|
|
do {
|
|
struct page *page = pages[*nr];
|
|
|
|
if (WARN_ON(!pte_none(*pte)))
|
|
return -EBUSY;
|
|
if (WARN_ON(!page))
|
|
return -ENOMEM;
|
|
set_pte_at(&init_mm, addr, pte, mk_pte(page, prot));
|
|
(*nr)++;
|
|
} while (pte++, addr += PAGE_SIZE, addr != end);
|
|
return 0;
|
|
}
|
|
|
|
static int vmap_pmd_range(pud_t *pud, unsigned long addr,
|
|
unsigned long end, pgprot_t prot, struct page **pages, int *nr)
|
|
{
|
|
pmd_t *pmd;
|
|
unsigned long next;
|
|
|
|
pmd = pmd_alloc(&init_mm, pud, addr);
|
|
if (!pmd)
|
|
return -ENOMEM;
|
|
do {
|
|
next = pmd_addr_end(addr, end);
|
|
if (vmap_pte_range(pmd, addr, next, prot, pages, nr))
|
|
return -ENOMEM;
|
|
} while (pmd++, addr = next, addr != end);
|
|
return 0;
|
|
}
|
|
|
|
static int vmap_pud_range(pgd_t *pgd, unsigned long addr,
|
|
unsigned long end, pgprot_t prot, struct page **pages, int *nr)
|
|
{
|
|
pud_t *pud;
|
|
unsigned long next;
|
|
|
|
pud = pud_alloc(&init_mm, pgd, addr);
|
|
if (!pud)
|
|
return -ENOMEM;
|
|
do {
|
|
next = pud_addr_end(addr, end);
|
|
if (vmap_pmd_range(pud, addr, next, prot, pages, nr))
|
|
return -ENOMEM;
|
|
} while (pud++, addr = next, addr != end);
|
|
return 0;
|
|
}
|
|
|
|
/*
|
|
* Set up page tables in kva (addr, end). The ptes shall have prot "prot", and
|
|
* will have pfns corresponding to the "pages" array.
|
|
*
|
|
* Ie. pte at addr+N*PAGE_SIZE shall point to pfn corresponding to pages[N]
|
|
*/
|
|
static int vmap_page_range_noflush(unsigned long start, unsigned long end,
|
|
pgprot_t prot, struct page **pages)
|
|
{
|
|
pgd_t *pgd;
|
|
unsigned long next;
|
|
unsigned long addr = start;
|
|
int err = 0;
|
|
int nr = 0;
|
|
|
|
BUG_ON(addr >= end);
|
|
pgd = pgd_offset_k(addr);
|
|
do {
|
|
next = pgd_addr_end(addr, end);
|
|
err = vmap_pud_range(pgd, addr, next, prot, pages, &nr);
|
|
if (err)
|
|
return err;
|
|
} while (pgd++, addr = next, addr != end);
|
|
|
|
return nr;
|
|
}
|
|
|
|
static int vmap_page_range(unsigned long start, unsigned long end,
|
|
pgprot_t prot, struct page **pages)
|
|
{
|
|
int ret;
|
|
|
|
ret = vmap_page_range_noflush(start, end, prot, pages);
|
|
flush_cache_vmap(start, end);
|
|
return ret;
|
|
}
|
|
|
|
int is_vmalloc_or_module_addr(const void *x)
|
|
{
|
|
/*
|
|
* ARM, x86-64 and sparc64 put modules in a special place,
|
|
* and fall back on vmalloc() if that fails. Others
|
|
* just put it in the vmalloc space.
|
|
*/
|
|
#if defined(CONFIG_MODULES) && defined(MODULES_VADDR)
|
|
unsigned long addr = (unsigned long)x;
|
|
if (addr >= MODULES_VADDR && addr < MODULES_END)
|
|
return 1;
|
|
#endif
|
|
return is_vmalloc_addr(x);
|
|
}
|
|
|
|
/*
|
|
* Walk a vmap address to the struct page it maps.
|
|
*/
|
|
struct page *vmalloc_to_page(const void *vmalloc_addr)
|
|
{
|
|
unsigned long addr = (unsigned long) vmalloc_addr;
|
|
struct page *page = NULL;
|
|
pgd_t *pgd = pgd_offset_k(addr);
|
|
|
|
/*
|
|
* XXX we might need to change this if we add VIRTUAL_BUG_ON for
|
|
* architectures that do not vmalloc module space
|
|
*/
|
|
VIRTUAL_BUG_ON(!is_vmalloc_or_module_addr(vmalloc_addr));
|
|
|
|
if (!pgd_none(*pgd)) {
|
|
pud_t *pud = pud_offset(pgd, addr);
|
|
if (!pud_none(*pud)) {
|
|
pmd_t *pmd = pmd_offset(pud, addr);
|
|
if (!pmd_none(*pmd)) {
|
|
pte_t *ptep, pte;
|
|
|
|
ptep = pte_offset_map(pmd, addr);
|
|
pte = *ptep;
|
|
if (pte_present(pte))
|
|
page = pte_page(pte);
|
|
pte_unmap(ptep);
|
|
}
|
|
}
|
|
}
|
|
return page;
|
|
}
|
|
EXPORT_SYMBOL(vmalloc_to_page);
|
|
|
|
/*
|
|
* Map a vmalloc()-space virtual address to the physical page frame number.
|
|
*/
|
|
unsigned long vmalloc_to_pfn(const void *vmalloc_addr)
|
|
{
|
|
return page_to_pfn(vmalloc_to_page(vmalloc_addr));
|
|
}
|
|
EXPORT_SYMBOL(vmalloc_to_pfn);
|
|
|
|
|
|
/*** Global kva allocator ***/
|
|
|
|
#define VM_LAZY_FREE 0x01
|
|
#define VM_LAZY_FREEING 0x02
|
|
#define VM_VM_AREA 0x04
|
|
|
|
static DEFINE_SPINLOCK(vmap_area_lock);
|
|
/* Export for kexec only */
|
|
LIST_HEAD(vmap_area_list);
|
|
static struct rb_root vmap_area_root = RB_ROOT;
|
|
|
|
/* The vmap cache globals are protected by vmap_area_lock */
|
|
static struct rb_node *free_vmap_cache;
|
|
static unsigned long cached_hole_size;
|
|
static unsigned long cached_vstart;
|
|
static unsigned long cached_align;
|
|
|
|
static unsigned long vmap_area_pcpu_hole;
|
|
|
|
#ifdef CONFIG_ENABLE_VMALLOC_SAVING
|
|
#define POSSIBLE_VMALLOC_START PAGE_OFFSET
|
|
|
|
#define VMALLOC_BITMAP_SIZE ((VMALLOC_END - PAGE_OFFSET) >> \
|
|
PAGE_SHIFT)
|
|
#define VMALLOC_TO_BIT(addr) ((addr - PAGE_OFFSET) >> PAGE_SHIFT)
|
|
#define BIT_TO_VMALLOC(i) (PAGE_OFFSET + i * PAGE_SIZE)
|
|
|
|
unsigned long total_vmalloc_size;
|
|
unsigned long vmalloc_reserved;
|
|
|
|
DECLARE_BITMAP(possible_areas, VMALLOC_BITMAP_SIZE);
|
|
|
|
void mark_vmalloc_reserved_area(void *x, unsigned long size)
|
|
{
|
|
unsigned long addr = (unsigned long)x;
|
|
|
|
bitmap_set(possible_areas, VMALLOC_TO_BIT(addr), size >> PAGE_SHIFT);
|
|
vmalloc_reserved += size;
|
|
}
|
|
|
|
int is_vmalloc_addr(const void *x)
|
|
{
|
|
unsigned long addr = (unsigned long)x;
|
|
|
|
if (addr < POSSIBLE_VMALLOC_START || addr >= VMALLOC_END)
|
|
return 0;
|
|
|
|
if (test_bit(VMALLOC_TO_BIT(addr), possible_areas))
|
|
return 0;
|
|
|
|
return 1;
|
|
}
|
|
|
|
static void calc_total_vmalloc_size(void)
|
|
{
|
|
total_vmalloc_size = VMALLOC_END - POSSIBLE_VMALLOC_START -
|
|
vmalloc_reserved;
|
|
}
|
|
#else
|
|
int is_vmalloc_addr(const void *x)
|
|
{
|
|
unsigned long addr = (unsigned long)x;
|
|
|
|
return addr >= VMALLOC_START && addr < VMALLOC_END;
|
|
}
|
|
|
|
static void calc_total_vmalloc_size(void) { }
|
|
#endif
|
|
EXPORT_SYMBOL(is_vmalloc_addr);
|
|
|
|
static struct vmap_area *__find_vmap_area(unsigned long addr)
|
|
{
|
|
struct rb_node *n = vmap_area_root.rb_node;
|
|
|
|
while (n) {
|
|
struct vmap_area *va;
|
|
|
|
va = rb_entry(n, struct vmap_area, rb_node);
|
|
if (addr < va->va_start)
|
|
n = n->rb_left;
|
|
else if (addr >= va->va_end)
|
|
n = n->rb_right;
|
|
else
|
|
return va;
|
|
}
|
|
|
|
return NULL;
|
|
}
|
|
|
|
static void __insert_vmap_area(struct vmap_area *va)
|
|
{
|
|
struct rb_node **p = &vmap_area_root.rb_node;
|
|
struct rb_node *parent = NULL;
|
|
struct rb_node *tmp;
|
|
|
|
while (*p) {
|
|
struct vmap_area *tmp_va;
|
|
|
|
parent = *p;
|
|
tmp_va = rb_entry(parent, struct vmap_area, rb_node);
|
|
if (va->va_start < tmp_va->va_end)
|
|
p = &(*p)->rb_left;
|
|
else if (va->va_end > tmp_va->va_start)
|
|
p = &(*p)->rb_right;
|
|
else
|
|
BUG();
|
|
}
|
|
|
|
rb_link_node(&va->rb_node, parent, p);
|
|
rb_insert_color(&va->rb_node, &vmap_area_root);
|
|
|
|
/* address-sort this list */
|
|
tmp = rb_prev(&va->rb_node);
|
|
if (tmp) {
|
|
struct vmap_area *prev;
|
|
prev = rb_entry(tmp, struct vmap_area, rb_node);
|
|
list_add_rcu(&va->list, &prev->list);
|
|
} else
|
|
list_add_rcu(&va->list, &vmap_area_list);
|
|
}
|
|
|
|
static void purge_vmap_area_lazy(void);
|
|
|
|
/*
|
|
* Allocate a region of KVA of the specified size and alignment, within the
|
|
* vstart and vend.
|
|
*/
|
|
static struct vmap_area *alloc_vmap_area(unsigned long size,
|
|
unsigned long align,
|
|
unsigned long vstart, unsigned long vend,
|
|
int node, gfp_t gfp_mask)
|
|
{
|
|
struct vmap_area *va;
|
|
struct rb_node *n;
|
|
unsigned long addr;
|
|
int purged = 0;
|
|
struct vmap_area *first;
|
|
|
|
BUG_ON(!size);
|
|
BUG_ON(offset_in_page(size));
|
|
BUG_ON(!is_power_of_2(align));
|
|
|
|
va = kmalloc_node(sizeof(struct vmap_area),
|
|
gfp_mask & GFP_RECLAIM_MASK, node);
|
|
if (unlikely(!va))
|
|
return ERR_PTR(-ENOMEM);
|
|
|
|
/*
|
|
* Only scan the relevant parts containing pointers to other objects
|
|
* to avoid false negatives.
|
|
*/
|
|
kmemleak_scan_area(&va->rb_node, SIZE_MAX, gfp_mask & GFP_RECLAIM_MASK);
|
|
|
|
retry:
|
|
spin_lock(&vmap_area_lock);
|
|
/*
|
|
* Invalidate cache if we have more permissive parameters.
|
|
* cached_hole_size notes the largest hole noticed _below_
|
|
* the vmap_area cached in free_vmap_cache: if size fits
|
|
* into that hole, we want to scan from vstart to reuse
|
|
* the hole instead of allocating above free_vmap_cache.
|
|
* Note that __free_vmap_area may update free_vmap_cache
|
|
* without updating cached_hole_size or cached_align.
|
|
*/
|
|
if (!free_vmap_cache ||
|
|
size < cached_hole_size ||
|
|
vstart < cached_vstart ||
|
|
align < cached_align) {
|
|
nocache:
|
|
cached_hole_size = 0;
|
|
free_vmap_cache = NULL;
|
|
}
|
|
/* record if we encounter less permissive parameters */
|
|
cached_vstart = vstart;
|
|
cached_align = align;
|
|
|
|
/* find starting point for our search */
|
|
if (free_vmap_cache) {
|
|
first = rb_entry(free_vmap_cache, struct vmap_area, rb_node);
|
|
addr = ALIGN(first->va_end, align);
|
|
if (addr < vstart)
|
|
goto nocache;
|
|
if (addr + size < addr)
|
|
goto overflow;
|
|
|
|
} else {
|
|
addr = ALIGN(vstart, align);
|
|
if (addr + size < addr)
|
|
goto overflow;
|
|
|
|
n = vmap_area_root.rb_node;
|
|
first = NULL;
|
|
|
|
while (n) {
|
|
struct vmap_area *tmp;
|
|
tmp = rb_entry(n, struct vmap_area, rb_node);
|
|
if (tmp->va_end >= addr) {
|
|
first = tmp;
|
|
if (tmp->va_start <= addr)
|
|
break;
|
|
n = n->rb_left;
|
|
} else
|
|
n = n->rb_right;
|
|
}
|
|
|
|
if (!first)
|
|
goto found;
|
|
}
|
|
|
|
/* from the starting point, walk areas until a suitable hole is found */
|
|
while (addr + size > first->va_start && addr + size <= vend) {
|
|
if (addr + cached_hole_size < first->va_start)
|
|
cached_hole_size = first->va_start - addr;
|
|
addr = ALIGN(first->va_end, align);
|
|
if (addr + size < addr)
|
|
goto overflow;
|
|
|
|
if (list_is_last(&first->list, &vmap_area_list))
|
|
goto found;
|
|
|
|
first = list_entry(first->list.next,
|
|
struct vmap_area, list);
|
|
}
|
|
|
|
found:
|
|
/*
|
|
* Check also calculated address against the vstart,
|
|
* because it can be 0 because of big align request.
|
|
*/
|
|
if (addr + size > vend || addr < vstart)
|
|
goto overflow;
|
|
|
|
va->va_start = addr;
|
|
va->va_end = addr + size;
|
|
va->flags = 0;
|
|
__insert_vmap_area(va);
|
|
free_vmap_cache = &va->rb_node;
|
|
spin_unlock(&vmap_area_lock);
|
|
|
|
BUG_ON(va->va_start & (align-1));
|
|
BUG_ON(va->va_start < vstart);
|
|
BUG_ON(va->va_end > vend);
|
|
|
|
return va;
|
|
|
|
overflow:
|
|
spin_unlock(&vmap_area_lock);
|
|
if (!purged) {
|
|
purge_vmap_area_lazy();
|
|
purged = 1;
|
|
goto retry;
|
|
}
|
|
if (printk_ratelimit())
|
|
pr_warn("vmap allocation for size %lu failed: use vmalloc=<size> to increase size\n",
|
|
size);
|
|
kfree(va);
|
|
return ERR_PTR(-EBUSY);
|
|
}
|
|
|
|
static void __free_vmap_area(struct vmap_area *va)
|
|
{
|
|
BUG_ON(RB_EMPTY_NODE(&va->rb_node));
|
|
|
|
if (free_vmap_cache) {
|
|
if (va->va_end < cached_vstart) {
|
|
free_vmap_cache = NULL;
|
|
} else {
|
|
struct vmap_area *cache;
|
|
cache = rb_entry(free_vmap_cache, struct vmap_area, rb_node);
|
|
if (va->va_start <= cache->va_start) {
|
|
free_vmap_cache = rb_prev(&va->rb_node);
|
|
/*
|
|
* We don't try to update cached_hole_size or
|
|
* cached_align, but it won't go very wrong.
|
|
*/
|
|
}
|
|
}
|
|
}
|
|
rb_erase(&va->rb_node, &vmap_area_root);
|
|
RB_CLEAR_NODE(&va->rb_node);
|
|
list_del_rcu(&va->list);
|
|
|
|
/*
|
|
* Track the highest possible candidate for pcpu area
|
|
* allocation. Areas outside of vmalloc area can be returned
|
|
* here too, consider only end addresses which fall inside
|
|
* vmalloc area proper.
|
|
*/
|
|
if (va->va_end > VMALLOC_START && va->va_end <= VMALLOC_END)
|
|
vmap_area_pcpu_hole = max(vmap_area_pcpu_hole, va->va_end);
|
|
|
|
kfree_rcu(va, rcu_head);
|
|
}
|
|
|
|
/*
|
|
* Free a region of KVA allocated by alloc_vmap_area
|
|
*/
|
|
static void free_vmap_area(struct vmap_area *va)
|
|
{
|
|
spin_lock(&vmap_area_lock);
|
|
__free_vmap_area(va);
|
|
spin_unlock(&vmap_area_lock);
|
|
}
|
|
|
|
/*
|
|
* Clear the pagetable entries of a given vmap_area
|
|
*/
|
|
static void unmap_vmap_area(struct vmap_area *va)
|
|
{
|
|
vunmap_page_range(va->va_start, va->va_end);
|
|
}
|
|
|
|
static void vmap_debug_free_range(unsigned long start, unsigned long end)
|
|
{
|
|
/*
|
|
* Unmap page tables and force a TLB flush immediately if
|
|
* CONFIG_DEBUG_PAGEALLOC is set. This catches use after free
|
|
* bugs similarly to those in linear kernel virtual address
|
|
* space after a page has been freed.
|
|
*
|
|
* All the lazy freeing logic is still retained, in order to
|
|
* minimise intrusiveness of this debugging feature.
|
|
*
|
|
* This is going to be *slow* (linear kernel virtual address
|
|
* debugging doesn't do a broadcast TLB flush so it is a lot
|
|
* faster).
|
|
*/
|
|
#ifdef CONFIG_DEBUG_PAGEALLOC
|
|
vunmap_page_range(start, end);
|
|
flush_tlb_kernel_range(start, end);
|
|
#endif
|
|
}
|
|
|
|
/*
|
|
* lazy_max_pages is the maximum amount of virtual address space we gather up
|
|
* before attempting to purge with a TLB flush.
|
|
*
|
|
* There is a tradeoff here: a larger number will cover more kernel page tables
|
|
* and take slightly longer to purge, but it will linearly reduce the number of
|
|
* global TLB flushes that must be performed. It would seem natural to scale
|
|
* this number up linearly with the number of CPUs (because vmapping activity
|
|
* could also scale linearly with the number of CPUs), however it is likely
|
|
* that in practice, workloads might be constrained in other ways that mean
|
|
* vmap activity will not scale linearly with CPUs. Also, I want to be
|
|
* conservative and not introduce a big latency on huge systems, so go with
|
|
* a less aggressive log scale. It will still be an improvement over the old
|
|
* code, and it will be simple to change the scale factor if we find that it
|
|
* becomes a problem on bigger systems.
|
|
*/
|
|
static unsigned long lazy_max_pages(void)
|
|
{
|
|
unsigned int log;
|
|
|
|
log = fls(num_online_cpus());
|
|
|
|
return log * (32UL * 1024 * 1024 / PAGE_SIZE);
|
|
}
|
|
|
|
static atomic_t vmap_lazy_nr = ATOMIC_INIT(0);
|
|
|
|
/* for per-CPU blocks */
|
|
static void purge_fragmented_blocks_allcpus(void);
|
|
|
|
/*
|
|
* called before a call to iounmap() if the caller wants vm_area_struct's
|
|
* immediately freed.
|
|
*/
|
|
void set_iounmap_nonlazy(void)
|
|
{
|
|
atomic_set(&vmap_lazy_nr, lazy_max_pages()+1);
|
|
}
|
|
|
|
/*
|
|
* Purges all lazily-freed vmap areas.
|
|
*
|
|
* If sync is 0 then don't purge if there is already a purge in progress.
|
|
* If force_flush is 1, then flush kernel TLBs between *start and *end even
|
|
* if we found no lazy vmap areas to unmap (callers can use this to optimise
|
|
* their own TLB flushing).
|
|
* Returns with *start = min(*start, lowest purged address)
|
|
* *end = max(*end, highest purged address)
|
|
*/
|
|
static void __purge_vmap_area_lazy(unsigned long *start, unsigned long *end,
|
|
int sync, int force_flush)
|
|
{
|
|
static DEFINE_SPINLOCK(purge_lock);
|
|
LIST_HEAD(valist);
|
|
struct vmap_area *va;
|
|
struct vmap_area *n_va;
|
|
int nr = 0;
|
|
|
|
/*
|
|
* If sync is 0 but force_flush is 1, we'll go sync anyway but callers
|
|
* should not expect such behaviour. This just simplifies locking for
|
|
* the case that isn't actually used at the moment anyway.
|
|
*/
|
|
if (!sync && !force_flush) {
|
|
if (!spin_trylock(&purge_lock))
|
|
return;
|
|
} else
|
|
spin_lock(&purge_lock);
|
|
|
|
if (sync)
|
|
purge_fragmented_blocks_allcpus();
|
|
|
|
rcu_read_lock();
|
|
list_for_each_entry_rcu(va, &vmap_area_list, list) {
|
|
if (va->flags & VM_LAZY_FREE) {
|
|
if (va->va_start < *start)
|
|
*start = va->va_start;
|
|
if (va->va_end > *end)
|
|
*end = va->va_end;
|
|
nr += (va->va_end - va->va_start) >> PAGE_SHIFT;
|
|
list_add_tail(&va->purge_list, &valist);
|
|
va->flags |= VM_LAZY_FREEING;
|
|
va->flags &= ~VM_LAZY_FREE;
|
|
}
|
|
}
|
|
rcu_read_unlock();
|
|
|
|
if (nr)
|
|
atomic_sub(nr, &vmap_lazy_nr);
|
|
|
|
if (nr || force_flush)
|
|
flush_tlb_kernel_range(*start, *end);
|
|
|
|
if (nr) {
|
|
spin_lock(&vmap_area_lock);
|
|
list_for_each_entry_safe(va, n_va, &valist, purge_list)
|
|
__free_vmap_area(va);
|
|
spin_unlock(&vmap_area_lock);
|
|
}
|
|
spin_unlock(&purge_lock);
|
|
}
|
|
|
|
/*
|
|
* Kick off a purge of the outstanding lazy areas. Don't bother if somebody
|
|
* is already purging.
|
|
*/
|
|
static void try_purge_vmap_area_lazy(void)
|
|
{
|
|
unsigned long start = ULONG_MAX, end = 0;
|
|
|
|
__purge_vmap_area_lazy(&start, &end, 0, 0);
|
|
}
|
|
|
|
/*
|
|
* Kick off a purge of the outstanding lazy areas.
|
|
*/
|
|
static void purge_vmap_area_lazy(void)
|
|
{
|
|
unsigned long start = ULONG_MAX, end = 0;
|
|
|
|
__purge_vmap_area_lazy(&start, &end, 1, 0);
|
|
}
|
|
|
|
/*
|
|
* Free a vmap area, caller ensuring that the area has been unmapped
|
|
* and flush_cache_vunmap had been called for the correct range
|
|
* previously.
|
|
*/
|
|
static void free_vmap_area_noflush(struct vmap_area *va)
|
|
{
|
|
va->flags |= VM_LAZY_FREE;
|
|
atomic_add((va->va_end - va->va_start) >> PAGE_SHIFT, &vmap_lazy_nr);
|
|
if (unlikely(atomic_read(&vmap_lazy_nr) > lazy_max_pages()))
|
|
try_purge_vmap_area_lazy();
|
|
}
|
|
|
|
/*
|
|
* Free and unmap a vmap area, caller ensuring flush_cache_vunmap had been
|
|
* called for the correct range previously.
|
|
*/
|
|
static void free_unmap_vmap_area_noflush(struct vmap_area *va)
|
|
{
|
|
unmap_vmap_area(va);
|
|
free_vmap_area_noflush(va);
|
|
}
|
|
|
|
/*
|
|
* Free and unmap a vmap area
|
|
*/
|
|
static void free_unmap_vmap_area(struct vmap_area *va)
|
|
{
|
|
flush_cache_vunmap(va->va_start, va->va_end);
|
|
free_unmap_vmap_area_noflush(va);
|
|
}
|
|
|
|
static struct vmap_area *find_vmap_area(unsigned long addr)
|
|
{
|
|
struct vmap_area *va;
|
|
|
|
spin_lock(&vmap_area_lock);
|
|
va = __find_vmap_area(addr);
|
|
spin_unlock(&vmap_area_lock);
|
|
|
|
return va;
|
|
}
|
|
|
|
static void free_unmap_vmap_area_addr(unsigned long addr)
|
|
{
|
|
struct vmap_area *va;
|
|
|
|
va = find_vmap_area(addr);
|
|
BUG_ON(!va);
|
|
free_unmap_vmap_area(va);
|
|
}
|
|
|
|
|
|
/*** Per cpu kva allocator ***/
|
|
|
|
/*
|
|
* vmap space is limited especially on 32 bit architectures. Ensure there is
|
|
* room for at least 16 percpu vmap blocks per CPU.
|
|
*/
|
|
/*
|
|
* If we had a constant VMALLOC_START and VMALLOC_END, we'd like to be able
|
|
* to #define VMALLOC_SPACE (VMALLOC_END-VMALLOC_START). Guess
|
|
* instead (we just need a rough idea)
|
|
*/
|
|
#if BITS_PER_LONG == 32
|
|
#define VMALLOC_SPACE (128UL*1024*1024)
|
|
#else
|
|
#define VMALLOC_SPACE (128UL*1024*1024*1024)
|
|
#endif
|
|
|
|
#define VMALLOC_PAGES (VMALLOC_SPACE / PAGE_SIZE)
|
|
#define VMAP_MAX_ALLOC BITS_PER_LONG /* 256K with 4K pages */
|
|
#define VMAP_BBMAP_BITS_MAX 1024 /* 4MB with 4K pages */
|
|
#define VMAP_BBMAP_BITS_MIN (VMAP_MAX_ALLOC*2)
|
|
#define VMAP_MIN(x, y) ((x) < (y) ? (x) : (y)) /* can't use min() */
|
|
#define VMAP_MAX(x, y) ((x) > (y) ? (x) : (y)) /* can't use max() */
|
|
#define VMAP_BBMAP_BITS \
|
|
VMAP_MIN(VMAP_BBMAP_BITS_MAX, \
|
|
VMAP_MAX(VMAP_BBMAP_BITS_MIN, \
|
|
VMALLOC_PAGES / roundup_pow_of_two(NR_CPUS) / 16))
|
|
|
|
#define VMAP_BLOCK_SIZE (VMAP_BBMAP_BITS * PAGE_SIZE)
|
|
|
|
static bool vmap_initialized __read_mostly = false;
|
|
|
|
struct vmap_block_queue {
|
|
spinlock_t lock;
|
|
struct list_head free;
|
|
};
|
|
|
|
struct vmap_block {
|
|
spinlock_t lock;
|
|
struct vmap_area *va;
|
|
unsigned long free, dirty;
|
|
unsigned long dirty_min, dirty_max; /*< dirty range */
|
|
struct list_head free_list;
|
|
struct rcu_head rcu_head;
|
|
struct list_head purge;
|
|
};
|
|
|
|
/* Queue of free and dirty vmap blocks, for allocation and flushing purposes */
|
|
static DEFINE_PER_CPU(struct vmap_block_queue, vmap_block_queue);
|
|
|
|
/*
|
|
* Radix tree of vmap blocks, indexed by address, to quickly find a vmap block
|
|
* in the free path. Could get rid of this if we change the API to return a
|
|
* "cookie" from alloc, to be passed to free. But no big deal yet.
|
|
*/
|
|
static DEFINE_SPINLOCK(vmap_block_tree_lock);
|
|
static RADIX_TREE(vmap_block_tree, GFP_ATOMIC);
|
|
|
|
/*
|
|
* We should probably have a fallback mechanism to allocate virtual memory
|
|
* out of partially filled vmap blocks. However vmap block sizing should be
|
|
* fairly reasonable according to the vmalloc size, so it shouldn't be a
|
|
* big problem.
|
|
*/
|
|
|
|
static unsigned long addr_to_vb_idx(unsigned long addr)
|
|
{
|
|
addr -= VMALLOC_START & ~(VMAP_BLOCK_SIZE-1);
|
|
addr /= VMAP_BLOCK_SIZE;
|
|
return addr;
|
|
}
|
|
|
|
static void *vmap_block_vaddr(unsigned long va_start, unsigned long pages_off)
|
|
{
|
|
unsigned long addr;
|
|
|
|
addr = va_start + (pages_off << PAGE_SHIFT);
|
|
BUG_ON(addr_to_vb_idx(addr) != addr_to_vb_idx(va_start));
|
|
return (void *)addr;
|
|
}
|
|
|
|
/**
|
|
* new_vmap_block - allocates new vmap_block and occupies 2^order pages in this
|
|
* block. Of course pages number can't exceed VMAP_BBMAP_BITS
|
|
* @order: how many 2^order pages should be occupied in newly allocated block
|
|
* @gfp_mask: flags for the page level allocator
|
|
*
|
|
* Returns: virtual address in a newly allocated block or ERR_PTR(-errno)
|
|
*/
|
|
static void *new_vmap_block(unsigned int order, gfp_t gfp_mask)
|
|
{
|
|
struct vmap_block_queue *vbq;
|
|
struct vmap_block *vb;
|
|
struct vmap_area *va;
|
|
unsigned long vb_idx;
|
|
int node, err;
|
|
void *vaddr;
|
|
|
|
node = numa_node_id();
|
|
|
|
vb = kmalloc_node(sizeof(struct vmap_block),
|
|
gfp_mask & GFP_RECLAIM_MASK, node);
|
|
if (unlikely(!vb))
|
|
return ERR_PTR(-ENOMEM);
|
|
|
|
va = alloc_vmap_area(VMAP_BLOCK_SIZE, VMAP_BLOCK_SIZE,
|
|
VMALLOC_START, VMALLOC_END,
|
|
node, gfp_mask);
|
|
if (IS_ERR(va)) {
|
|
kfree(vb);
|
|
return ERR_CAST(va);
|
|
}
|
|
|
|
err = radix_tree_preload(gfp_mask);
|
|
if (unlikely(err)) {
|
|
kfree(vb);
|
|
free_vmap_area(va);
|
|
return ERR_PTR(err);
|
|
}
|
|
|
|
vaddr = vmap_block_vaddr(va->va_start, 0);
|
|
spin_lock_init(&vb->lock);
|
|
vb->va = va;
|
|
/* At least something should be left free */
|
|
BUG_ON(VMAP_BBMAP_BITS <= (1UL << order));
|
|
vb->free = VMAP_BBMAP_BITS - (1UL << order);
|
|
vb->dirty = 0;
|
|
vb->dirty_min = VMAP_BBMAP_BITS;
|
|
vb->dirty_max = 0;
|
|
INIT_LIST_HEAD(&vb->free_list);
|
|
|
|
vb_idx = addr_to_vb_idx(va->va_start);
|
|
spin_lock(&vmap_block_tree_lock);
|
|
err = radix_tree_insert(&vmap_block_tree, vb_idx, vb);
|
|
spin_unlock(&vmap_block_tree_lock);
|
|
BUG_ON(err);
|
|
radix_tree_preload_end();
|
|
|
|
vbq = &get_cpu_var(vmap_block_queue);
|
|
spin_lock(&vbq->lock);
|
|
list_add_tail_rcu(&vb->free_list, &vbq->free);
|
|
spin_unlock(&vbq->lock);
|
|
put_cpu_var(vmap_block_queue);
|
|
|
|
return vaddr;
|
|
}
|
|
|
|
static void free_vmap_block(struct vmap_block *vb)
{
	struct vmap_block *tmp;
	unsigned long vb_idx;

	vb_idx = addr_to_vb_idx(vb->va->va_start);
	spin_lock(&vmap_block_tree_lock);
	tmp = radix_tree_delete(&vmap_block_tree, vb_idx);
	spin_unlock(&vmap_block_tree_lock);
	BUG_ON(tmp != vb);

	free_vmap_area_noflush(vb->va);
	kfree_rcu(vb, rcu_head);
}

static void purge_fragmented_blocks(int cpu)
{
	LIST_HEAD(purge);
	struct vmap_block *vb;
	struct vmap_block *n_vb;
	struct vmap_block_queue *vbq = &per_cpu(vmap_block_queue, cpu);

	rcu_read_lock();
	list_for_each_entry_rcu(vb, &vbq->free, free_list) {

		if (!(vb->free + vb->dirty == VMAP_BBMAP_BITS && vb->dirty != VMAP_BBMAP_BITS))
			continue;

		spin_lock(&vb->lock);
		if (vb->free + vb->dirty == VMAP_BBMAP_BITS && vb->dirty != VMAP_BBMAP_BITS) {
			vb->free = 0; /* prevent further allocs after releasing lock */
			vb->dirty = VMAP_BBMAP_BITS; /* prevent purging it again */
			vb->dirty_min = 0;
			vb->dirty_max = VMAP_BBMAP_BITS;
			spin_lock(&vbq->lock);
			list_del_rcu(&vb->free_list);
			spin_unlock(&vbq->lock);
			spin_unlock(&vb->lock);
			list_add_tail(&vb->purge, &purge);
		} else
			spin_unlock(&vb->lock);
	}
	rcu_read_unlock();

	list_for_each_entry_safe(vb, n_vb, &purge, purge) {
		list_del(&vb->purge);
		free_vmap_block(vb);
	}
}

static void purge_fragmented_blocks_allcpus(void)
{
	int cpu;

	for_each_possible_cpu(cpu)
		purge_fragmented_blocks(cpu);
}

static void *vb_alloc(unsigned long size, gfp_t gfp_mask)
{
	struct vmap_block_queue *vbq;
	struct vmap_block *vb;
	void *vaddr = NULL;
	unsigned int order;

	BUG_ON(offset_in_page(size));
	BUG_ON(size > PAGE_SIZE*VMAP_MAX_ALLOC);
	if (WARN_ON(size == 0)) {
		/*
		 * Allocating 0 bytes isn't what the caller wants, and
		 * get_order(0) returns a nonsensical result. Just warn
		 * and terminate early.
		 */
		return NULL;
	}
	order = get_order(size);

	rcu_read_lock();
	vbq = &get_cpu_var(vmap_block_queue);
	list_for_each_entry_rcu(vb, &vbq->free, free_list) {
		unsigned long pages_off;

		spin_lock(&vb->lock);
		if (vb->free < (1UL << order)) {
			spin_unlock(&vb->lock);
			continue;
		}

		pages_off = VMAP_BBMAP_BITS - vb->free;
		vaddr = vmap_block_vaddr(vb->va->va_start, pages_off);
		vb->free -= 1UL << order;
		if (vb->free == 0) {
			spin_lock(&vbq->lock);
			list_del_rcu(&vb->free_list);
			spin_unlock(&vbq->lock);
		}

		spin_unlock(&vb->lock);
		break;
	}

	put_cpu_var(vmap_block_queue);
	rcu_read_unlock();

	/* Allocate new block if nothing was found */
	if (!vaddr)
		vaddr = new_vmap_block(order, gfp_mask);

	return vaddr;
}

static void vb_free(const void *addr, unsigned long size)
{
	unsigned long offset;
	unsigned long vb_idx;
	unsigned int order;
	struct vmap_block *vb;

	BUG_ON(offset_in_page(size));
	BUG_ON(size > PAGE_SIZE*VMAP_MAX_ALLOC);

	flush_cache_vunmap((unsigned long)addr, (unsigned long)addr + size);

	order = get_order(size);

	offset = (unsigned long)addr & (VMAP_BLOCK_SIZE - 1);
	offset >>= PAGE_SHIFT;

	vb_idx = addr_to_vb_idx((unsigned long)addr);
	rcu_read_lock();
	vb = radix_tree_lookup(&vmap_block_tree, vb_idx);
	rcu_read_unlock();
	BUG_ON(!vb);

	vunmap_page_range((unsigned long)addr, (unsigned long)addr + size);

	spin_lock(&vb->lock);

	/* Expand dirty range */
	vb->dirty_min = min(vb->dirty_min, offset);
	vb->dirty_max = max(vb->dirty_max, offset + (1UL << order));

	vb->dirty += 1UL << order;
	if (vb->dirty == VMAP_BBMAP_BITS) {
		BUG_ON(vb->free);
		spin_unlock(&vb->lock);
		free_vmap_block(vb);
	} else
		spin_unlock(&vb->lock);
}

/**
 * vm_unmap_aliases - unmap outstanding lazy aliases in the vmap layer
 *
 * The vmap/vmalloc layer lazily flushes kernel virtual mappings primarily
 * to amortize TLB flushing overheads. This means that any page you have
 * now may, in a former life, have been mapped into a kernel virtual
 * address by the vmap layer, so there might be some CPUs with TLB entries
 * still referencing that page (in addition to the regular 1:1 kernel mapping).
 *
 * vm_unmap_aliases flushes all such lazy mappings. After it returns, we can
 * be sure that none of the pages we have control over will have any aliases
 * from the vmap layer.
 */
void vm_unmap_aliases(void)
{
	unsigned long start = ULONG_MAX, end = 0;
	int cpu;
	int flush = 0;

	if (unlikely(!vmap_initialized))
		return;

	for_each_possible_cpu(cpu) {
		struct vmap_block_queue *vbq = &per_cpu(vmap_block_queue, cpu);
		struct vmap_block *vb;

		rcu_read_lock();
		list_for_each_entry_rcu(vb, &vbq->free, free_list) {
			spin_lock(&vb->lock);
			if (vb->dirty) {
				unsigned long va_start = vb->va->va_start;
				unsigned long s, e;

				s = va_start + (vb->dirty_min << PAGE_SHIFT);
				e = va_start + (vb->dirty_max << PAGE_SHIFT);

				start = min(s, start);
				end = max(e, end);

				flush = 1;
			}
			spin_unlock(&vb->lock);
		}
		rcu_read_unlock();
	}

	__purge_vmap_area_lazy(&start, &end, 1, flush);
}
EXPORT_SYMBOL_GPL(vm_unmap_aliases);

/**
 * vm_unmap_ram - unmap linear kernel address space set up by vm_map_ram
 * @mem: the pointer returned by vm_map_ram
 * @count: the count passed to that vm_map_ram call (cannot unmap partial)
 */
void vm_unmap_ram(const void *mem, unsigned int count)
{
	unsigned long size = count << PAGE_SHIFT;
	unsigned long addr = (unsigned long)mem;

	BUG_ON(!addr);
	BUG_ON(addr < VMALLOC_START);
	BUG_ON(addr > VMALLOC_END);
	BUG_ON(addr & (PAGE_SIZE-1));

	debug_check_no_locks_freed(mem, size);
	vmap_debug_free_range(addr, addr+size);

	if (likely(count <= VMAP_MAX_ALLOC))
		vb_free(mem, size);
	else
		free_unmap_vmap_area_addr(addr);
}
EXPORT_SYMBOL(vm_unmap_ram);

/**
 * vm_map_ram - map pages linearly into kernel virtual address (vmalloc space)
 * @pages: an array of pointers to the pages to be mapped
 * @count: number of pages
 * @node: prefer to allocate data structures on this node
 * @prot: memory protection to use. PAGE_KERNEL for regular RAM
 *
 * If you use this function for less than VMAP_MAX_ALLOC pages, it can be
 * faster than vmap. But if you mix long-lived and short-lived objects with
 * vm_map_ram(), it can consume lots of address space through fragmentation
 * (especially on a 32-bit machine), and you could eventually see failures.
 * Please use this function for short-lived objects.
 *
 * Returns: a pointer to the address that has been mapped, or %NULL on failure
 */
void *vm_map_ram(struct page **pages, unsigned int count, int node, pgprot_t prot)
{
	unsigned long size = count << PAGE_SHIFT;
	unsigned long addr;
	void *mem;

	if (likely(count <= VMAP_MAX_ALLOC)) {
		mem = vb_alloc(size, GFP_KERNEL);
		if (IS_ERR(mem))
			return NULL;
		addr = (unsigned long)mem;
	} else {
		struct vmap_area *va;
		va = alloc_vmap_area(size, PAGE_SIZE,
				VMALLOC_START, VMALLOC_END, node, GFP_KERNEL);
		if (IS_ERR(va))
			return NULL;

		addr = va->va_start;
		mem = (void *)addr;
	}
	if (vmap_page_range(addr, addr + size, prot, pages) < 0) {
		vm_unmap_ram(mem, count);
		return NULL;
	}
	return mem;
}
EXPORT_SYMBOL(vm_map_ram);

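/*
 * Illustrative sketch (not part of the original file): a minimal
 * vm_map_ram()/vm_unmap_ram() round trip for a short-lived mapping.
 * example_map_two_pages() is a hypothetical helper name.
 */
#if 0	/* example only */
static void example_map_two_pages(struct page *pages[2])
{
	/* Map two pages contiguously; PAGE_KERNEL for regular RAM. */
	void *va = vm_map_ram(pages, 2, NUMA_NO_NODE, PAGE_KERNEL);

	if (!va)
		return;
	/* ... use va[0 .. 2 * PAGE_SIZE) ... */
	vm_unmap_ram(va, 2);	/* count must match the vm_map_ram() call */
}
#endif
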
static struct vm_struct *vmlist __initdata;

/**
 * vm_area_check_early - check if vmap area is already mapped
 * @vm: vm_struct to be checked
 *
 * This function is used to check if the vmap area has been
 * mapped already. @vm->addr, @vm->size and @vm->flags should
 * contain proper values.
 *
 */
int __init vm_area_check_early(struct vm_struct *vm)
{
	struct vm_struct *tmp, **p;

	BUG_ON(vmap_initialized);
	for (p = &vmlist; (tmp = *p) != NULL; p = &tmp->next) {
		if (tmp->addr >= vm->addr) {
			if (tmp->addr < vm->addr + vm->size)
				return 1;
		} else {
			if (tmp->addr + tmp->size > vm->addr)
				return 1;
		}
	}
	return 0;
}

/**
 * vm_area_add_early - add vmap area early during boot
 * @vm: vm_struct to add
 *
 * This function is used to add fixed kernel vm area to vmlist before
 * vmalloc_init() is called. @vm->addr, @vm->size, and @vm->flags
 * should contain proper values and the other fields should be zero.
 *
 * DO NOT USE THIS FUNCTION UNLESS YOU KNOW WHAT YOU'RE DOING.
 */
void __init vm_area_add_early(struct vm_struct *vm)
{
	struct vm_struct *tmp, **p;

	BUG_ON(vmap_initialized);
	for (p = &vmlist; (tmp = *p) != NULL; p = &tmp->next) {
		if (tmp->addr >= vm->addr) {
			BUG_ON(tmp->addr < vm->addr + vm->size);
			break;
		} else
			BUG_ON(tmp->addr + tmp->size > vm->addr);
	}
	vm->next = *p;
	*p = vm;
}

/**
 * vm_area_register_early - register vmap area early during boot
 * @vm: vm_struct to register
 * @align: requested alignment
 *
 * This function is used to register kernel vm area before
 * vmalloc_init() is called. @vm->size and @vm->flags should contain
 * proper values on entry and other fields should be zero. On return,
 * vm->addr contains the allocated address.
 *
 * DO NOT USE THIS FUNCTION UNLESS YOU KNOW WHAT YOU'RE DOING.
 */
void __init vm_area_register_early(struct vm_struct *vm, size_t align)
{
	static size_t vm_init_off __initdata;
	unsigned long addr;

	addr = ALIGN(VMALLOC_START + vm_init_off, align);
	vm_init_off = PFN_ALIGN(addr + vm->size) - VMALLOC_START;

	vm->addr = (void *)addr;

	vm_area_add_early(vm);
}

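/*
 * Illustrative sketch (not part of the original file): registering a
 * fixed-size area at boot, before vmalloc_init() runs. The vm_struct
 * 'example_early_vm' is a hypothetical name.
 */
#if 0	/* example only */
static struct vm_struct example_early_vm;

static void __init example_reserve_early_area(void)
{
	example_early_vm.flags = VM_ALLOC;
	example_early_vm.size = PAGE_SIZE * 4;
	/* Picks an address and links the area into vmlist. */
	vm_area_register_early(&example_early_vm, PAGE_SIZE);
}
#endif
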
void __init vmalloc_init(void)
{
	struct vmap_area *va;
	struct vm_struct *tmp;
	int i;

	for_each_possible_cpu(i) {
		struct vmap_block_queue *vbq;
		struct vfree_deferred *p;

		vbq = &per_cpu(vmap_block_queue, i);
		spin_lock_init(&vbq->lock);
		INIT_LIST_HEAD(&vbq->free);
		p = &per_cpu(vfree_deferred, i);
		init_llist_head(&p->list);
		INIT_WORK(&p->wq, free_work);
	}

	/* Import existing vmlist entries. */
	for (tmp = vmlist; tmp; tmp = tmp->next) {
		va = kzalloc(sizeof(struct vmap_area), GFP_NOWAIT);
		va->flags = VM_VM_AREA;
		va->va_start = (unsigned long)tmp->addr;
		va->va_end = va->va_start + tmp->size;
		va->vm = tmp;
		__insert_vmap_area(va);
	}

	vmap_area_pcpu_hole = VMALLOC_END;
	calc_total_vmalloc_size();
	vmap_initialized = true;
}

/**
 * map_kernel_range_noflush - map kernel VM area with the specified pages
 * @addr: start of the VM area to map
 * @size: size of the VM area to map
 * @prot: page protection flags to use
 * @pages: pages to map
 *
 * Map PFN_UP(@size) pages at @addr. The VM area @addr and @size
 * specify should have been allocated using get_vm_area() and its
 * friends.
 *
 * NOTE:
 * This function does NOT do any cache flushing. The caller is
 * responsible for calling flush_cache_vmap() on to-be-mapped areas
 * before calling this function.
 *
 * RETURNS:
 * The number of pages mapped on success, -errno on failure.
 */
int map_kernel_range_noflush(unsigned long addr, unsigned long size,
			pgprot_t prot, struct page **pages)
{
	return vmap_page_range_noflush(addr, addr + size, prot, pages);
}

/**
 * unmap_kernel_range_noflush - unmap kernel VM area
 * @addr: start of the VM area to unmap
 * @size: size of the VM area to unmap
 *
 * Unmap PFN_UP(@size) pages at @addr. The VM area @addr and @size
 * specify should have been allocated using get_vm_area() and its
 * friends.
 *
 * NOTE:
 * This function does NOT do any cache flushing. The caller is
 * responsible for calling flush_cache_vunmap() on to-be-unmapped areas
 * before calling this function and flush_tlb_kernel_range() after.
 */
void unmap_kernel_range_noflush(unsigned long addr, unsigned long size)
{
	vunmap_page_range(addr, addr + size);
}
EXPORT_SYMBOL_GPL(unmap_kernel_range_noflush);

/**
 * unmap_kernel_range - unmap kernel VM area and flush cache and TLB
 * @addr: start of the VM area to unmap
 * @size: size of the VM area to unmap
 *
 * Similar to unmap_kernel_range_noflush() but flushes the virtual cache
 * before the unmapping and the TLB after.
 */
void unmap_kernel_range(unsigned long addr, unsigned long size)
{
	unsigned long end = addr + size;

	flush_cache_vunmap(addr, end);
	vunmap_page_range(addr, end);
	flush_tlb_kernel_range(addr, end);
}
EXPORT_SYMBOL_GPL(unmap_kernel_range);

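/*
 * Illustrative sketch (not part of the original file): pairing
 * map_kernel_range_noflush() with the cache flush the NOTE above makes
 * the caller responsible for. example_map_into_area() is hypothetical.
 */
#if 0	/* example only */
static int example_map_into_area(struct vm_struct *area, struct page **pages)
{
	unsigned long addr = (unsigned long)area->addr;
	unsigned long size = get_vm_area_size(area);
	int ret;

	flush_cache_vmap(addr, addr + size);	/* caller's responsibility */
	ret = map_kernel_range_noflush(addr, size, PAGE_KERNEL, pages);
	return ret < 0 ? ret : 0;
}
#endif
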
int map_vm_area(struct vm_struct *area, pgprot_t prot, struct page **pages)
{
	unsigned long addr = (unsigned long)area->addr;
	unsigned long end = addr + get_vm_area_size(area);
	int err;

	err = vmap_page_range(addr, end, prot, pages);

	return err > 0 ? 0 : err;
}
EXPORT_SYMBOL_GPL(map_vm_area);

static void setup_vmalloc_vm(struct vm_struct *vm, struct vmap_area *va,
			unsigned long flags, const void *caller)
{
	spin_lock(&vmap_area_lock);
	vm->flags = flags;
	vm->addr = (void *)va->va_start;
	vm->size = va->va_end - va->va_start;
	vm->caller = caller;
	va->vm = vm;
	va->flags |= VM_VM_AREA;
	spin_unlock(&vmap_area_lock);
}

static void clear_vm_uninitialized_flag(struct vm_struct *vm)
{
	/*
	 * Before removing VM_UNINITIALIZED,
	 * we should make sure that vm has proper values.
	 * Pair with smp_rmb() in show_numa_info().
	 */
	smp_wmb();
	vm->flags &= ~VM_UNINITIALIZED;
}

static struct vm_struct *__get_vm_area_node(unsigned long size,
		unsigned long align, unsigned long flags, unsigned long start,
		unsigned long end, int node, gfp_t gfp_mask, const void *caller)
{
	struct vmap_area *va;
	struct vm_struct *area;

	BUG_ON(in_interrupt());
	if (flags & VM_IOREMAP)
		align = 1ul << clamp_t(int, fls_long(size),
				PAGE_SHIFT, IOREMAP_MAX_ORDER);

	size = PAGE_ALIGN(size);
	if (unlikely(!size))
		return NULL;

	area = kzalloc_node(sizeof(*area), gfp_mask & GFP_RECLAIM_MASK, node);
	if (unlikely(!area))
		return NULL;

	if (!(flags & VM_NO_GUARD))
		size += PAGE_SIZE;

	va = alloc_vmap_area(size, align, start, end, node, gfp_mask);
	if (IS_ERR(va)) {
		kfree(area);
		return NULL;
	}

	setup_vmalloc_vm(area, va, flags, caller);

	return area;
}

struct vm_struct *__get_vm_area(unsigned long size, unsigned long flags,
				unsigned long start, unsigned long end)
{
	return __get_vm_area_node(size, 1, flags, start, end, NUMA_NO_NODE,
				  GFP_KERNEL, __builtin_return_address(0));
}
EXPORT_SYMBOL_GPL(__get_vm_area);

struct vm_struct *__get_vm_area_caller(unsigned long size, unsigned long flags,
				       unsigned long start, unsigned long end,
				       const void *caller)
{
	return __get_vm_area_node(size, 1, flags, start, end, NUMA_NO_NODE,
				  GFP_KERNEL, caller);
}

/**
 * get_vm_area - reserve a contiguous kernel virtual area
 * @size: size of the area
 * @flags: %VM_IOREMAP for I/O mappings or VM_ALLOC
 *
 * Search an area of @size in the kernel virtual mapping area,
 * and reserve it for our purposes. Returns the area descriptor
 * on success or %NULL on failure.
 */
struct vm_struct *get_vm_area(unsigned long size, unsigned long flags)
{
#ifdef CONFIG_ENABLE_VMALLOC_SAVING
	return __get_vm_area_node(size, 1, flags, PAGE_OFFSET, VMALLOC_END,
				  NUMA_NO_NODE, GFP_KERNEL,
				  __builtin_return_address(0));
#else
	return __get_vm_area_node(size, 1, flags, VMALLOC_START, VMALLOC_END,
				  NUMA_NO_NODE, GFP_KERNEL,
				  __builtin_return_address(0));
#endif
}

struct vm_struct *get_vm_area_caller(unsigned long size, unsigned long flags,
				const void *caller)
{
#ifdef CONFIG_ENABLE_VMALLOC_SAVING
	return __get_vm_area_node(size, 1, flags, PAGE_OFFSET, VMALLOC_END,
				  NUMA_NO_NODE, GFP_KERNEL, caller);
#else
	return __get_vm_area_node(size, 1, flags, VMALLOC_START, VMALLOC_END,
				  NUMA_NO_NODE, GFP_KERNEL, caller);
#endif
}

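/*
 * Illustrative sketch (not part of the original file): reserving kernel
 * virtual address space only, as an ioremap()-style implementation would,
 * leaving PTE installation to the caller. example_reserve_va() is a
 * hypothetical helper.
 */
#if 0	/* example only */
static void __iomem *example_reserve_va(unsigned long size)
{
	struct vm_struct *area = get_vm_area(size, VM_IOREMAP);

	if (!area)
		return NULL;
	/* Caller now installs PTEs, e.g. with ioremap_page_range(). */
	return (void __iomem *)area->addr;
}
#endif
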
/**
 * find_vm_area - find a continuous kernel virtual area
 * @addr: base address
 *
 * Search for the kernel VM area starting at @addr, and return it.
 * It is up to the caller to do all required locking to keep the returned
 * pointer valid.
 */
struct vm_struct *find_vm_area(const void *addr)
{
	struct vmap_area *va;

	va = find_vmap_area((unsigned long)addr);
	if (va && va->flags & VM_VM_AREA)
		return va->vm;

	return NULL;
}

/**
 * remove_vm_area - find and remove a continuous kernel virtual area
 * @addr: base address
 *
 * Search for the kernel VM area starting at @addr, and remove it.
 * This function returns the found VM area, but using it is NOT safe
 * on SMP machines, except for its size or flags.
 */
struct vm_struct *remove_vm_area(const void *addr)
{
	struct vmap_area *va;

	va = find_vmap_area((unsigned long)addr);
	if (va && va->flags & VM_VM_AREA) {
		struct vm_struct *vm = va->vm;

		spin_lock(&vmap_area_lock);
		va->vm = NULL;
		va->flags &= ~VM_VM_AREA;
		spin_unlock(&vmap_area_lock);

		vmap_debug_free_range(va->va_start, va->va_end);
		kasan_free_shadow(vm);
		free_unmap_vmap_area(va);

		return vm;
	}
	return NULL;
}

static void __vunmap(const void *addr, int deallocate_pages)
{
	struct vm_struct *area;

	if (!addr)
		return;

	if (WARN(!PAGE_ALIGNED(addr), "Trying to vfree() bad address (%p)\n",
			addr))
		return;

	area = find_vm_area(addr);
	if (unlikely(!area)) {
		WARN(1, KERN_ERR "Trying to vfree() nonexistent vm area (%p)\n",
				addr);
		return;
	}

	debug_check_no_locks_freed(addr, get_vm_area_size(area));
	debug_check_no_obj_freed(addr, get_vm_area_size(area));

	remove_vm_area(addr);
	if (deallocate_pages) {
		int i;

		for (i = 0; i < area->nr_pages; i++) {
			struct page *page = area->pages[i];

			BUG_ON(!page);
			__free_page(page);
		}

		if (area->flags & VM_VPAGES)
			vfree(area->pages);
		else
			kfree(area->pages);
	}

	kfree(area);
	return;
}

/**
 * vfree - release memory allocated by vmalloc()
 * @addr: memory base address
 *
 * Free the virtually continuous memory area starting at @addr, as
 * obtained from vmalloc(), vmalloc_32() or __vmalloc(). If @addr is
 * NULL, no operation is performed.
 *
 * Must not be called in NMI context (strictly speaking, only if we don't
 * have CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG, but making the calling
 * conventions for vfree() arch-dependent would be a really bad idea)
 *
 * NOTE: assumes that the object at *addr has a size >= sizeof(llist_node)
 */
void vfree(const void *addr)
{
	BUG_ON(in_nmi());

	kmemleak_free(addr);

	if (!addr)
		return;
	if (unlikely(in_interrupt())) {
		struct vfree_deferred *p = this_cpu_ptr(&vfree_deferred);
		if (llist_add((struct llist_node *)addr, &p->list))
			schedule_work(&p->wq);
	} else
		__vunmap(addr, 1);
}
EXPORT_SYMBOL(vfree);

/**
 * vunmap - release virtual mapping obtained by vmap()
 * @addr: memory base address
 *
 * Free the virtually contiguous memory area starting at @addr,
 * which was created from the page array passed to vmap().
 *
 * Must not be called in interrupt context.
 */
void vunmap(const void *addr)
{
	BUG_ON(in_interrupt());
	might_sleep();
	if (addr)
		__vunmap(addr, 0);
}
EXPORT_SYMBOL(vunmap);

/**
 * vmap - map an array of pages into virtually contiguous space
 * @pages: array of page pointers
 * @count: number of pages to map
 * @flags: vm_area->flags
 * @prot: page protection for the mapping
 *
 * Maps @count pages from @pages into contiguous kernel virtual
 * space.
 */
void *vmap(struct page **pages, unsigned int count,
		unsigned long flags, pgprot_t prot)
{
	struct vm_struct *area;

	might_sleep();

	if (count > totalram_pages)
		return NULL;

	area = get_vm_area_caller((count << PAGE_SHIFT), flags,
					__builtin_return_address(0));
	if (!area)
		return NULL;

	if (map_vm_area(area, prot, pages)) {
		vunmap(area->addr);
		return NULL;
	}

	return area->addr;
}
EXPORT_SYMBOL(vmap);

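/*
 * Illustrative sketch (not part of the original file): stitching an
 * array of order-0 pages into one contiguous kernel mapping with vmap().
 * example_vmap_pages() is a hypothetical helper.
 */
#if 0	/* example only */
static void example_vmap_pages(struct page **pages, unsigned int count)
{
	/* VM_MAP marks the area as a vmap()ed region in /proc/vmallocinfo. */
	void *va = vmap(pages, count, VM_MAP, PAGE_KERNEL);

	if (!va)
		return;
	/* ... use va[0 .. count * PAGE_SIZE) ... */
	vunmap(va);	/* must not be called from interrupt context */
}
#endif
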
static void *__vmalloc_node(unsigned long size, unsigned long align,
			    gfp_t gfp_mask, pgprot_t prot,
			    int node, const void *caller);
static void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask,
				 pgprot_t prot, int node)
{
	const int order = 0;
	struct page **pages;
	unsigned int nr_pages, array_size, i;
	const gfp_t nested_gfp = (gfp_mask & GFP_RECLAIM_MASK) | __GFP_ZERO;
	const gfp_t alloc_mask = gfp_mask | __GFP_NOWARN;

	nr_pages = get_vm_area_size(area) >> PAGE_SHIFT;
	array_size = (nr_pages * sizeof(struct page *));

	area->nr_pages = nr_pages;
	/* Please note that the recursion is strictly bounded. */
	if (array_size > PAGE_SIZE) {
		pages = __vmalloc_node(array_size, 1, nested_gfp|__GFP_HIGHMEM,
				PAGE_KERNEL, node, area->caller);
		area->flags |= VM_VPAGES;
	} else {
		pages = kmalloc_node(array_size, nested_gfp, node);
	}
	area->pages = pages;
	if (!area->pages) {
		remove_vm_area(area->addr);
		kfree(area);
		return NULL;
	}

	for (i = 0; i < area->nr_pages; i++) {
		struct page *page;

		if (node == NUMA_NO_NODE)
			page = alloc_page(alloc_mask);
		else
			page = alloc_pages_node(node, alloc_mask, order);

		if (unlikely(!page)) {
			/* Successfully allocated i pages, free them in __vunmap() */
			area->nr_pages = i;
			goto fail;
		}
		area->pages[i] = page;
		if (gfpflags_allow_blocking(gfp_mask))
			cond_resched();
	}

	if (map_vm_area(area, prot, pages))
		goto fail;
	return area->addr;

fail:
	warn_alloc_failed(gfp_mask, order,
			  "vmalloc: allocation failure, allocated %ld of %ld bytes\n",
			  (area->nr_pages*PAGE_SIZE), area->size);
	vfree(area->addr);
	return NULL;
}

/**
 * __vmalloc_node_range - allocate virtually contiguous memory
 * @size: allocation size
 * @align: desired alignment
 * @start: vm area range start
 * @end: vm area range end
 * @gfp_mask: flags for the page level allocator
 * @prot: protection mask for the allocated pages
 * @vm_flags: additional vm area flags (e.g. %VM_NO_GUARD)
 * @node: node to use for allocation or NUMA_NO_NODE
 * @caller: caller's return address
 *
 * Allocate enough pages to cover @size from the page level
 * allocator with @gfp_mask flags. Map them into contiguous
 * kernel virtual space, using a pagetable protection of @prot.
 */
void *__vmalloc_node_range(unsigned long size, unsigned long align,
			unsigned long start, unsigned long end, gfp_t gfp_mask,
			pgprot_t prot, unsigned long vm_flags, int node,
			const void *caller)
{
	struct vm_struct *area;
	void *addr;
	unsigned long real_size = size;

	size = PAGE_ALIGN(size);
	if (!size || (size >> PAGE_SHIFT) > totalram_pages)
		goto fail;

	area = __get_vm_area_node(size, align, VM_ALLOC | VM_UNINITIALIZED |
				vm_flags, start, end, node, gfp_mask, caller);
	if (!area)
		goto fail;

	addr = __vmalloc_area_node(area, gfp_mask, prot, node);
	if (!addr)
		return NULL;

	/*
	 * First make sure the mappings are removed from all page-tables
	 * before they are freed.
	 */
	vmalloc_sync_unmappings();

	/*
	 * In this function, newly allocated vm_struct has VM_UNINITIALIZED
	 * flag. It means that vm_struct is not fully initialized.
	 * Now, it is fully initialized, so remove this flag here.
	 */
	clear_vm_uninitialized_flag(area);

	/*
	 * A ref_count = 2 is needed because vm_struct allocated in
	 * __get_vm_area_node() contains a reference to the virtual address of
	 * the vmalloc'ed block.
	 */
	kmemleak_alloc(addr, real_size, 2, gfp_mask);

	return addr;

fail:
	warn_alloc_failed(gfp_mask, 0,
			  "vmalloc: allocation failure: %lu bytes\n",
			  real_size);
	return NULL;
}

/**
 * __vmalloc_node - allocate virtually contiguous memory
 * @size: allocation size
 * @align: desired alignment
 * @gfp_mask: flags for the page level allocator
 * @prot: protection mask for the allocated pages
 * @node: node to use for allocation or NUMA_NO_NODE
 * @caller: caller's return address
 *
 * Allocate enough pages to cover @size from the page level
 * allocator with @gfp_mask flags. Map them into contiguous
 * kernel virtual space, using a pagetable protection of @prot.
 */
static void *__vmalloc_node(unsigned long size, unsigned long align,
			    gfp_t gfp_mask, pgprot_t prot,
			    int node, const void *caller)
{
	return __vmalloc_node_range(size, align, VMALLOC_START, VMALLOC_END,
				gfp_mask, prot, 0, node, caller);
}

void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot)
{
	return __vmalloc_node(size, 1, gfp_mask, prot, NUMA_NO_NODE,
				__builtin_return_address(0));
}
EXPORT_SYMBOL(__vmalloc);

static inline void *__vmalloc_node_flags(unsigned long size,
					int node, gfp_t flags)
{
	return __vmalloc_node(size, 1, flags, PAGE_KERNEL,
					node, __builtin_return_address(0));
}

/**
 * vmalloc - allocate virtually contiguous memory
 * @size: allocation size
 * Allocate enough pages to cover @size from the page level
 * allocator and map them into contiguous kernel virtual space.
 *
 * For tight control over page level allocator and protection flags
 * use __vmalloc() instead.
 */
void *vmalloc(unsigned long size)
{
	return __vmalloc_node_flags(size, NUMA_NO_NODE,
				    GFP_KERNEL | __GFP_HIGHMEM);
}
EXPORT_SYMBOL(vmalloc);

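/*
 * Illustrative sketch (not part of the original file): a large, virtually
 * contiguous buffer that need not be physically contiguous.
 * example_alloc_table() is a hypothetical helper.
 */
#if 0	/* example only */
static u32 *example_alloc_table(size_t entries)
{
	u32 *table = vmalloc(entries * sizeof(u32));

	if (!table)
		return NULL;
	/* ... fill and use the table; release with vfree(table) ... */
	return table;
}
#endif
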
/**
 * vzalloc - allocate virtually contiguous memory with zero fill
 * @size: allocation size
 * Allocate enough pages to cover @size from the page level
 * allocator and map them into contiguous kernel virtual space.
 * The memory allocated is set to zero.
 *
 * For tight control over page level allocator and protection flags
 * use __vmalloc() instead.
 */
void *vzalloc(unsigned long size)
{
	return __vmalloc_node_flags(size, NUMA_NO_NODE,
				GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO);
}
EXPORT_SYMBOL(vzalloc);

/**
 * vmalloc_user - allocate zeroed virtually contiguous memory for userspace
 * @size: allocation size
 *
 * The resulting memory area is zeroed so it can be mapped to userspace
 * without leaking data.
 */
void *vmalloc_user(unsigned long size)
{
	struct vm_struct *area;
	void *ret;

	ret = __vmalloc_node(size, SHMLBA,
			     GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO,
			     PAGE_KERNEL, NUMA_NO_NODE,
			     __builtin_return_address(0));
	if (ret) {
		area = find_vm_area(ret);
		area->flags |= VM_USERMAP;
	}
	return ret;
}
EXPORT_SYMBOL(vmalloc_user);

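/*
 * Illustrative sketch (not part of the original file): vmalloc_user()
 * zeroes the buffer and sets VM_USERMAP, which remap_vmalloc_range()
 * checks later. 'example_buf' is a hypothetical name.
 */
#if 0	/* example only */
static void *example_buf;

static int example_alloc_shared(void)
{
	example_buf = vmalloc_user(16 * PAGE_SIZE);
	return example_buf ? 0 : -ENOMEM;
}
#endif
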
/**
 * vmalloc_node - allocate memory on a specific node
 * @size: allocation size
 * @node: numa node
 *
 * Allocate enough pages to cover @size from the page level
 * allocator and map them into contiguous kernel virtual space.
 *
 * For tight control over page level allocator and protection flags
 * use __vmalloc() instead.
 */
void *vmalloc_node(unsigned long size, int node)
{
	return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL,
					node, __builtin_return_address(0));
}
EXPORT_SYMBOL(vmalloc_node);

/**
 * vzalloc_node - allocate memory on a specific node with zero fill
 * @size: allocation size
 * @node: numa node
 *
 * Allocate enough pages to cover @size from the page level
 * allocator and map them into contiguous kernel virtual space.
 * The memory allocated is set to zero.
 *
 * For tight control over page level allocator and protection flags
 * use __vmalloc_node() instead.
 */
void *vzalloc_node(unsigned long size, int node)
{
	return __vmalloc_node_flags(size, node,
			 GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO);
}
EXPORT_SYMBOL(vzalloc_node);

#ifndef PAGE_KERNEL_EXEC
# define PAGE_KERNEL_EXEC PAGE_KERNEL
#endif

/**
 * vmalloc_exec - allocate virtually contiguous, executable memory
 * @size: allocation size
 *
 * Kernel-internal function to allocate enough pages to cover @size
 * from the page level allocator and map them into contiguous and
 * executable kernel virtual space.
 *
 * For tight control over page level allocator and protection flags
 * use __vmalloc() instead.
 */
void *vmalloc_exec(unsigned long size)
{
	return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL_EXEC,
			      NUMA_NO_NODE, __builtin_return_address(0));
}

#if defined(CONFIG_64BIT) && defined(CONFIG_ZONE_DMA32)
#define GFP_VMALLOC32 GFP_DMA32 | GFP_KERNEL
#elif defined(CONFIG_64BIT) && defined(CONFIG_ZONE_DMA)
#define GFP_VMALLOC32 GFP_DMA | GFP_KERNEL
#else
#define GFP_VMALLOC32 GFP_KERNEL
#endif

/**
 * vmalloc_32 - allocate virtually contiguous memory (32bit addressable)
 * @size: allocation size
 *
 * Allocate enough 32bit PA addressable pages to cover @size from the
 * page level allocator and map them into contiguous kernel virtual space.
 */
void *vmalloc_32(unsigned long size)
{
	return __vmalloc_node(size, 1, GFP_VMALLOC32, PAGE_KERNEL,
			      NUMA_NO_NODE, __builtin_return_address(0));
}
EXPORT_SYMBOL(vmalloc_32);

/**
 * vmalloc_32_user - allocate zeroed virtually contiguous 32bit memory
 * @size: allocation size
 *
 * The resulting memory area is 32bit addressable and zeroed so it can be
 * mapped to userspace without leaking data.
 */
void *vmalloc_32_user(unsigned long size)
{
	struct vm_struct *area;
	void *ret;

	ret = __vmalloc_node(size, 1, GFP_VMALLOC32 | __GFP_ZERO, PAGE_KERNEL,
			     NUMA_NO_NODE, __builtin_return_address(0));
	if (ret) {
		area = find_vm_area(ret);
		area->flags |= VM_USERMAP;
	}
	return ret;
}
EXPORT_SYMBOL(vmalloc_32_user);

/*
 * Small helper routine: copy contents from addr to buf.
 * If the page is not present, fill with zeroes.
 */

static int aligned_vread(char *buf, char *addr, unsigned long count)
{
	struct page *p;
	int copied = 0;

	while (count) {
		unsigned long offset, length;

		offset = offset_in_page(addr);
		length = PAGE_SIZE - offset;
		if (length > count)
			length = count;
		p = vmalloc_to_page(addr);
		/*
		 * To do safe access to this _mapped_ area, we need
		 * lock. But adding lock here means that we need to add
		 * overhead of vmalloc()/vfree() calls for this _debug_
		 * interface, rarely used. Instead of that, we'll use
		 * kmap() and get small overhead in this access function.
		 */
		if (p) {
			/*
			 * we can expect USER0 is not used (see vread/vwrite's
			 * function description)
			 */
			void *map = kmap_atomic(p);
			memcpy(buf, map + offset, length);
			kunmap_atomic(map);
		} else
			memset(buf, 0, length);

		addr += length;
		buf += length;
		copied += length;
		count -= length;
	}
	return copied;
}

static int aligned_vwrite(char *buf, char *addr, unsigned long count)
{
	struct page *p;
	int copied = 0;

	while (count) {
		unsigned long offset, length;

		offset = offset_in_page(addr);
		length = PAGE_SIZE - offset;
		if (length > count)
			length = count;
		p = vmalloc_to_page(addr);
		/*
		 * To do safe access to this _mapped_ area, we need
		 * lock. But adding lock here means that we need to add
		 * overhead of vmalloc()/vfree() calls for this _debug_
		 * interface, rarely used. Instead of that, we'll use
		 * kmap() and get small overhead in this access function.
		 */
		if (p) {
			/*
			 * we can expect USER0 is not used (see vread/vwrite's
			 * function description)
			 */
			void *map = kmap_atomic(p);
			memcpy(map + offset, buf, length);
			kunmap_atomic(map);
		}
		addr += length;
		buf += length;
		copied += length;
		count -= length;
	}
	return copied;
}

/**
 * vread() - read vmalloc area in a safe way.
 * @buf: buffer for reading data
 * @addr: vm address.
 * @count: number of bytes to be read.
 *
 * Returns the number of bytes by which @addr and @buf should be increased
 * (same as @count). Returns 0 if [addr...addr+count) doesn't intersect
 * any live vmalloc area.
 *
 * This function checks that addr is a valid vmalloc'ed area, and
 * copies data from that area to a given buffer. If the given memory range
 * of [addr...addr+count) includes some valid address, data is copied to
 * the proper area of @buf. If there are memory holes, they'll be zero-filled.
 * IOREMAP areas are treated as memory holes and no copy is done.
 *
 * If [addr...addr+count) doesn't intersect any live vm_struct area,
 * returns 0. @buf should be a kernel buffer.
 *
 * Note: In usual ops, vread() is never necessary because the caller
 * should know the vmalloc() area is valid and can use memcpy().
 * This is for routines which have to access the vmalloc area without
 * any information, such as /dev/kmem.
 */

long vread(char *buf, char *addr, unsigned long count)
{
	struct vmap_area *va;
	struct vm_struct *vm;
	char *vaddr, *buf_start = buf;
	unsigned long buflen = count;
	unsigned long n;

	/* Don't allow overflow */
	if ((unsigned long) addr + count < count)
		count = -(unsigned long) addr;

	spin_lock(&vmap_area_lock);
	list_for_each_entry(va, &vmap_area_list, list) {
		if (!count)
			break;

		if (!(va->flags & VM_VM_AREA))
			continue;

		vm = va->vm;
		vaddr = (char *) vm->addr;
		if (addr >= vaddr + get_vm_area_size(vm))
			continue;
		while (addr < vaddr) {
			if (count == 0)
				goto finished;
			*buf = '\0';
			buf++;
			addr++;
			count--;
		}
		n = vaddr + get_vm_area_size(vm) - addr;
		if (n > count)
			n = count;
		if (!(vm->flags & VM_IOREMAP))
			aligned_vread(buf, addr, n);
		else /* IOREMAP area is treated as memory hole */
			memset(buf, 0, n);
		buf += n;
		addr += n;
		count -= n;
	}
finished:
	spin_unlock(&vmap_area_lock);

	if (buf == buf_start)
		return 0;
	/* zero-fill memory holes */
	if (buf != buf_start + buflen)
		memset(buf, 0, buflen - (buf - buf_start));

	return buflen;
}

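/*
 * Illustrative sketch (not part of the original file): probing a vmalloc
 * range with vread(); unmapped holes come back zero-filled rather than
 * faulting. example_peek(), 'kbuf' and 'target' are hypothetical.
 */
#if 0	/* example only */
static long example_peek(char *kbuf, char *target, unsigned long len)
{
	/* Returns 0 if the range intersects no live vmalloc area. */
	return vread(kbuf, target, len);
}
#endif
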
/**
 * vwrite() - write vmalloc area in a safe way.
 * @buf: buffer for source data
 * @addr: vm address.
 * @count: number of bytes to be written.
 *
 * Returns the number of bytes by which @addr and @buf should be increased
 * (same as @count).
 * If [addr...addr+count) doesn't intersect any valid vmalloc area,
 * returns 0.
 *
 * This function checks that addr is a valid vmalloc'ed area, and
 * copies data from a buffer to the given addr. If the specified range of
 * [addr...addr+count) includes some valid address, data is copied from
 * the proper area of @buf. If there are memory holes, no copy is done
 * for them. IOREMAP areas are treated as memory holes and no copy is done.
 *
 * If [addr...addr+count) doesn't intersect any live vm_struct area,
 * returns 0. @buf should be a kernel buffer.
 *
 * Note: In usual ops, vwrite() is never necessary because the caller
 * should know the vmalloc() area is valid and can use memcpy().
 * This is for routines which have to access the vmalloc area without
 * any information, such as /dev/kmem.
 */

long vwrite(char *buf, char *addr, unsigned long count)
{
	struct vmap_area *va;
	struct vm_struct *vm;
	char *vaddr;
	unsigned long n, buflen;
	int copied = 0;

	/* Don't allow overflow */
	if ((unsigned long) addr + count < count)
		count = -(unsigned long) addr;
	buflen = count;

	spin_lock(&vmap_area_lock);
	list_for_each_entry(va, &vmap_area_list, list) {
		if (!count)
			break;

		if (!(va->flags & VM_VM_AREA))
			continue;

		vm = va->vm;
		vaddr = (char *) vm->addr;
		if (addr >= vaddr + get_vm_area_size(vm))
			continue;
		while (addr < vaddr) {
			if (count == 0)
				goto finished;
			buf++;
			addr++;
			count--;
		}
		n = vaddr + get_vm_area_size(vm) - addr;
		if (n > count)
			n = count;
		if (!(vm->flags & VM_IOREMAP)) {
			aligned_vwrite(buf, addr, n);
			copied++;
		}
		buf += n;
		addr += n;
		count -= n;
	}
finished:
	spin_unlock(&vmap_area_lock);
	if (!copied)
		return 0;
	return buflen;
}

/**
 * remap_vmalloc_range_partial - map vmalloc pages to userspace
 * @vma: vma to cover
 * @uaddr: target user address to start at
 * @kaddr: virtual address of vmalloc kernel memory
 * @size: size of map area
 *
 * Returns: 0 for success, -Exxx on failure
 *
 * This function checks that @kaddr is a valid vmalloc'ed area,
 * and that it is big enough to cover the range starting at
 * @uaddr in @vma. Returns failure if these criteria aren't met.
 *
 * Similar to remap_pfn_range() (see mm/memory.c)
 */
int remap_vmalloc_range_partial(struct vm_area_struct *vma, unsigned long uaddr,
				void *kaddr, unsigned long size)
{
	struct vm_struct *area;

	size = PAGE_ALIGN(size);

	if (!PAGE_ALIGNED(uaddr) || !PAGE_ALIGNED(kaddr))
		return -EINVAL;

	area = find_vm_area(kaddr);
	if (!area)
		return -EINVAL;

	if (!(area->flags & VM_USERMAP))
		return -EINVAL;

	if (kaddr + size > area->addr + get_vm_area_size(area))
		return -EINVAL;

	do {
		struct page *page = vmalloc_to_page(kaddr);
		int ret;

		ret = vm_insert_page(vma, uaddr, page);
		if (ret)
			return ret;

		uaddr += PAGE_SIZE;
		kaddr += PAGE_SIZE;
		size -= PAGE_SIZE;
	} while (size > 0);

	vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;

	return 0;
}
EXPORT_SYMBOL(remap_vmalloc_range_partial);

/**
 * remap_vmalloc_range - map vmalloc pages to userspace
 * @vma: vma to cover (map full range of vma)
 * @addr: vmalloc memory
 * @pgoff: number of pages into addr before first page to map
 *
 * Returns: 0 for success, -Exxx on failure
 *
 * This function checks that addr is a valid vmalloc'ed area, and
 * that it is big enough to cover the vma. Returns failure if these
 * criteria aren't met.
 *
 * Similar to remap_pfn_range() (see mm/memory.c)
 */
int remap_vmalloc_range(struct vm_area_struct *vma, void *addr,
			unsigned long pgoff)
{
	return remap_vmalloc_range_partial(vma, vma->vm_start,
					   addr + (pgoff << PAGE_SHIFT),
					   vma->vm_end - vma->vm_start);
}
EXPORT_SYMBOL(remap_vmalloc_range);

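/*
 * Illustrative sketch (not part of the original file): exporting a
 * vmalloc_user() buffer to userspace from a driver's ->mmap() handler.
 * example_mmap() and 'example_buf' are hypothetical names.
 */
#if 0	/* example only */
static void *example_buf;	/* from vmalloc_user(), has VM_USERMAP set */

static int example_mmap(struct file *file, struct vm_area_struct *vma)
{
	/* Map the whole buffer starting at page offset 0. */
	return remap_vmalloc_range(vma, example_buf, 0);
}
#endif
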
/*
 * Implement stubs for vmalloc_sync_[un]mappings() if the architecture
 * chose not to have one.
 *
 * The purpose of these functions is to make sure the vmalloc area
 * mappings are identical in all page-tables in the system.
 */
void __weak vmalloc_sync_mappings(void)
{
}

void __weak vmalloc_sync_unmappings(void)
{
}

static int f(pte_t *pte, pgtable_t table, unsigned long addr, void *data)
{
	pte_t ***p = data;

	if (p) {
		*(*p) = pte;
		(*p)++;
	}
	return 0;
}

/**
 * alloc_vm_area - allocate a range of kernel address space
 * @size: size of the area
 * @ptes: returns the PTEs for the address space
 *
 * Returns: NULL on failure, vm_struct on success
 *
 * This function reserves a range of kernel address space, and
 * allocates pagetables to map that range. No actual mappings
 * are created.
 *
 * If @ptes is non-NULL, pointers to the PTEs (in init_mm)
 * allocated for the VM area are returned.
 */
struct vm_struct *alloc_vm_area(size_t size, pte_t **ptes)
{
	struct vm_struct *area;

	area = get_vm_area_caller(size, VM_IOREMAP,
				__builtin_return_address(0));
	if (area == NULL)
		return NULL;

	/*
	 * This ensures that page tables are constructed for this region
	 * of kernel virtual address space and mapped into init_mm.
	 */
	if (apply_to_page_range(&init_mm, (unsigned long)area->addr,
				size, f, ptes ? &ptes : NULL)) {
		free_vm_area(area);
		return NULL;
	}

	return area;
}
EXPORT_SYMBOL_GPL(alloc_vm_area);

void free_vm_area(struct vm_struct *area)
{
	struct vm_struct *ret;
	ret = remove_vm_area(area->addr);
	BUG_ON(ret != area);
	kfree(area);
}
EXPORT_SYMBOL_GPL(free_vm_area);

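/*
 * Illustrative sketch (not part of the original file): alloc_vm_area()
 * reserves one page of address space and hands back a pointer to its PTE
 * slot; the caller installs the mapping itself. Names are hypothetical.
 */
#if 0	/* example only */
static struct vm_struct *example_reserve_pte(pte_t **pte_slot)
{
	pte_t *ptes[1];
	struct vm_struct *area = alloc_vm_area(PAGE_SIZE, ptes);

	if (!area)
		return NULL;
	*pte_slot = ptes[0];	/* PTE for area->addr, in init_mm */
	return area;		/* undo with free_vm_area(area) */
}
#endif
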
#ifdef CONFIG_SMP
static struct vmap_area *node_to_va(struct rb_node *n)
{
	return n ? rb_entry(n, struct vmap_area, rb_node) : NULL;
}

/**
 * pvm_find_next_prev - find the next and prev vmap_area surrounding @end
 * @end: target address
 * @pnext: out arg for the next vmap_area
 * @pprev: out arg for the previous vmap_area
 *
 * Returns: %true if either or both of next and prev are found,
 * %false if no vmap_area exists
 *
 * Find vmap_areas whose end addresses enclose @end, i.e. if not
 * NULL, *pnext->va_end > @end and *pprev->va_end <= @end.
 */
static bool pvm_find_next_prev(unsigned long end,
			       struct vmap_area **pnext,
			       struct vmap_area **pprev)
{
	struct rb_node *n = vmap_area_root.rb_node;
	struct vmap_area *va = NULL;

	while (n) {
		va = rb_entry(n, struct vmap_area, rb_node);
		if (end < va->va_end)
			n = n->rb_left;
		else if (end > va->va_end)
			n = n->rb_right;
		else
			break;
	}

	if (!va)
		return false;

	if (va->va_end > end) {
		*pnext = va;
		*pprev = node_to_va(rb_prev(&(*pnext)->rb_node));
	} else {
		*pprev = va;
		*pnext = node_to_va(rb_next(&(*pprev)->rb_node));
	}
	return true;
}

/**
 * pvm_determine_end - find the highest aligned address between two vmap_areas
 * @pnext: in/out arg for the next vmap_area
 * @pprev: in/out arg for the previous vmap_area
 * @align: alignment
 *
 * Returns: determined end address
 *
 * Find the highest aligned address between *@pnext and *@pprev below
 * VMALLOC_END. *@pnext and *@pprev are adjusted so that the aligned
 * down address is between the end addresses of the two vmap_areas.
 *
 * Please note that the address returned by this function may fall
 * inside *@pnext vmap_area. The caller is responsible for checking
 * that.
 */
static unsigned long pvm_determine_end(struct vmap_area **pnext,
				       struct vmap_area **pprev,
				       unsigned long align)
{
	const unsigned long vmalloc_end = VMALLOC_END & ~(align - 1);
	unsigned long addr;

	if (*pnext)
		addr = min((*pnext)->va_start & ~(align - 1), vmalloc_end);
	else
		addr = vmalloc_end;

	while (*pprev && (*pprev)->va_end > addr) {
		*pnext = *pprev;
		*pprev = node_to_va(rb_prev(&(*pnext)->rb_node));
	}

	return addr;
}

/**
 * pcpu_get_vm_areas - allocate vmalloc areas for percpu allocator
 * @offsets: array containing offset of each area
 * @sizes: array containing size of each area
 * @nr_vms: the number of areas to allocate
 * @align: alignment, all entries in @offsets and @sizes must be aligned to this
 *
 * Returns: kmalloc'd vm_struct pointer array pointing to allocated
 * vm_structs on success, %NULL on failure
 *
 * Percpu allocator wants to use congruent vm areas so that it can
 * maintain the offsets among percpu areas. This function allocates
 * congruent vmalloc areas for it with GFP_KERNEL. These areas tend to
 * be scattered pretty far, distance between two areas easily going up
 * to gigabytes. To avoid interacting with regular vmallocs, these
 * areas are allocated from top.
 *
 * Despite its complicated look, this allocator is rather simple. It
 * does everything top-down and scans areas from the end looking for
 * matching slot. While scanning, if any of the areas overlaps with
 * existing vmap_area, the base address is pulled down to fit the
 * area. Scanning is repeated till all the areas fit and then all
 * necessary data structures are inserted and the result is returned.
 */
struct vm_struct **pcpu_get_vm_areas(const unsigned long *offsets,
				     const size_t *sizes, int nr_vms,
				     size_t align)
{
	const unsigned long vmalloc_start = ALIGN(VMALLOC_START, align);
	const unsigned long vmalloc_end = VMALLOC_END & ~(align - 1);
	struct vmap_area **vas, *prev, *next;
	struct vm_struct **vms;
	int area, area2, last_area, term_area;
	unsigned long base, start, end, last_end;
	bool purged = false;

	/* verify parameters and allocate data structures */
	BUG_ON(offset_in_page(align) || !is_power_of_2(align));
	for (last_area = 0, area = 0; area < nr_vms; area++) {
		start = offsets[area];
		end = start + sizes[area];

		/* is everything aligned properly? */
		BUG_ON(!IS_ALIGNED(offsets[area], align));
		BUG_ON(!IS_ALIGNED(sizes[area], align));

		/* detect the area with the highest address */
		if (start > offsets[last_area])
			last_area = area;

		for (area2 = 0; area2 < nr_vms; area2++) {
			unsigned long start2 = offsets[area2];
			unsigned long end2 = start2 + sizes[area2];

			if (area2 == area)
				continue;

			BUG_ON(start2 >= start && start2 < end);
			BUG_ON(end2 <= end && end2 > start);
		}
	}
	last_end = offsets[last_area] + sizes[last_area];

	if (vmalloc_end - vmalloc_start < last_end) {
		WARN_ON(true);
		return NULL;
	}

	vms = kcalloc(nr_vms, sizeof(vms[0]), GFP_KERNEL);
	vas = kcalloc(nr_vms, sizeof(vas[0]), GFP_KERNEL);
	if (!vas || !vms)
		goto err_free2;

	for (area = 0; area < nr_vms; area++) {
		vas[area] = kzalloc(sizeof(struct vmap_area), GFP_KERNEL);
		vms[area] = kzalloc(sizeof(struct vm_struct), GFP_KERNEL);
		if (!vas[area] || !vms[area])
			goto err_free;
	}
retry:
	spin_lock(&vmap_area_lock);

	/* start scanning - we scan from the top, begin with the last area */
	area = term_area = last_area;
	start = offsets[area];
	end = start + sizes[area];

	if (!pvm_find_next_prev(vmap_area_pcpu_hole, &next, &prev)) {
		base = vmalloc_end - last_end;
		goto found;
	}
	base = pvm_determine_end(&next, &prev, align) - end;

	while (true) {
		BUG_ON(next && next->va_end <= base + end);
		BUG_ON(prev && prev->va_end > base + end);

		/*
		 * base might have underflowed, add last_end before
		 * comparing.
		 */
		if (base + last_end < vmalloc_start + last_end) {
			spin_unlock(&vmap_area_lock);
			if (!purged) {
				purge_vmap_area_lazy();
				purged = true;
				goto retry;
			}
			goto err_free;
		}

		/*
		 * If next overlaps, move base downwards so that it's
		 * right below next and then recheck.
		 */
		if (next && next->va_start < base + end) {
			base = pvm_determine_end(&next, &prev, align) - end;
			term_area = area;
			continue;
		}

		/*
		 * If prev overlaps, shift down next and prev and move
		 * base so that it's right below new next and then
		 * recheck.
		 */
		if (prev && prev->va_end > base + start) {
			next = prev;
			prev = node_to_va(rb_prev(&next->rb_node));
			base = pvm_determine_end(&next, &prev, align) - end;
			term_area = area;
			continue;
		}

		/*
		 * This area fits, move on to the previous one. If
		 * the previous one is the terminal one, we're done.
		 */
		area = (area + nr_vms - 1) % nr_vms;
		if (area == term_area)
			break;
		start = offsets[area];
		end = start + sizes[area];
		pvm_find_next_prev(base + end, &next, &prev);
	}
found:
	/* we've found a fitting base, insert all va's */
	for (area = 0; area < nr_vms; area++) {
		struct vmap_area *va = vas[area];

		va->va_start = base + offsets[area];
		va->va_end = va->va_start + sizes[area];
		__insert_vmap_area(va);
	}

	vmap_area_pcpu_hole = base + offsets[last_area];

	spin_unlock(&vmap_area_lock);

	/* insert all vm's */
	for (area = 0; area < nr_vms; area++)
		setup_vmalloc_vm(vms[area], vas[area], VM_ALLOC,
				 pcpu_get_vm_areas);

	kfree(vas);
	return vms;

err_free:
	for (area = 0; area < nr_vms; area++) {
		kfree(vas[area]);
		kfree(vms[area]);
	}
err_free2:
	kfree(vas);
	kfree(vms);
	return NULL;
}

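/*
 * Illustrative sketch (not part of the original file): asking for two
 * congruent areas 1MiB apart, as the percpu allocator does on a larger
 * scale. example_pcpu_areas() is a hypothetical helper.
 */
#if 0	/* example only */
static struct vm_struct **example_pcpu_areas(void)
{
	static const unsigned long offsets[2] = { 0, SZ_1M };
	static const size_t sizes[2] = { SZ_64K, SZ_64K };

	/* Both offsets and sizes must be multiples of the alignment. */
	return pcpu_get_vm_areas(offsets, sizes, 2, SZ_64K);
}
#endif
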
/**
 * pcpu_free_vm_areas - free vmalloc areas for percpu allocator
 * @vms: vm_struct pointer array returned by pcpu_get_vm_areas()
 * @nr_vms: the number of allocated areas
 *
 * Free vm_structs and the array allocated by pcpu_get_vm_areas().
 */
void pcpu_free_vm_areas(struct vm_struct **vms, int nr_vms)
{
	int i;

	for (i = 0; i < nr_vms; i++)
		free_vm_area(vms[i]);
	kfree(vms);
}
#endif	/* CONFIG_SMP */

#ifdef CONFIG_PROC_FS
static void *s_start(struct seq_file *m, loff_t *pos)
	__acquires(&vmap_area_lock)
{
	loff_t n = *pos;
	struct vmap_area *va;

	spin_lock(&vmap_area_lock);
	va = list_entry((&vmap_area_list)->next, typeof(*va), list);
	while (n > 0 && &va->list != &vmap_area_list) {
		n--;
		va = list_entry(va->list.next, typeof(*va), list);
	}
	if (!n && &va->list != &vmap_area_list)
		return va;

	return NULL;
}

static void *s_next(struct seq_file *m, void *p, loff_t *pos)
{
	struct vmap_area *va = p, *next;

	++*pos;
	next = list_entry(va->list.next, typeof(*va), list);
	if (&next->list != &vmap_area_list)
		return next;

	return NULL;
}

static void s_stop(struct seq_file *m, void *p)
	__releases(&vmap_area_lock)
{
	spin_unlock(&vmap_area_lock);
}

static void show_numa_info(struct seq_file *m, struct vm_struct *v)
{
	if (IS_ENABLED(CONFIG_NUMA)) {
		unsigned int nr, *counters = m->private;

		if (!counters)
			return;

		if (v->flags & VM_UNINITIALIZED)
			return;
		/* Pair with smp_wmb() in clear_vm_uninitialized_flag() */
		smp_rmb();

		memset(counters, 0, nr_node_ids * sizeof(unsigned int));

		for (nr = 0; nr < v->nr_pages; nr++)
			counters[page_to_nid(v->pages[nr])]++;

		for_each_node_state(nr, N_HIGH_MEMORY)
			if (counters[nr])
				seq_printf(m, " N%u=%u", nr, counters[nr]);
	}
}

static int s_show(struct seq_file *m, void *p)
{
	struct vmap_area *va = p;
	struct vm_struct *v;

	/*
	 * s_show can encounter a race with remove_vm_area(): !VM_VM_AREA
	 * means the vmap area is being torn down or is a vm_map_ram
	 * allocation.
	 */
	if (!(va->flags & VM_VM_AREA))
		return 0;

	v = va->vm;

	seq_printf(m, "0x%pK-0x%pK %7ld",
		v->addr, v->addr + v->size, v->size);

	if (v->caller)
		seq_printf(m, " %pS", v->caller);

	if (v->nr_pages)
		seq_printf(m, " pages=%d", v->nr_pages);

	if (v->phys_addr)
		seq_printf(m, " phys=%llx", (unsigned long long)v->phys_addr);

	if (v->flags & VM_IOREMAP)
		seq_puts(m, " ioremap");

	if (v->flags & VM_ALLOC)
		seq_puts(m, " vmalloc");

	if (v->flags & VM_MAP)
		seq_puts(m, " vmap");

	if (v->flags & VM_USERMAP)
		seq_puts(m, " user");

	if (v->flags & VM_VPAGES)
		seq_puts(m, " vpages");

	if (v->flags & VM_LOWMEM)
		seq_puts(m, " lowmem");

	show_numa_info(m, v);
	seq_putc(m, '\n');
	return 0;
}

static const struct seq_operations vmalloc_op = {
	.start = s_start,
	.next = s_next,
	.stop = s_stop,
	.show = s_show,
};

static int vmalloc_open(struct inode *inode, struct file *file)
{
	if (IS_ENABLED(CONFIG_NUMA))
		return seq_open_private(file, &vmalloc_op,
					nr_node_ids * sizeof(unsigned int));
	else
		return seq_open(file, &vmalloc_op);
}

static const struct file_operations proc_vmalloc_operations = {
	.open = vmalloc_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = seq_release_private,
};

static int __init proc_vmalloc_init(void)
{
	proc_create("vmallocinfo", S_IRUSR, NULL, &proc_vmalloc_operations);
	return 0;
}
module_init(proc_vmalloc_init);

#endif