Merge remote-tracking branch 'origin/auto-kernel' into auto-kernel-oss

* origin/auto-kernel:
  iommu: revert changes from LE.UM.3.3.2-08200-SDX55
  techpack: audio: dsp: properly guard the function
  drm/msm/sde: add sysfs node for trigger wake up early
  IPA: Remove IPA page allocation failure warning
  ARM64: dts: raphael: disable wdog snarl IRQ
  power: smb5: control wdog snarl irq enabled from device tree
  smb5-lib: Create macro to communicate suspend ICL threshold
  scsi: ufs: disable clocks all the time when autohibern8 supports
  kernel: freezer: don't freeze on exit if killable
  kernel: freezer: sync TIF_SIGPENDING clearing with signals
  cgroup: Implement Optionally killable freezer
  thermal: Don't register for non-existing thermal zone
  ARM64: dts: disable second gpu thermal zone
  block,cfq: Set cfq_back_penalty to 1
  block,cfq: Set cfq_quantum to 16
  cpuidle: enter_state: Don't needlessly calculate diff time
  BACKPORT: disp: msm: sde: add pm QoS vote on CPU receiving display IRQ
  firmware: update goodix_gt9886_cfg_f11 touch firmware
  ARM64: configs: raphael: sync with previous changes
  qcacld: do not manually re-enable -Wmaybe-uninitialized
  ext4: Allocate 128-byte allocation context on the stack
  xattr: Avoid dynamically allocating memory in getxattr for small xattrs
  kernfs: use buffer from the stack space
  ARM64: configs: raphael: Disable stack frame size warning
  net/ipv4: fib_trie: Avoid cryptic ternary expressions
  ion: system_heap: Speed up system heap allocations
  drm/sde: use buffer from the stack space
  kthread: use buffer from the stack space
  ARM64: configs: raphael: Disable SMACK and Integrity security suites
  exec: use bprm from the stack space
  sched: do not allocate window cpu arrays separately
  power_supply: don't allocate attrname
  drm/msm/sde: allocate kmem_fence_pool once
  msm: vidc: allocate kmem_buf_pool once
  binfmt_elf: Don't allocate memory dynamically in load_elf_binary
  ANDROID: sdcardfs: Alloc memory only when needed in __sdcardfs_lookup()
  ALSA: control: Don't dynamically allocate a single struct instance
  ALSA: control_compat: Don't dynamically allocate single-use structs
  media: v4l2-ioctl: Use a larger on-stack video copy buffer
  kobject_uevent: Allocate environment buffer on the stack
  scatterlist: Don't allocate sg lists using __get_free_page
  mm/slab_common: Align all caches' objects to hardware cachelines
  ext4: Allocate allocation-context on the stack
  Revert "lib: fix stall in __bitmap_parselist()"
  init: try to mount partition labeled "system" first
  arm64: Avoid watchdog during dump wlan firmware after panic
  arm64/kernel: jump_label: Switch to relative references
  locking/static_key: Don't take sleeping locks in __static_key_slow_dec_deferred()
  locking/static_key: Factor out the fast path of static_key_slow_dec()
  locking/static_key: Add support for deferred static branches
  jump_label: Add branch hints to static_branch_{un,}likely()
  locking/static_key: Fix false positive warnings on concurrent dec/inc
  jump_label: Fix NULL dereference bug in __jump_label_mod_update()
  jump_label: Annotate entries that operate on __init code earlier
  jump_label: Implement generic support for relative references
  jump_label: Abstract jump_entry member accessors
  jump_label/lockdep: Assert we hold the hotplug lock for _cpuslocked() operations
  jump_label: Fix typo in warning message
  jump_label: Use static_key_linked() accessor
  jump_label: Disable jump labels in __exit code
  jump_label: Fix sparc64 warning
  extable: Make init_kernel_text() global
  jump_label: Warn on failed jump_label patching attempt
  jump_label: Explicitly disable jump labels in __init code
  rcu: Speed up calling of RCU tasks callbacks
  arm64/neon: Disable -Wincompatible-pointer-types when building with Clang
  arm64/neon: add workaround for ambiguous C99 stdint.h types
  binder: Reserve caches for small, high-frequency memory allocations
  Revert "Initialize ata before graphics"
  security: selinux: remove __rticdata attribute
  dma_buf: try to use kmem_cache pool for dmabuf allocations
  dma_buf: use kmem_cache pool for struct sync_file
  dma_buf: use kmem_cache pool for struct dma_buf_attachment
  quota_tree: Avoid dynamic memory allocations
  dcache: increase DNAME_INLINE_LEN
  drm/msm/sde: use kmem_cache pool for struct sde_fence
  drm/msm: use kmem_cache pool for struct vblank_work
  msm: kgsl: use kmem_cache pool for draw objects
  msm: camera: use kmem_cache pool for struct sync_user_payload
  msm: vidc: Avoid dynamic memory allocation for small voting data
  msm: vidc: use kmem_cache pool for struct msm_vidc_buffer
  kernfs: use kmem_cache pool for struct kernfs_open_node/file
  sdcardfs: use kmem_cache pool for struct sdcardfs_file_info
  cgroup: use kmem_cache pool for struct cgrp_cset_link
  sde_crtc: Don't allocate memory dynamically in sde_crtc_atomic_check()
  dma-buf: Avoid dynamic memory allocation for small info buffers
  msm: kgsl: Avoid dynamically allocating small command buffers
  msm: kgsl: Don't allocate memory dynamically for temp command buffers
  msm: kgsl: Don't allocate memory dynamically for drawobj sync structs
  ARM64: configs: raphael: DNM: disable Auditing
  drm: display: silence display_set_power logspam
  techpack: audio: silence ELUS logging
  fs: sdcardfs: silence spammy loggers
  techpack: data: rmnet_perf: disable debugging
  drivers: silence few more loggers
  qcom-rpmh-mailbox: silence, driver!
  diagfwd_cntl: silence some more debug
  power: wakeup: silence logspam
  kernel: silence suspends/resume logging
  power: suspend: silence suspend monitor
  msm_vidc: silence video instance open/close spam
  backlight: silence a spammy logger
  mm: compaction: avoid 100% CPU usage during compaction when a task is killed
  gpu: adreno: run kgsl_3d_init on perf critical thread
  cpuidle: Mark CPUs idle as late as possible to avoid unneeded IPIs
  cpuidle: Optimize pm_qos notifier callback and IPI semantics
  arm64: Allow IPI_WAKEUP to be used outside of the ACPI parking protocol
  pinctrl: msm: Remove explicit barriers from mmio ops where unneeded
  locking/atomics, asm-generic/bitops/atomic.h: Rewrite using atomic_*() APIs
  drm/msm/sde: Wait for ctl_start asynchronously on cmd mode panels
  msm: kgsl: Don't try to wait for fences that have been signaled
  ARM64: dts: remove pm qos active latency override
  msm: kgsl: Relax CPU latency requirements to save power
  cpuidle: lpm-levels: Allow exit latencies equal to target latencies
  msm: kgsl: Wake GPU upon receiving an ioctl rather than upon touch input
  qos: Execute notifier callbacks atomically
  qos: Remove remaining instances of disabling IRQs for pm_qos_lock
  qos: Don't disable interrupts while holding pm_qos_lock
  msm: kgsl: Mark IRQ and worker thread as performance critical
  drm/msm: only force actual screen threads to big cluster
  drm/msm: Mark important kthreads as performance critical
  drm: Mark IRQ as performance critical
  scsi: ufshcd: mark IRQ as performance critical
  irq: silence 'irq no longer affine' messages
  kernel: irq: add more backup pathways for perf critical IRQs
  kernel: irq: properly disallow userspace from changing IRQs affinity
  kernel: irq: manage: use a different way of affining perf IRQs
  kernel: Don't allow IRQ affinity masks to have more than one CPU
  kernel: Add API to mark IRQs and kthreads as performance critical
  proc: cmdline: Patch SafetyNet flags
  proc: Remove SafetyNet flags from /proc/cmdline
  usb: gadget: f_fs: silence unused-variable warnings with IPC LOGGING disabled
  slimbus: fix a maybe-unused variable warning
  ANDROID: increase limit on sched-tune boost groups
  sched/tune: Increase the cgroup limit to 7
  cpufreq: schedutil: use scnprintf() instead of snprintf()
  sched: cpufreq_schedutil: Fixup snprintf warnings.
  cpufreq: Avoid leaving stale IRQ work items during CPU offline
  cpufreq: Rename cpufreq_can_do_remote_dvfs()
  sched/cpufreq_schedutil: create a function for common steps
  sched/walt: Improve the scheduler
  sched/core: Ensure cpu number is valid
  Revert "mm: oom_kill: reap memory of a task that receives SIGKILL"
  ARM64: configs: raphael: Disable External SOCs Control Support
  ARM64: configs: raphael: Disable Signature verification
  ARM64: configs: raphael: Disable Scheduler Debugging
  ARM64: configs: raphael: Disable unused ERRATUM fixes
  ARM64: configs: raphael: Disable generic sound device drivers
  ARM64: configs: raphael: Disable unused Switch Architecture drivers
  techpack: audio: dsp: fix compilation without VOICE_MHI
  ARM64: configs: raphael: Disable MHI protocol support
  ARM64: configs: raphael: disable PCI Bus support
  PCI: Add dummy functions to fix IPAv3 in CONFIG_PCI=n builds
  techpack: audio: silence unused variable warnings
  vfs: Bump max inline dirent name size
  ARM64: configs: raphael: Enable optimized inlining
  compiler: allow all arches to enable CONFIG_OPTIMIZE_INLINING
  arm64: mark (__)cpus_have_const_cap as __always_inline
  arm64: configs: raphael: enable config ARCH_HAS_FAST_MULTIPLIER
  arm64: Select ARCH_HAS_FAST_MULTIPLIER
  ARM64: configs: raphael: Sync for fast full refcount checking
  FROMLIST: arm64: kernel: implement fast refcount checking
  arm64: debug: Separate debug hooks based on target exception level
  writeback: hardcode dirty_expire_centisecs=3000 (30s)
  soc: dcc_v2: remove _no_log() usage
  treewide: remove remaining _no_log() usage
  ARM64: configs: raphael: sync for no RTB support
  Revert "ARM: msm: add support for logged IO accessors"
  Revert "arm64: mm: Log the process id in the rtb"
  Revert "sched: move logging process id in the rtb to sched"
  Revert "ARM: gic-v3: Log the IRQs in RTB before handling an IRQ"
  Revert "ARM: gic: Add support for logging interrupts in RTB"
  Revert "trace: rtb: add msm_rtb register tracing feature snapshot"
  Revert "msm: redefine __raw_{read, write}v for RTB"
  Revert "arm64: Prevent msm-rtb tracing in memcpy_{from,to}io and memset_io"
  Revert "drivers: GICv3: remove the rtb logs of gic write and read"
  mm: kmemleak: Don't die when memory allocation fails
  xfrm interface: fix memory leak on creation
  arm64: vdso: Mark vdso_pagelist allocation as not a leak
  new: netfilter: mark a few allocations as not leaks
  dmaengine: Fix memory leak in dma_async_device_register
  qcom: pdc: fix a memory leak
  blkdev: switch to SSD mode and entropy gathering
  mm/page_alloc.c: free order-0 pages through PCP in page_frag_free()
  compat_ioctl: add compat_ptr_ioctl()
  timer: Convert schedule_timeout() to use from_timer()
  random: don't forget compat_ioctl on urandom
  compat_ioctl: remove /dev/random commands
  random: try to actively add entropy rather than passively wait for it
  random: fix soft lockup when trying to read from an uninitialized blocking pool
  random: only read from /dev/random after its pool has received 128 bits
  random: remove preempt disabled region
  random: Fix whitespace pre random-bytes work
  drivers/char/random.c: remove unused dont_count_entropy
  block: silently forbid sending any ioctl to a partition
  msm: vidc: do not allow queue buffer in flush
  ARM64: configs: raphael: Switch to fq_codel queue discipline
  treewide: silence warning about create IPC logging context
  drivers: squash make every IPC LOGGING a no-op
  treewide: make every ipc_loggings a no-op
  treewide: Don't warn about debugfs init failure if disabled
  ARM64: configs: raphael: disable DEBUG_KERNEL
  ARM64: configs: raphael: disable DEBUG_FS
  ARM64: configs: raphael: Disable TRACING
  trace: add CONFIG_DISABLE_TRACE_PRINTK option
  ARM64: configs: raphael: disable profiling support
  uid_sys_stats: Remove dependency on the profiling subsystem
  profiling: Implement a simple task exit notifier when disabled
  memory_state_time: Remove dependency on profiling support
  msm_bus: remove tracer
  msm: msm_bus: Never compile debugfs-related code
  msm: msm_bus: Kill transaction logging functionality
  cpuidle: lpm-levels: Remove debug event logging
  scsi: ufs: Fix unused function when tracepoints are disabled
  Revert "Perf: arm64: Add Snapshot of perf tracepoints"
  kernel/printk: use on-stack allocations for kernel log
  ipa_v3: fix some maybe-uninitialised warnings
  drivers: fix a couple of unused-variable warnings
  qcom: soc: enable MSM_IDLE_STATS even with debugfs disabled
  soc: qcom: make RPM stats driver debugfs independent
  lib: debug: remove DEBUG_KERNEL dependency from SCHED_DEBUG
  techpack: fix compilation with debugfs disabled
  techpack: fix the rest of debug_fs related errors
  msm: crypto: fix compilation with debugfs disabled
  msm: qcedev: don't bail out if debugfs is disabled
  drm/msm/dsi-staging: Don't bail out when debugfs creation fails
  media: msm: npu: disable debug driver
  msm_vidc: enable debug_fs usage
  soc: qcom: rpm_stats: enable debugfs usage
  qcacld-3.0: enable debugfs usage
  msm: ipa: enable debugfs usage
  drm: dp: enable debugfs usage
  sched: debug: enable debugfs usage
  wakeup: enable debugfs usage
  debugfs: Always compile core debugfs driver for Android kernels
  firmware: qcom: Remove debugfs dependency from tz-log driver
  init: Kconfig: Don't force DEBUG_KERNEL when EXPERT is enabled
  selinux: Remove audit dependency
  ARM64: configs: raphael: Disable SECCOMP
  kernel: fake system calls on seccomp to succeed
  blk: disable IO_STAT completely
  block: disable I/O stats accounting by default
  drivers: gpu: msm: silence unused variables warnings
  Revert "msm: kgsl: Add gpu_frequency trace during SLUMBER entry and exit"
  Revert "drm/msm/sde: add separate handling for rsc states"
  Revert "msm: kgsl: Add gpu_frequency tracepoint to power trace system"
  adreno: leave only /sys/kernel/debug/kgsl/proc/%d/mem for debugfs
  adreno: hardcode for a640
  adreno: disable snapshot, coresight and trace
  gpu: adreno: only compile Adreno 6xx driver
  drm/sde: hardcode for sm8150 v2.0
  drm/msm: skip sde_dbg compilation altogether
  drm/msm/sde: Remove redundant crtc_state assignment
  drm/msm/sde: Don't clear dim layer settings if there are none
  drivers: msm: Don't copy fence names by default
  drm/msm/sde: Omit debug logging code
  drm/msm: Omit SDE event log and most debugfs code
  drm/msm/sde: Cache register values when performing clock control
  drm/msm/sde: Remove register write debug logging
  soc: qcom: watchdog_v2: Fix memory leaks when memory_dump_v2 isn't built
  ufshcd: show bogus clkscale_enable even if it's not supported
  r8152: switch to realtek.com.tw release v2.12.0
  time: move frequently used functions to headers and declare them inline
  printk: disable console suspend by default
  block: replace io_schedule with io_schedule_timeout
  kernel: time: reduce ntp wakeups
  Initialize ata before graphics
  msm: kgsl: Stop slab shrinker when no more pages can be reclaimed
  mm: skip swap readahead when process is exiting
  Revert "Revert "select: use freezable blocking call""
  pwm: treat double free as a debug message
  mm: compaction: Fix bad logging
  random: always use /dev/urandom
  mm: compaction: switch FB notifier API to MSM DRM notifier
  mm: compaction: Add automatic compaction mechanism
  workqueue: Implement delayed_work_busy()
  random: prevent add_input from doing anything
  binfmt_elf.c: use get_random_int() to fix entropy depleting
  binder: set binder_debug_mask=0 to suppress logging
  selinux: don't require auditing
  msm: vidc: Disable msm_vidc_fw_debug_mode
  lib: Disable debug_locks
  msm: bus_arb: disable debug logging
  sde: sde_rotator_smmu: specify sync probe for sde_rotator_smmu
  CHROMIUM: selinux: Do not log permissive denials
  diag: disable DIAG_DEBUG
  sysctl: promote several nodes out of CONFIG_SCHED_DEBUG
  sysctl: promote sched_migration_cost_ns out of CONFIG_SCHED_DEBUG
  printk: fix message filter
  printk: filter out some more charging related logspam
  kernel: printk: silence suspend debug
  printk: ignore healthd and cacert related messages
  printk: block healthd messages in kernel log
  drivers: misc: silence driver logging
  drm: msm: dsi_panel: shut up!
  power: qcom: Specify sync probe for smb1390-charger
  power: qcom: fixup Xiaomi changes import
  power: supply: qcom: silence some annoying loggers
  ARM64: configs: raphael: Enable Cleancache
  ARM64: configs: raphael: Enable Userspace LMK
  ARM64: configs: raphael: switch PELT halflife to 32ms
  ARM64: configs: raphael: Disable PAGE_EXTENSION
  ARM64: configs: raphael: Disable unused graphics modules
  ARM64: configs: raphael: Disable USB Media Support
  ARM64: configs: raphael: Disable unused USBNET modules
  ARM64: configs: raphael: Disable workqueue power-efficient mode by default
  ARM64: configs: raphael: Disable config BLK_DEV_BSG
  ARM64: configs: raphael: Disable QCOM_MEM_OFFLINE
  ARM64: configs: raphael: Disable memory hotplugging support
  msm: ipa3: fix callback function type for ndo_start_xmit
  ANDROID: arm64: kprobes: fix kprobes without CONFIG_KRETPROBES
  ANDROID: kprobes: disable kretprobes with SCS
  ANDROID: kprobes: fix compilation without CONFIG_KRETPROBES
  ANDROID: Makefile: set -Qunused-arguments sooner
  FROMLIST: Makefile: lld: tell clang to use lld
  ANDROID: arm64: add support for building the KASLR kernel with LLVM lld
  Makefile: Use O3 optimization level for Clang LTO
  Revert "ANDROID: arm64: add support for building the KASLR kernel with LLVM lld"
  ANDROID: modules: undo -fdata-sections and -ffunction-sections
  ANDROID: modules: manually merge module .bss sections
  crc32: fix ambiguous aliases
  vdso32: Invoke clang with correct path to GCC toolchain
  ARM64: configs: raphael: Enable DSI panel configuration parser
  drm: msm: remove dynamic_debug dependency from DSI Parser
  dsi_panel: simplify DISPPARAM_HBM_BACKLIGHT_RESEND
  drm/msm/sde: init IRQ lists after allocated node
  drm/msm/dsi-staging: allow multiple listeners on TE
  drm: msm: remove esd irq handling
  ARM64: dts: sm8150-v2.dtsi: Fix cpu4_cpu_l3_latmon
  ARM64: dts: pcie: increase required bus BW for PCIe
  ARM: dts: Allow Adreno 640 to nap
  ARM: dts: sm8150: Set GPU idle timeout to 64 ms
  ARM: dts: sm8150: Disable unhandled or broken IRQ monitoring
  Revert "ARM: dts: msm: Set rcu_expedited for sdm855"
  ARM64: dts: dsi-panel: ea8076: remove esd irq handling
  smb5-lib: clear USB thermal ICL vote if temp_level is 0
  ARM64: configs: raphael: disable QRTR_FIFO config

Signed-off-by: UtsavBalar1231 <utsavbalar1231@gmail.com>
This commit is contained in:
UtsavBalar1231
2020-06-16 00:18:45 +05:30
387 changed files with 13358 additions and 7608 deletions

View File

@@ -90,6 +90,18 @@ The following cgroupfs files are created by cgroup freezer.
Shows the parent-state. 0 if none of the cgroup's ancestors is
frozen; otherwise, 1.
* freezer.killable: Read-write
When read, returns the killable state of a cgroup - "1" if frozen
tasks will respond to fatal signals, or "0" if they won't.
When written, this property sets the killable state of the cgroup.
A value equal to "1" will switch the state of all frozen tasks in
the cgroup to TASK_INTERRUPTIBLE (similarly to cgroup v2) and will
make them react to fatal signals. A value of "0" will switch the
state of frozen tasks to TASK_UNINTERRUPTIBLE and they won't respond
to signals unless thawed or unfrozen.
The root cgroup is non-freezable and the above interface files don't
exist.

View File

@@ -1,22 +0,0 @@
Register Trace Buffer (RTB)
The RTB is used to log discrete events in the system in an uncached buffer that
can be post processed from RAM dumps. The RTB must reserve memory using
the msm specific memory reservation bindings (see
Documentation/devicetree/bindings/arm/msm/memory-reserve.txt).
Required properties
- compatible: "qcom,msm-rtb"
- qcom,rtb-size: size of the RTB buffer in bytes
Optional properties:
- linux,contiguous-region: phandle reference to a CMA region
Example:
qcom,msm-rtb {
compatible = "qcom,msm-rtb";
qcom,rtb-size = <0x100000>;
};

View File

@@ -50,7 +50,6 @@ Currently, these files are in /proc/sys/vm:
- nr_trim_pages (only if CONFIG_MMU=n)
- numa_zonelist_order
- oom_dump_tasks
- reap_mem_on_sigkill
- oom_kill_allocating_task
- overcommit_kbytes
- overcommit_memory
@@ -658,24 +657,6 @@ The default value is 1 (enabled).
==============================================================
reap_mem_on_sigkill
This enables or disables the memory reaping for a SIGKILL received
process and that the sending process must have the CAP_KILL capabilities.
If this is set to 1, when a process receives SIGKILL from a process
that has the capability, CAP_KILL, the process is added into the oom_reaper
queue which can be picked up by the oom_reaper thread to reap the memory of
that process. This reaps for the process which received SIGKILL through
either sys_kill from user or kill_pid from kernel.
If this is set to 0, we are not reaping memory of a SIGKILL, sent through
either sys_kill from user or kill_pid from kernel, received process.
The default value is 0 (disabled).
==============================================================
oom_kill_allocating_task
This enables or disables killing the OOM-triggering task in

View File

@@ -506,6 +506,10 @@ CLANG_FLAGS += $(call cc-option, -Wno-bool-operation)
CLANG_FLAGS += $(call cc-option, -Wno-unsequenced)
KBUILD_CFLAGS += $(CLANG_FLAGS)
KBUILD_AFLAGS += $(CLANG_FLAGS)
ifeq ($(ld-name),lld)
KBUILD_CFLAGS += -fuse-ld=lld
endif
KBUILD_CPPFLAGS += -Qunused-arguments
export CLANG_FLAGS
ifeq ($(ld-name),lld)
KBUILD_CFLAGS += -fuse-ld=lld
@@ -681,6 +685,8 @@ endif
LLVM_AR := llvm-ar
LLVM_NM := llvm-nm
export LLVM_AR LLVM_NM
# Set O3 optimization level for LTO
LDFLAGS += --plugin-opt=O3
endif
# The arch Makefile can set ARCH_{CPP,A,C}FLAGS to override the default

View File

@@ -175,7 +175,7 @@ config ARCH_USE_BUILTIN_BSWAP
config KRETPROBES
def_bool y
depends on KPROBES && HAVE_KRETPROBES
depends on KPROBES && HAVE_KRETPROBES && ROP_PROTECTION_NONE
config USER_RETURN_NOTIFIER
bool
@@ -333,6 +333,9 @@ config HAVE_PERF_USER_STACK_DUMP
config HAVE_ARCH_JUMP_LABEL
bool
config HAVE_ARCH_JUMP_LABEL_RELATIVE
bool
config HAVE_RCU_TABLE_FREE
bool
@@ -1102,6 +1105,16 @@ config STRICT_MODULE_RWX
and non-text memory will be made non-executable. This provides
protection against certain security exploits (e.g. writing to text)
config ARCH_HAS_REFCOUNT_FULL
bool
select ARCH_HAS_REFCOUNT
help
An architecture selects this when the optimized refcount_t
implementation it provides covers all the cases that
CONFIG_REFCOUNT_FULL covers as well, in which case it makes no
sense to even offer CONFIG_REFCOUNT_FULL as a user selectable
option.
config ARCH_HAS_REFCOUNT
bool
help
@@ -1115,7 +1128,7 @@ config ARCH_HAS_REFCOUNT
against bugs in reference counts.
config REFCOUNT_FULL
bool "Perform full reference count validation at the expense of speed"
bool "Perform full reference count validation at the expense of speed" if !ARCH_HAS_REFCOUNT_FULL
help
Enabling this switches the refcounting infrastructure from a fast
unchecked atomic_t implementation to a fully state checked

View File

@@ -28,7 +28,6 @@
#include <asm/byteorder.h>
#include <asm/memory.h>
#include <asm-generic/pci_iomap.h>
#include <linux/msm_rtb.h>
#include <xen/xen.h>
/*
@@ -62,24 +61,23 @@ void __raw_readsl(const volatile void __iomem *addr, void *data, int longlen);
* the bus. Rather than special-case the machine, just let the compiler
* generate the access for CPUs prior to ARMv6.
*/
#define __raw_readw_no_log(a) (__chk_io_ptr(a), \
*(volatile unsigned short __force *)(a))
#define __raw_writew_no_log(v, a) ((void)(__chk_io_ptr(a), \
*(volatile unsigned short __force *)\
(a) = (v)))
#define __raw_readw(a) (__chk_io_ptr(a), *(volatile unsigned short __force *)(a))
#define __raw_writew(v,a) ((void)(__chk_io_ptr(a), *(volatile unsigned short __force *)(a) = (v)))
#else
/*
* When running under a hypervisor, we want to avoid I/O accesses with
* writeback addressing modes as these incur a significant performance
* overhead (the address generation must be emulated in software).
*/
static inline void __raw_writew_no_log(u16 val, volatile void __iomem *addr)
#define __raw_writew __raw_writew
static inline void __raw_writew(u16 val, volatile void __iomem *addr)
{
asm volatile("strh %1, %0"
: : "Q" (*(volatile u16 __force *)addr), "r" (val));
}
static inline u16 __raw_readw_no_log(const volatile void __iomem *addr)
#define __raw_readw __raw_readw
static inline u16 __raw_readw(const volatile void __iomem *addr)
{
u16 val;
asm volatile("ldrh %0, %1"
@@ -89,19 +87,22 @@ static inline u16 __raw_readw_no_log(const volatile void __iomem *addr)
}
#endif
static inline void __raw_writeb_no_log(u8 val, volatile void __iomem *addr)
#define __raw_writeb __raw_writeb
static inline void __raw_writeb(u8 val, volatile void __iomem *addr)
{
asm volatile("strb %1, %0"
: : "Qo" (*(volatile u8 __force *)addr), "r" (val));
}
static inline void __raw_writel_no_log(u32 val, volatile void __iomem *addr)
#define __raw_writel __raw_writel
static inline void __raw_writel(u32 val, volatile void __iomem *addr)
{
asm volatile("str %1, %0"
: : "Qo" (*(volatile u32 __force *)addr), "r" (val));
}
static inline void __raw_writeq_no_log(u64 val, volatile void __iomem *addr)
#define __raw_writeq __raw_writeq
static inline void __raw_writeq(u64 val, volatile void __iomem *addr)
{
register u64 v asm ("r2");
@@ -112,7 +113,8 @@ static inline void __raw_writeq_no_log(u64 val, volatile void __iomem *addr)
: "r" (v));
}
static inline u8 __raw_readb_no_log(const volatile void __iomem *addr)
#define __raw_readb __raw_readb
static inline u8 __raw_readb(const volatile void __iomem *addr)
{
u8 val;
asm volatile("ldrb %0, %1"
@@ -121,7 +123,8 @@ static inline u8 __raw_readb_no_log(const volatile void __iomem *addr)
return val;
}
static inline u32 __raw_readl_no_log(const volatile void __iomem *addr)
#define __raw_readl __raw_readl
static inline u32 __raw_readl(const volatile void __iomem *addr)
{
u32 val;
asm volatile("ldr %0, %1"
@@ -130,7 +133,8 @@ static inline u32 __raw_readl_no_log(const volatile void __iomem *addr)
return val;
}
static inline u64 __raw_readq_no_log(const volatile void __iomem *addr)
#define __raw_readq __raw_readq
static inline u64 __raw_readq(const volatile void __iomem *addr)
{
register u64 val asm ("r2");
@@ -140,48 +144,6 @@ static inline u64 __raw_readq_no_log(const volatile void __iomem *addr)
return val;
}
/*
* There may be cases when clients don't want to support or can't support the
* logging. The appropriate functions can be used but clients should carefully
* consider why they can't support the logging.
*/
#define __raw_write_logged(v, a, _t) ({ \
int _ret; \
volatile void __iomem *_a = (a); \
void *_addr = (void __force *)(_a); \
_ret = uncached_logk(LOGK_WRITEL, _addr); \
ETB_WAYPOINT; \
__raw_write##_t##_no_log((v), _a); \
if (_ret) \
LOG_BARRIER; \
})
#define __raw_writeb(v, a) __raw_write_logged((v), (a), b)
#define __raw_writew(v, a) __raw_write_logged((v), (a), w)
#define __raw_writel(v, a) __raw_write_logged((v), (a), l)
#define __raw_writeq(v, a) __raw_write_logged((v), (a), q)
#define __raw_read_logged(a, _l, _t) ({ \
unsigned _t __a; \
const volatile void __iomem *_a = (a); \
void *_addr = (void __force *)(_a); \
int _ret; \
_ret = uncached_logk(LOGK_READL, _addr); \
ETB_WAYPOINT; \
__a = __raw_read##_l##_no_log(_a);\
if (_ret) \
LOG_BARRIER; \
__a; \
})
#define __raw_readb(a) __raw_read_logged((a), b, char)
#define __raw_readw(a) __raw_read_logged((a), w, short)
#define __raw_readl(a) __raw_read_logged((a), l, int)
#define __raw_readq(a) __raw_read_logged((a), q, long long)
/*
* Architecture ioremap implementation.
*/
@@ -363,24 +325,12 @@ extern void _memset_io(volatile void __iomem *, int, size_t);
__raw_readl(c)); __r; })
#define readq_relaxed(c) ({ u64 __r = le64_to_cpu((__force __le64) \
__raw_readq(c)); __r; })
#define readb_relaxed_no_log(c) ({ u8 __r = __raw_readb_no_log(c); __r; })
#define readl_relaxed_no_log(c) ({ u32 __r = le32_to_cpu((__force __le32) \
__raw_readl_no_log(c)); __r; })
#define readq_relaxed_no_log(c) ({ u64 __r = le64_to_cpu((__force __le64) \
__raw_readq_no_log(c)); __r; })
#define writeb_relaxed(v, c) __raw_writeb(v, c)
#define writew_relaxed(v, c) __raw_writew((__force u16) cpu_to_le16(v), c)
#define writel_relaxed(v, c) __raw_writel((__force u32) cpu_to_le32(v), c)
#define writeq_relaxed(v, c) __raw_writeq((__force u64) cpu_to_le64(v), c)
#define writeb_relaxed_no_log(v, c) ((void)__raw_writeb_no_log((v), (c)))
#define writew_relaxed_no_log(v, c) __raw_writew_no_log((__force u16) \
cpu_to_le16(v), c)
#define writel_relaxed_no_log(v, c) __raw_writel_no_log((__force u32) \
cpu_to_le32(v), c)
#define writeq_relaxed_no_log(v, c) __raw_writeq_no_log((__force u64) \
cpu_to_le64(v), c)
#define readb(c) ({ u8 __v = readb_relaxed(c); __iormb(); __v; })
#define readw(c) ({ u16 __v = readw_relaxed(c); __iormb(); __v; })
@@ -401,24 +351,6 @@ extern void _memset_io(volatile void __iomem *, int, size_t);
#define writesw(p,d,l) __raw_writesw(p,d,l)
#define writesl(p,d,l) __raw_writesl(p,d,l)
#define readb_no_log(c) \
({ u8 __v = readb_relaxed_no_log(c); __iormb(); __v; })
#define readw_no_log(c) \
({ u16 __v = readw_relaxed_no_log(c); __iormb(); __v; })
#define readl_no_log(c) \
({ u32 __v = readl_relaxed_no_log(c); __iormb(); __v; })
#define readq_no_log(c) \
({ u64 __v = readq_relaxed_no_log(c); __iormb(); __v; })
#define writeb_no_log(v, c) \
({ __iowmb(); writeb_relaxed_no_log((v), (c)); })
#define writew_no_log(v, c) \
({ __iowmb(); writew_relaxed_no_log((v), (c)); })
#define writel_no_log(v, c) \
({ __iowmb(); writel_relaxed_no_log((v), (c)); })
#define writeq_no_log(v, c) \
({ __iowmb(); writeq_relaxed_no_log((v), (c)); })
#ifndef __ARMBE__
static inline void memset_io(volatile void __iomem *dst, unsigned c,
size_t count)

View File

@@ -46,21 +46,21 @@ EXPORT_SYMBOL(atomic_io_modify);
void _memcpy_fromio(void *to, const volatile void __iomem *from, size_t count)
{
while (count && (!IO_CHECK_ALIGN(from, 8) || !IO_CHECK_ALIGN(to, 8))) {
*(u8 *)to = readb_relaxed_no_log(from);
*(u8 *)to = readb_relaxed(from);
from++;
to++;
count--;
}
while (count >= 8) {
*(u64 *)to = readq_relaxed_no_log(from);
*(u64 *)to = readq_relaxed(from);
from += 8;
to += 8;
count -= 8;
}
while (count) {
*(u8 *)to = readb_relaxed_no_log(from);
*(u8 *)to = readb_relaxed(from);
from++;
to++;
count--;
@@ -76,21 +76,21 @@ void _memcpy_toio(volatile void __iomem *to, const void *from, size_t count)
void *p = (void __force *)to;
while (count && (!IO_CHECK_ALIGN(p, 8) || !IO_CHECK_ALIGN(from, 8))) {
writeb_relaxed_no_log(*(volatile u8 *)from, p);
writeb_relaxed(*(volatile u8 *)from, p);
from++;
p++;
count--;
}
while (count >= 8) {
writeq_relaxed_no_log(*(volatile u64 *)from, p);
writeq_relaxed(*(volatile u64 *)from, p);
from += 8;
p += 8;
count -= 8;
}
while (count) {
writeb_relaxed_no_log(*(volatile u8 *)from, p);
writeb_relaxed(*(volatile u8 *)from, p);
from++;
p++;
count--;
@@ -111,19 +111,19 @@ void _memset_io(volatile void __iomem *dst, int c, size_t count)
qc |= qc << 32;
while (count && !IO_CHECK_ALIGN(p, 8)) {
writeb_relaxed_no_log(c, p);
writeb_relaxed(c, p);
p++;
count--;
}
while (count >= 8) {
writeq_relaxed_no_log(qc, p);
writeq_relaxed(qc, p);
p += 8;
count -= 8;
}
while (count) {
writeb_relaxed_no_log(c, p);
writeb_relaxed(c, p);
p++;
count--;
}

View File

@@ -12,11 +12,13 @@ config ARM64
select ARCH_HAS_DEVMEM_IS_ALLOWED
select ARCH_HAS_ACPI_TABLE_UPGRADE if ACPI
select ARCH_HAS_ELF_RANDOMIZE
select ARCH_HAS_FAST_MULTIPLIER
select ARCH_HAS_FORTIFY_SOURCE
select ARCH_HAS_GCOV_PROFILE_ALL
select ARCH_HAS_GIGANTIC_PAGE if (MEMORY_ISOLATION && COMPACTION) || CMA
select ARCH_HAS_KCOV
select ARCH_HAS_SET_DIRECT_MAP
select ARCH_HAS_REFCOUNT_FULL
select ARCH_HAS_SET_MEMORY
select ARCH_HAS_SG_CHAIN
select ARCH_HAS_STRICT_KERNEL_RWX
@@ -93,6 +95,7 @@ config ARM64
select HAVE_ARCH_JUMP_LABEL
select HAVE_ARCH_KASAN if !(ARM64_16K_PAGES && ARM64_VA_BITS_48)
select HAVE_ARCH_KASAN_SW_TAGS if HAVE_ARCH_KASAN
select HAVE_ARCH_JUMP_LABEL_RELATIVE
select HAVE_ARCH_KGDB
select HAVE_ARCH_MMAP_RND_BITS
select HAVE_ARCH_MMAP_RND_COMPAT_BITS if COMPAT

View File

@@ -84,6 +84,10 @@ ifeq ($(CONFIG_COMPAT_VDSO), y)
else ifeq ($(cc-name),clang)
export CLANG_TRIPLE_ARM32 ?= $(CROSS_COMPILE_ARM32)
export CLANG_TARGET_ARM32 := --target=$(notdir $(CLANG_TRIPLE_ARM32:%-=%))
export GCC_TOOLCHAIN32_DIR := $(dir $(shell which $(CROSS_COMPILE_ARM32)ld))
export GCC_TOOLCHAIN32 := $(realpath $(GCC_TOOLCHAIN32_DIR)/..)
export CLANG_PREFIX32 := --prefix=$(GCC_TOOLCHAIN32_DIR)
export CLANG_GCC32_TC := --gcc-toolchain=$(GCC_TOOLCHAIN32)
export CONFIG_VDSO32 := y
vdso32 := -DCONFIG_VDSO32=1
else ifeq ($(shell which $(CROSS_COMPILE_ARM32)$(cc-name) 2> /dev/null),)

View File

@@ -62,7 +62,6 @@
qcom,mdss-panel-on-dimming-delay = <120>;
/* IRQF_ONESHOT | IRQF_TRIGGER_FALLING */
/* trig-flags: falling-0x0002 rasing-0x0001 */
qcom,esd-err-irq-gpio = <&tlmm 5 0x2002>;
qcom,disp-doze-lpm-backlight = <20>;
qcom,disp-doze-hbm-backlight = <266>;

View File

@@ -62,7 +62,6 @@
qcom,mdss-panel-on-dimming-delay = <120>;
/* IRQF_ONESHOT | IRQF_TRIGGER_FALLING */
/* trig-flags: falling-0x0002 rasing-0x0001 */
qcom,esd-err-irq-gpio = <&tlmm 5 0x2002>;
qcom,disp-doze-lpm-backlight = <20>;
qcom,disp-doze-hbm-backlight = <266>;

View File

@@ -115,6 +115,7 @@ And public attribution of xiaomi platforms(like F1 and so and)
qcom,sw-jeita-enable;
qcom,step-charging-enable;
qcom,wd-bark-time-secs = <16>;
google,wdog_snarl_disable;
};
&qupv3_se1_i2c {

View File

@@ -82,8 +82,7 @@
qcom,gpu-quirk-secvid-set-once;
qcom,gpu-quirk-cx-gdsc;
qcom,idle-timeout = <80>; //msecs
qcom,no-nap;
qcom,idle-timeout = <64>; //msecs
qcom,highest-bank-bit = <15>;
@@ -100,8 +99,6 @@
tzone-names = "gpuss-0-usr", "gpuss-1-usr";
qcom,pm-qos-active-latency = <44>;
clocks = <&clock_gpucc GPU_CC_CXO_CLK>,
<&clock_gcc GCC_DDRSS_GPU_AXI_CLK>,
<&clock_gcc GCC_GPU_MEMNOC_GFX_CLK>,

View File

@@ -93,6 +93,11 @@
#include "msm-arm-smmu-sm8150-v2.dtsi"
&pcie0 {
qcom,msm-bus,vectors-KBps =
<100 512 0 0>,
<100 512 500 2000000>;
reg = <0x1c00000 0x4000>,
<0x1c06000 0x1000>,
<0x60000000 0xf1d>,
@@ -203,6 +208,11 @@
};
&pcie1 {
qcom,msm-bus,vectors-KBps =
<100 512 0 0>,
<100 512 500 2000000>;
reg = <0x1c08000 0x4000>,
<0x1c0e000 0x2000>,
<0x40000000 0xf1d>,
@@ -1047,8 +1057,8 @@
< 1401600 998400000 >,
< 1708800 1267200000 >,
< 2016000 1344000000 >,
< 2419200 1536000000 >,
< 2841600 1612800000 >;
< 2227200 1536000000 >,
< 2419200 1612800000 >;
};
&cpu7_cpu_l3_latmon {

View File

@@ -549,7 +549,7 @@
};
chosen {
bootargs = "rcupdate.rcu_expedited=1 rcu_nocbs=0-7 cgroup.memory=nokmem,nosocket";
bootargs = "rcu_nocbs=0-7 cgroup.memory=nokmem,nosocket noirqdebug";
};
soc: soc { };

View File

@@ -449,6 +449,16 @@
status = "ok";
};
&thermal_zones {
gpuss-1-usr {
status = "disabled";
};
};
&msm_gpu {
tzone-names = "gpuss-0-usr";
};
&usb2_phy1 {
status = "ok";
};

File diff suppressed because it is too large Load Diff

View File

@@ -120,8 +120,8 @@ static inline void gic_write_bpr1(u32 val)
write_sysreg_s(val, SYS_ICC_BPR1_EL1);
}
#define gic_read_typer(c) readq_relaxed_no_log(c)
#define gic_write_irouter(v, c) writeq_relaxed_no_log(v, c)
#define gic_read_typer(c) readq_relaxed(c)
#define gic_write_irouter(v, c) writeq_relaxed(v, c)
#define gic_read_lpir(c) readq_relaxed(c)
#define gic_write_lpir(v, c) writeq_relaxed(v, c)

View File

@@ -21,13 +21,37 @@
#define __ASM_ATOMIC_H
#include <linux/compiler.h>
#include <linux/stringify.h>
#include <linux/types.h>
#include <asm/barrier.h>
#include <asm/brk-imm.h>
#include <asm/lse.h>
#ifdef __KERNEL__
/*
* To avoid having to allocate registers that pass the counter address and
* address of the call site to the overflow handler, encode the register and
* call site offset in a dummy cbz instruction that we can decode later.
*/
#define REFCOUNT_CHECK_TAIL \
" .subsection 1\n" \
"33: brk " __stringify(REFCOUNT_BRK_IMM) "\n" \
" cbz %[counter], 22b\n" /* never reached */ \
" .previous\n"
#define REFCOUNT_POST_CHECK_NEG \
"22: b.mi 33f\n" \
REFCOUNT_CHECK_TAIL
#define REFCOUNT_POST_CHECK_NEG_OR_ZERO \
" b.eq 33f\n" \
REFCOUNT_POST_CHECK_NEG
#define REFCOUNT_PRE_CHECK_ZERO(reg) "ccmp " #reg ", wzr, #8, pl\n"
#define REFCOUNT_PRE_CHECK_NONE(reg)
#define __ARM64_IN_ATOMIC_IMPL
#if defined(CONFIG_ARM64_LSE_ATOMICS) && defined(CONFIG_AS_LSE)

View File

@@ -327,4 +327,54 @@ __CMPXCHG_DBL(_mb, dmb ish, l, "memory")
#undef __CMPXCHG_DBL
#define REFCOUNT_OP(op, asm_op, pre, post, l) \
__LL_SC_INLINE int \
__LL_SC_PREFIX(__refcount_##op(int i, atomic_t *r)) \
{ \
unsigned int tmp; \
int result; \
\
asm volatile("// refcount_" #op "\n" \
" prfm pstl1strm, %[cval]\n" \
"1: ldxr %w1, %[cval]\n" \
" " #asm_op " %w[val], %w1, %w[i]\n" \
REFCOUNT_PRE_CHECK_ ## pre (%w1) \
" st" #l "xr %w1, %w[val], %[cval]\n" \
" cbnz %w1, 1b\n" \
REFCOUNT_POST_CHECK_ ## post \
: [val] "=&r"(result), "=&r"(tmp), [cval] "+Q"(r->counter) \
: [counter] "r"(&r->counter), [i] "Ir" (i) \
: "cc"); \
\
return result; \
} \
__LL_SC_EXPORT(__refcount_##op);
REFCOUNT_OP(add_lt, adds, ZERO, NEG_OR_ZERO, );
REFCOUNT_OP(sub_lt, subs, NONE, NEG, l);
REFCOUNT_OP(sub_le, subs, NONE, NEG_OR_ZERO, l);
__LL_SC_INLINE int
__LL_SC_PREFIX(__refcount_add_not_zero(int i, atomic_t *r))
{
unsigned int tmp;
int result;
asm volatile("// refcount_add_not_zero\n"
" prfm pstl1strm, %[cval]\n"
"1: ldxr %w[val], %[cval]\n"
" cbz %w[val], 2f\n"
" adds %w[val], %w[val], %w[i]\n"
" stxr %w1, %w[val], %[cval]\n"
" cbnz %w1, 1b\n"
REFCOUNT_POST_CHECK_NEG
"2:"
: [val] "=&r" (result), "=&r" (tmp), [cval] "+Q" (r->counter)
: [counter] "r"(&r->counter), [i] "Ir" (i)
: "cc");
return result;
}
__LL_SC_EXPORT(__refcount_add_not_zero);
#endif /* __ASM_ATOMIC_LL_SC_H */

View File

@@ -531,4 +531,85 @@ __CMPXCHG_DBL(_mb, al, "memory")
#undef __LL_SC_CMPXCHG_DBL
#undef __CMPXCHG_DBL
#define REFCOUNT_ADD_OP(op, pre, post) \
static inline int __refcount_##op(int i, atomic_t *r) \
{ \
register int w0 asm ("w0") = i; \
register atomic_t *x1 asm ("x1") = r; \
\
asm volatile(ARM64_LSE_ATOMIC_INSN( \
/* LL/SC */ \
__LL_SC_CALL(__refcount_##op) \
" cmp %w0, wzr\n" \
__nops(1), \
/* LSE atomics */ \
" ldadd %w[i], w30, %[cval]\n" \
" adds %w[i], %w[i], w30\n" \
REFCOUNT_PRE_CHECK_ ## pre (w30)) \
REFCOUNT_POST_CHECK_ ## post \
: [i] "+r" (w0), [cval] "+Q" (r->counter) \
: [counter] "r"(&r->counter), "r" (x1) \
: __LL_SC_CLOBBERS, "cc"); \
\
return w0; \
}
REFCOUNT_ADD_OP(add_lt, ZERO, NEG_OR_ZERO);
#define REFCOUNT_SUB_OP(op, post) \
static inline int __refcount_##op(int i, atomic_t *r) \
{ \
register int w0 asm ("w0") = i; \
register atomic_t *x1 asm ("x1") = r; \
\
asm volatile(ARM64_LSE_ATOMIC_INSN( \
/* LL/SC */ \
__LL_SC_CALL(__refcount_##op) \
" cmp %w0, wzr\n" \
__nops(1), \
/* LSE atomics */ \
" neg %w[i], %w[i]\n" \
" ldaddl %w[i], w30, %[cval]\n" \
" adds %w[i], %w[i], w30\n") \
REFCOUNT_POST_CHECK_ ## post \
: [i] "+r" (w0), [cval] "+Q" (r->counter) \
: [counter] "r" (&r->counter), "r" (x1) \
: __LL_SC_CLOBBERS, "cc"); \
\
return w0; \
}
REFCOUNT_SUB_OP(sub_lt, NEG);
REFCOUNT_SUB_OP(sub_le, NEG_OR_ZERO);
static inline int __refcount_add_not_zero(int i, atomic_t *r)
{
register int result asm ("w0");
register atomic_t *x1 asm ("x1") = r;
asm volatile(ARM64_LSE_ATOMIC_INSN(
/* LL/SC */
" mov %w0, %w[i]\n"
__LL_SC_CALL(__refcount_add_not_zero)
" cmp %w0, wzr\n"
__nops(6),
/* LSE atomics */
" ldr %w0, %[cval]\n"
"1: cmp %w0, wzr\n"
" b.eq 2f\n"
" add w30, %w0, %w[i]\n"
" cas %w0, w30, %[cval]\n"
" sub w30, w30, %w[i]\n"
" cmp %w0, w30\n"
" b.ne 1b\n"
" adds %w0, w30, %w[i]\n"
"2:\n")
REFCOUNT_POST_CHECK_NEG
: "=&r" (result), [cval] "+Q" (r->counter)
: [counter] "r" (&r->counter), [i] "Ir" (i), "r" (x1)
: __LL_SC_CLOBBERS, "cc");
return result;
}
#endif /* __ASM_ATOMIC_LSE_H */

View File

@@ -19,6 +19,7 @@
* 0x9xx: tag-based KASAN trap (allowed values 0x900 - 0x9ff)
*/
#define FAULT_BRK_IMM 0x100
#define REFCOUNT_BRK_IMM 0x101
#define KGDB_DYN_DBG_BRK_IMM 0x400
#define KGDB_COMPILED_DBG_BRK_IMM 0x401
#define BUG_BRK_IMM 0x800

View File

@@ -353,7 +353,7 @@ static inline bool cpu_have_feature(unsigned int num)
}
/* System capability check for constant caps */
static inline bool __cpus_have_const_cap(int num)
static __always_inline bool __cpus_have_const_cap(int num)
{
if (num >= ARM64_NCAPS)
return false;
@@ -367,7 +367,7 @@ static inline bool cpus_have_cap(unsigned int num)
return test_bit(num, cpu_hwcaps);
}
static inline bool cpus_have_const_cap(int num)
static __always_inline bool cpus_have_const_cap(int num)
{
if (static_branch_likely(&arm64_const_caps_ready))
return __cpus_have_const_cap(num);

View File

@@ -95,18 +95,24 @@ struct step_hook {
int (*fn)(struct pt_regs *regs, unsigned int esr);
};
void register_step_hook(struct step_hook *hook);
void unregister_step_hook(struct step_hook *hook);
void register_user_step_hook(struct step_hook *hook);
void unregister_user_step_hook(struct step_hook *hook);
void register_kernel_step_hook(struct step_hook *hook);
void unregister_kernel_step_hook(struct step_hook *hook);
struct break_hook {
struct list_head node;
u32 esr_val;
u32 esr_mask;
int (*fn)(struct pt_regs *regs, unsigned int esr);
u16 imm;
u16 mask; /* These bits are ignored when comparing with imm */
};
void register_break_hook(struct break_hook *hook);
void unregister_break_hook(struct break_hook *hook);
void register_user_break_hook(struct break_hook *hook);
void unregister_user_break_hook(struct break_hook *hook);
void register_kernel_break_hook(struct break_hook *hook);
void unregister_kernel_break_hook(struct break_hook *hook);
u8 debug_monitors_arch(void);

View File

@@ -30,35 +30,38 @@
#include <asm/early_ioremap.h>
#include <asm/alternative.h>
#include <asm/cpufeature.h>
#include <linux/msm_rtb.h>
#include <xen/xen.h>
/*
* Generic IO read/write. These perform native-endian accesses.
* that some architectures will want to re-define __raw_{read,write}w.
*/
static inline void __raw_writeb_no_log(u8 val, volatile void __iomem *addr)
#define __raw_writeb __raw_writeb
static inline void __raw_writeb(u8 val, volatile void __iomem *addr)
{
asm volatile("strb %w0, [%1]" : : "rZ" (val), "r" (addr));
}
static inline void __raw_writew_no_log(u16 val, volatile void __iomem *addr)
#define __raw_writew __raw_writew
static inline void __raw_writew(u16 val, volatile void __iomem *addr)
{
asm volatile("strh %w0, [%1]" : : "rZ" (val), "r" (addr));
}
static inline void __raw_writel_no_log(u32 val, volatile void __iomem *addr)
#define __raw_writel __raw_writel
static inline void __raw_writel(u32 val, volatile void __iomem *addr)
{
asm volatile("str %w0, [%1]" : : "rZ" (val), "r" (addr));
}
static inline void __raw_writeq_no_log(u64 val, volatile void __iomem *addr)
#define __raw_writeq __raw_writeq
static inline void __raw_writeq(u64 val, volatile void __iomem *addr)
{
asm volatile("str %x0, [%1]" : : "rZ" (val), "r" (addr));
}
static inline u8 __raw_readb_no_log(const volatile void __iomem *addr)
#define __raw_readb __raw_readb
static inline u8 __raw_readb(const volatile void __iomem *addr)
{
u8 val;
asm volatile(ALTERNATIVE("ldrb %w0, [%1]",
@@ -68,7 +71,8 @@ static inline u8 __raw_readb_no_log(const volatile void __iomem *addr)
return val;
}
static inline u16 __raw_readw_no_log(const volatile void __iomem *addr)
#define __raw_readw __raw_readw
static inline u16 __raw_readw(const volatile void __iomem *addr)
{
u16 val;
@@ -79,7 +83,8 @@ static inline u16 __raw_readw_no_log(const volatile void __iomem *addr)
return val;
}
static inline u32 __raw_readl_no_log(const volatile void __iomem *addr)
#define __raw_readl __raw_readl
static inline u32 __raw_readl(const volatile void __iomem *addr)
{
u32 val;
asm volatile(ALTERNATIVE("ldr %w0, [%1]",
@@ -89,7 +94,8 @@ static inline u32 __raw_readl_no_log(const volatile void __iomem *addr)
return val;
}
static inline u64 __raw_readq_no_log(const volatile void __iomem *addr)
#define __raw_readq __raw_readq
static inline u64 __raw_readq(const volatile void __iomem *addr)
{
u64 val;
asm volatile(ALTERNATIVE("ldr %0, [%1]",
@@ -99,46 +105,6 @@ static inline u64 __raw_readq_no_log(const volatile void __iomem *addr)
return val;
}
/*
* There may be cases when clients don't want to support or can't support the
* logging, The appropriate functions can be used but clinets should carefully
* consider why they can't support the logging
*/
#define __raw_write_logged(v, a, _t) ({ \
int _ret; \
volatile void __iomem *_a = (a); \
void *_addr = (void __force *)(_a); \
_ret = uncached_logk(LOGK_WRITEL, _addr); \
ETB_WAYPOINT; \
__raw_write##_t##_no_log((v), _a); \
if (_ret) \
LOG_BARRIER; \
})
#define __raw_writeb(v, a) __raw_write_logged((v), a, b)
#define __raw_writew(v, a) __raw_write_logged((v), a, w)
#define __raw_writel(v, a) __raw_write_logged((v), a, l)
#define __raw_writeq(v, a) __raw_write_logged((v), a, q)
#define __raw_read_logged(a, _l, _t) ({ \
_t __a; \
const volatile void __iomem *_a = (a); \
void *_addr = (void __force *)(_a); \
int _ret; \
_ret = uncached_logk(LOGK_READL, _addr); \
ETB_WAYPOINT; \
__a = __raw_read##_l##_no_log(_a); \
if (_ret) \
LOG_BARRIER; \
__a; \
})
#define __raw_readb(a) __raw_read_logged((a), b, u8)
#define __raw_readw(a) __raw_read_logged((a), w, u16)
#define __raw_readl(a) __raw_read_logged((a), l, u32)
#define __raw_readq(a) __raw_read_logged((a), q, u64)
/* IO barriers */
#define __iormb(v) \
({ \
@@ -176,22 +142,6 @@ static inline u64 __raw_readq_no_log(const volatile void __iomem *addr)
#define writel_relaxed(v,c) ((void)__raw_writel((__force u32)cpu_to_le32(v),(c)))
#define writeq_relaxed(v,c) ((void)__raw_writeq((__force u64)cpu_to_le64(v),(c)))
#define readb_relaxed_no_log(c) ({ u8 __v = __raw_readb_no_log(c); __v; })
#define readw_relaxed_no_log(c) \
({ u16 __v = le16_to_cpu((__force __le16)__raw_readw_no_log(c)); __v; })
#define readl_relaxed_no_log(c) \
({ u32 __v = le32_to_cpu((__force __le32)__raw_readl_no_log(c)); __v; })
#define readq_relaxed_no_log(c) \
({ u64 __v = le64_to_cpu((__force __le64)__raw_readq_no_log(c)); __v; })
#define writeb_relaxed_no_log(v, c) ((void)__raw_writeb_no_log((v), (c)))
#define writew_relaxed_no_log(v, c) \
((void)__raw_writew_no_log((__force u16)cpu_to_le32(v), (c)))
#define writel_relaxed_no_log(v, c) \
((void)__raw_writel_no_log((__force u32)cpu_to_le32(v), (c)))
#define writeq_relaxed_no_log(v, c) \
((void)__raw_writeq_no_log((__force u64)cpu_to_le32(v), (c)))
/*
* I/O memory access primitives. Reads are ordered relative to any
* following Normal memory access. Writes are ordered relative to any prior
@@ -207,24 +157,6 @@ static inline u64 __raw_readq_no_log(const volatile void __iomem *addr)
#define writel(v,c) ({ __iowmb(); writel_relaxed((v),(c)); })
#define writeq(v,c) ({ __iowmb(); writeq_relaxed((v),(c)); })
#define readb_no_log(c) \
({ u8 __v = readb_relaxed_no_log(c); __iormb(__v); __v; })
#define readw_no_log(c) \
({ u16 __v = readw_relaxed_no_log(c); __iormb(__v); __v; })
#define readl_no_log(c) \
({ u32 __v = readl_relaxed_no_log(c); __iormb(__v); __v; })
#define readq_no_log(c) \
({ u64 __v = readq_relaxed_no_log(c); __iormb(__v); __v; })
#define writeb_no_log(v, c) \
({ __iowmb(); writeb_relaxed_no_log((v), (c)); })
#define writew_no_log(v, c) \
({ __iowmb(); writew_relaxed_no_log((v), (c)); })
#define writel_no_log(v, c) \
({ __iowmb(); writel_relaxed_no_log((v), (c)); })
#define writeq_no_log(v, c) \
({ __iowmb(); writeq_relaxed_no_log((v), (c)); })
/*
* I/O port access primitives.
*/

View File

@@ -26,13 +26,16 @@
#define JUMP_LABEL_NOP_SIZE AARCH64_INSN_SIZE
static __always_inline bool arch_static_branch(struct static_key *key, bool branch)
static __always_inline bool arch_static_branch(struct static_key *key,
bool branch)
{
asm_volatile_goto("1: nop\n\t"
".pushsection __jump_table, \"aw\"\n\t"
".align 3\n\t"
".quad 1b, %l[l_yes], %c0\n\t"
".popsection\n\t"
asm_volatile_goto(
"1: nop \n\t"
" .pushsection __jump_table, \"aw\" \n\t"
" .align 3 \n\t"
" .long 1b - ., %l[l_yes] - . \n\t"
" .quad %c0 - . \n\t"
" .popsection \n\t"
: : "i"(&((char *)key)[branch]) : : l_yes);
return false;
@@ -40,13 +43,16 @@ l_yes:
return true;
}
static __always_inline bool arch_static_branch_jump(struct static_key *key, bool branch)
static __always_inline bool arch_static_branch_jump(struct static_key *key,
bool branch)
{
asm_volatile_goto("1: b %l[l_yes]\n\t"
".pushsection __jump_table, \"aw\"\n\t"
".align 3\n\t"
".quad 1b, %l[l_yes], %c0\n\t"
".popsection\n\t"
asm_volatile_goto(
"1: b %l[l_yes] \n\t"
" .pushsection __jump_table, \"aw\" \n\t"
" .align 3 \n\t"
" .long 1b - ., %l[l_yes] - . \n\t"
" .quad %c0 - . \n\t"
" .popsection \n\t"
: : "i"(&((char *)key)[branch]) : : l_yes);
return false;
@@ -54,13 +60,5 @@ l_yes:
return true;
}
typedef u64 jump_label_t;
struct jump_entry {
jump_label_t code;
jump_label_t target;
jump_label_t key;
};
#endif /* __ASSEMBLY__ */
#endif /* __ASM_JUMP_LABEL_H */

View File

@@ -34,21 +34,16 @@
#include <asm/pgtable.h>
#include <asm/sysreg.h>
#include <asm/tlbflush.h>
#include <linux/msm_rtb.h>
extern bool rodata_full;
static inline void contextidr_thread_switch(struct task_struct *next)
{
pid_t pid = task_pid_nr(next);
if (!IS_ENABLED(CONFIG_PID_IN_CONTEXTIDR))
return;
write_sysreg(pid, contextidr_el1);
write_sysreg(task_pid_nr(next), contextidr_el1);
isb();
}
/*

View File

@@ -0,0 +1,43 @@
/*
* Copyright (C) 2018 Linaro, Ltd. <ard.biesheuvel@linaro.org>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#ifndef __ASM_NEON_INTRINSICS_H
#define __ASM_NEON_INTRINSICS_H
#include <asm-generic/int-ll64.h>
/*
* In the kernel, u64/s64 are [un]signed long long, not [un]signed long.
* So by redefining these macros to the former, we can force gcc-stdint.h
* to define uint64_t / in64_t in a compatible manner.
*/
#ifdef __INT64_TYPE__
#undef __INT64_TYPE__
#define __INT64_TYPE__ long long
#endif
#ifdef __UINT64_TYPE__
#undef __UINT64_TYPE__
#define __UINT64_TYPE__ unsigned long long
#endif
/*
* genksyms chokes on the ARM NEON instrinsics system header, but we
* don't export anything it defines anyway, so just disregard when
* genksyms execute.
*/
#ifndef __GENKSYMS__
#include <arm_neon.h>
#endif
#ifdef CONFIG_CC_IS_CLANG
#pragma clang diagnostic ignored "-Wincompatible-pointer-types"
#endif
#endif /* __ASM_NEON_INTRINSICS_H */

View File

@@ -0,0 +1,60 @@
/*
* arm64-specific implementation of refcount_t. Based on x86 version and
* PAX_REFCOUNT from PaX/grsecurity.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#ifndef __ASM_REFCOUNT_H
#define __ASM_REFCOUNT_H
#include <linux/refcount.h>
#include <asm/atomic.h>
static __always_inline void refcount_add(int i, refcount_t *r)
{
__refcount_add_lt(i, &r->refs);
}
static __always_inline void refcount_inc(refcount_t *r)
{
__refcount_add_lt(1, &r->refs);
}
static __always_inline void refcount_dec(refcount_t *r)
{
__refcount_sub_le(1, &r->refs);
}
static __always_inline __must_check bool refcount_sub_and_test(unsigned int i,
refcount_t *r)
{
bool ret = __refcount_sub_lt(i, &r->refs) == 0;
if (ret) {
smp_acquire__after_ctrl_dep();
return true;
}
return false;
}
static __always_inline __must_check bool refcount_dec_and_test(refcount_t *r)
{
return refcount_sub_and_test(1, r);
}
static __always_inline __must_check bool refcount_add_not_zero(unsigned int i,
refcount_t *r)
{
return __refcount_add_not_zero(i, &r->refs) != 0;
}
static __always_inline __must_check bool refcount_inc_not_zero(refcount_t *r)
{
return __refcount_add_not_zero(1, &r->refs) != 0;
}
#endif

View File

@@ -95,14 +95,7 @@ extern void secondary_entry(void);
extern void arch_send_call_function_single_ipi(int cpu);
extern void arch_send_call_function_ipi_mask(const struct cpumask *mask);
#ifdef CONFIG_ARM64_ACPI_PARKING_PROTOCOL
extern void arch_send_wakeup_ipi_mask(const struct cpumask *mask);
#else
static inline void arch_send_wakeup_ipi_mask(const struct cpumask *mask)
{
BUILD_BUG();
}
#endif
extern int __cpu_disable(void);

View File

@@ -38,8 +38,7 @@ arm64-obj-$(CONFIG_FUNCTION_TRACER) += ftrace.o entry-ftrace.o
arm64-obj-$(CONFIG_MODULES) += arm64ksyms.o module.o
arm64-obj-$(CONFIG_ARM64_MODULE_PLTS) += module-plts.o
arm64-obj-$(CONFIG_PERF_EVENTS) += perf_regs.o perf_callchain.o
arm64-obj-$(CONFIG_HW_PERF_EVENTS) += perf_event.o perf_trace_counters.o \
perf_trace_user.o
arm64-obj-$(CONFIG_HW_PERF_EVENTS) += perf_event.o
arm64-obj-$(CONFIG_HAVE_HW_BREAKPOINT) += hw_breakpoint.o
arm64-obj-$(CONFIG_CPU_PM) += sleep.o suspend.o
arm64-obj-$(CONFIG_CPU_IDLE) += cpuidle.o

View File

@@ -162,25 +162,46 @@ static void clear_regs_spsr_ss(struct pt_regs *regs)
}
NOKPROBE_SYMBOL(clear_regs_spsr_ss);
/* EL1 Single Step Handler hooks */
static LIST_HEAD(step_hook);
static DEFINE_SPINLOCK(step_hook_lock);
static DEFINE_SPINLOCK(debug_hook_lock);
static LIST_HEAD(user_step_hook);
static LIST_HEAD(kernel_step_hook);
void register_step_hook(struct step_hook *hook)
static void register_debug_hook(struct list_head *node, struct list_head *list)
{
spin_lock(&step_hook_lock);
list_add_rcu(&hook->node, &step_hook);
spin_unlock(&step_hook_lock);
spin_lock(&debug_hook_lock);
list_add_rcu(node, list);
spin_unlock(&debug_hook_lock);
}
void unregister_step_hook(struct step_hook *hook)
static void unregister_debug_hook(struct list_head *node)
{
spin_lock(&step_hook_lock);
list_del_rcu(&hook->node);
spin_unlock(&step_hook_lock);
spin_lock(&debug_hook_lock);
list_del_rcu(node);
spin_unlock(&debug_hook_lock);
synchronize_rcu();
}
void register_user_step_hook(struct step_hook *hook)
{
register_debug_hook(&hook->node, &user_step_hook);
}
void unregister_user_step_hook(struct step_hook *hook)
{
unregister_debug_hook(&hook->node);
}
void register_kernel_step_hook(struct step_hook *hook)
{
register_debug_hook(&hook->node, &kernel_step_hook);
}
void unregister_kernel_step_hook(struct step_hook *hook)
{
unregister_debug_hook(&hook->node);
}
/*
* Call registered single step handlers
* There is no Syndrome info to check for determining the handler.
@@ -190,11 +211,14 @@ void unregister_step_hook(struct step_hook *hook)
static int call_step_hook(struct pt_regs *regs, unsigned int esr)
{
struct step_hook *hook;
struct list_head *list;
int retval = DBG_HOOK_ERROR;
list = user_mode(regs) ? &user_step_hook : &kernel_step_hook;
rcu_read_lock();
list_for_each_entry_rcu(hook, &step_hook, node) {
list_for_each_entry_rcu(hook, list, node) {
retval = hook->fn(regs, esr);
if (retval == DBG_HOOK_HANDLED)
break;
@@ -272,33 +296,44 @@ NOKPROBE_SYMBOL(single_step_handler);
* hit within breakpoint handler, especically in kprobes.
* Use reader/writer locks instead of plain spinlock.
*/
static LIST_HEAD(break_hook);
static DEFINE_SPINLOCK(break_hook_lock);
static LIST_HEAD(user_break_hook);
static LIST_HEAD(kernel_break_hook);
void register_break_hook(struct break_hook *hook)
void register_user_break_hook(struct break_hook *hook)
{
spin_lock(&break_hook_lock);
list_add_rcu(&hook->node, &break_hook);
spin_unlock(&break_hook_lock);
register_debug_hook(&hook->node, &user_break_hook);
}
void unregister_break_hook(struct break_hook *hook)
void unregister_user_break_hook(struct break_hook *hook)
{
spin_lock(&break_hook_lock);
list_del_rcu(&hook->node);
spin_unlock(&break_hook_lock);
synchronize_rcu();
unregister_debug_hook(&hook->node);
}
void register_kernel_break_hook(struct break_hook *hook)
{
register_debug_hook(&hook->node, &kernel_break_hook);
}
void unregister_kernel_break_hook(struct break_hook *hook)
{
unregister_debug_hook(&hook->node);
}
static int call_break_hook(struct pt_regs *regs, unsigned int esr)
{
struct break_hook *hook;
struct list_head *list;
int (*fn)(struct pt_regs *regs, unsigned int esr) = NULL;
list = user_mode(regs) ? &user_break_hook : &kernel_break_hook;
rcu_read_lock();
list_for_each_entry_rcu(hook, &break_hook, node)
if ((esr & hook->esr_mask) == hook->esr_val)
list_for_each_entry_rcu(hook, list, node) {
unsigned int comment = esr & BRK64_ESR_MASK;
if ((comment & ~hook->mask) == hook->imm)
fn = hook->fn;
}
rcu_read_unlock();
return fn ? fn(regs, esr) : DBG_HOOK_ERROR;

View File

@@ -73,11 +73,7 @@
#ifdef CONFIG_EFI
/*
* Use ABSOLUTE() to avoid ld.lld treating this as a relative symbol:
* https://github.com/ClangBuiltLinux/linux/issues/561
*/
__efistub_stext_offset = ABSOLUTE(stext - _text);
__efistub_stext_offset = stext - _text;
/*
* The EFI stub has its own symbol namespace prefixed by __efistub_, to
@@ -105,9 +101,6 @@ __efistub___memmove = __pi_memmove;
__efistub___memset = __pi_memset;
#endif
__efistub__text = _text;
__efistub__end = _end;
__efistub__edata = _edata;
__efistub_screen_info = screen_info;
#endif

View File

@@ -27,21 +27,21 @@ void __memcpy_fromio(void *to, const volatile void __iomem *from, size_t count)
{
while (count && (!IS_ALIGNED((unsigned long)from, 8) ||
!IS_ALIGNED((unsigned long)to, 8))) {
*(u8 *)to = __raw_readb_no_log(from);
*(u8 *)to = __raw_readb(from);
from++;
to++;
count--;
}
while (count >= 8) {
*(u64 *)to = __raw_readq_no_log(from);
*(u64 *)to = __raw_readq(from);
from += 8;
to += 8;
count -= 8;
}
while (count) {
*(u8 *)to = __raw_readb_no_log(from);
*(u8 *)to = __raw_readb(from);
from++;
to++;
count--;
@@ -56,21 +56,21 @@ void __memcpy_toio(volatile void __iomem *to, const void *from, size_t count)
{
while (count && (!IS_ALIGNED((unsigned long)to, 8) ||
!IS_ALIGNED((unsigned long)from, 8))) {
__raw_writeb_no_log(*(volatile u8 *)from, to);
__raw_writeb(*(volatile u8 *)from, to);
from++;
to++;
count--;
}
while (count >= 8) {
__raw_writeq_no_log(*(volatile u64 *)from, to);
__raw_writeq(*(volatile u64 *)from, to);
from += 8;
to += 8;
count -= 8;
}
while (count) {
__raw_writeb_no_log(*(volatile u8 *)from, to);
__raw_writeb(*(volatile u8 *)from, to);
from++;
to++;
count--;
@@ -90,19 +90,19 @@ void __memset_io(volatile void __iomem *dst, int c, size_t count)
qc |= qc << 32;
while (count && !IS_ALIGNED((unsigned long)dst, 8)) {
__raw_writeb_no_log(c, dst);
__raw_writeb(c, dst);
dst++;
count--;
}
while (count >= 8) {
__raw_writeq_no_log(qc, dst);
__raw_writeq(qc, dst);
dst += 8;
count -= 8;
}
while (count) {
__raw_writeb_no_log(c, dst);
__raw_writeb(c, dst);
dst++;
count--;
}

View File

@@ -25,12 +25,12 @@
void arch_jump_label_transform(struct jump_entry *entry,
enum jump_label_type type)
{
void *addr = (void *)entry->code;
void *addr = (void *)jump_entry_code(entry);
u32 insn;
if (type == JUMP_LABEL_JMP) {
insn = aarch64_insn_gen_branch_imm(entry->code,
entry->target,
insn = aarch64_insn_gen_branch_imm(jump_entry_code(entry),
jump_entry_target(entry),
AARCH64_INSN_BRANCH_NOLINK);
} else {
insn = aarch64_insn_gen_nop();

View File

@@ -264,15 +264,13 @@ static int kgdb_step_brk_fn(struct pt_regs *regs, unsigned int esr)
NOKPROBE_SYMBOL(kgdb_step_brk_fn);
static struct break_hook kgdb_brkpt_hook = {
.esr_mask = 0xffffffff,
.esr_val = (u32)ESR_ELx_VAL_BRK64(KGDB_DYN_DBG_BRK_IMM),
.fn = kgdb_brk_fn
.fn = kgdb_brk_fn,
.imm = KGDB_DYN_DBG_BRK_IMM,
};
static struct break_hook kgdb_compiled_brkpt_hook = {
.esr_mask = 0xffffffff,
.esr_val = (u32)ESR_ELx_VAL_BRK64(KGDB_COMPILED_DBG_BRK_IMM),
.fn = kgdb_compiled_brk_fn
.fn = kgdb_compiled_brk_fn,
.imm = KGDB_COMPILED_DBG_BRK_IMM,
};
static struct step_hook kgdb_step_hook = {
@@ -333,9 +331,9 @@ int kgdb_arch_init(void)
if (ret != 0)
return ret;
register_break_hook(&kgdb_brkpt_hook);
register_break_hook(&kgdb_compiled_brkpt_hook);
register_step_hook(&kgdb_step_hook);
register_kernel_break_hook(&kgdb_brkpt_hook);
register_kernel_break_hook(&kgdb_compiled_brkpt_hook);
register_kernel_step_hook(&kgdb_step_hook);
return 0;
}
@@ -346,9 +344,9 @@ int kgdb_arch_init(void)
*/
void kgdb_arch_exit(void)
{
unregister_break_hook(&kgdb_brkpt_hook);
unregister_break_hook(&kgdb_compiled_brkpt_hook);
unregister_step_hook(&kgdb_step_hook);
unregister_kernel_break_hook(&kgdb_brkpt_hook);
unregister_kernel_break_hook(&kgdb_compiled_brkpt_hook);
unregister_kernel_step_hook(&kgdb_step_hook);
unregister_die_notifier(&kgdb_notifier);
}

View File

@@ -2,4 +2,12 @@ SECTIONS {
.plt : { BYTE(0) }
.init.plt : { BYTE(0) }
.text.ftrace_trampoline : { BYTE(0) }
/* Undo -fdata-sections and -ffunction-sections */
.bss : { *(.bss .bss.[0-9a-zA-Z_]*) }
.data : { *(.data .data.[0-9a-zA-Z_]*) }
.rela.data : { *(.rela.data .rela.data.[0-9a-zA-Z_]*) }
.rela.text : { *(.rela.text .rela.text.[0-9a-zA-Z_]*) }
.rodata : { *(.rodata .rodata.[0-9a-zA-Z_]*) }
.text : { *(.text .text.[0-9a-zA-Z_]*) }
}

View File

@@ -1,178 +0,0 @@
/* Copyright (c) 2013-2014, 2017 The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#include <linux/uaccess.h>
#include <linux/debugfs.h>
#include <linux/cpu.h>
#include <linux/tracepoint.h>
#include <trace/events/sched.h>
#define CREATE_TRACE_POINTS
#include "perf_trace_counters.h"
static unsigned int tp_pid_state;
DEFINE_PER_CPU(u32, cntenset_val);
DEFINE_PER_CPU(u32, previous_ccnt);
DEFINE_PER_CPU(u32[NUM_L1_CTRS], previous_l1_cnts);
DEFINE_PER_CPU(u32, old_pid);
DEFINE_PER_CPU(u32, hotplug_flag);
#define USE_CPUHP_STATE CPUHP_AP_ONLINE
static int tracectr_cpu_hotplug_coming_up(unsigned int cpu)
{
per_cpu(hotplug_flag, cpu) = 1;
return 0;
}
static void setup_prev_cnts(u32 cpu, u32 cnten_val)
{
int i;
if (cnten_val & CC)
per_cpu(previous_ccnt, cpu) =
read_sysreg(pmccntr_el0);
for (i = 0; i < NUM_L1_CTRS; i++) {
if (cnten_val & (1 << i)) {
/* Select */
write_sysreg(i, pmselr_el0);
isb();
/* Read value */
per_cpu(previous_l1_cnts[i], cpu) =
read_sysreg(pmxevcntr_el0);
}
}
}
void tracectr_notifier(void *ignore, bool preempt,
struct task_struct *prev, struct task_struct *next)
{
u32 cnten_val;
int current_pid;
u32 cpu = task_cpu(next);
if (tp_pid_state != 1)
return;
current_pid = next->pid;
if (per_cpu(old_pid, cpu) != -1) {
cnten_val = read_sysreg(pmcntenset_el0);
per_cpu(cntenset_val, cpu) = cnten_val;
/* Disable all the counters that were enabled */
write_sysreg(cnten_val, pmcntenclr_el0);
if (per_cpu(hotplug_flag, cpu) == 1) {
per_cpu(hotplug_flag, cpu) = 0;
setup_prev_cnts(cpu, cnten_val);
} else {
trace_sched_switch_with_ctrs(per_cpu(old_pid, cpu),
current_pid);
}
/* Enable all the counters that were disabled */
write_sysreg(cnten_val, pmcntenset_el0);
}
per_cpu(old_pid, cpu) = current_pid;
}
static void enable_tp_pid(void)
{
if (tp_pid_state == 0) {
tp_pid_state = 1;
register_trace_sched_switch(tracectr_notifier, NULL);
}
}
static void disable_tp_pid(void)
{
if (tp_pid_state == 1) {
tp_pid_state = 0;
unregister_trace_sched_switch(tracectr_notifier, NULL);
}
}
static ssize_t read_enabled_perftp_file_bool(struct file *file,
char __user *user_buf, size_t count, loff_t *ppos)
{
char buf[2];
buf[1] = '\n';
if (tp_pid_state == 0)
buf[0] = '0';
else
buf[0] = '1';
return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}
static ssize_t write_enabled_perftp_file_bool(struct file *file,
const char __user *user_buf, size_t count, loff_t *ppos)
{
char buf[32];
size_t buf_size;
buf[0] = 0;
buf_size = min(count, (sizeof(buf)-1));
if (copy_from_user(buf, user_buf, buf_size))
return -EFAULT;
switch (buf[0]) {
case 'y':
case 'Y':
case '1':
enable_tp_pid();
break;
case 'n':
case 'N':
case '0':
disable_tp_pid();
break;
}
return count;
}
/* File operations for the debugfs "enabled" boolean control node. */
static const struct file_operations fops_perftp = {
	.read = read_enabled_perftp_file_bool,
	.write = write_enabled_perftp_file_bool,
	.llseek = default_llseek,
};
/*
 * Module init: create the debugfs control node, prime the per-CPU
 * old_pid state, and register the CPU hotplug callback.
 *
 * Fixes versus the previous version:
 *  - the address of the stack local `value` was stored as the debugfs
 *    file's private data and dangled after this function returned; the
 *    fops never read private_data, so pass NULL instead;
 *  - the cpuhp_setup_state_nocalls() return code was computed but
 *    ignored, leaking the debugfs directory and returning success on
 *    failure.
 *
 * Returns 0 on success or a negative errno.
 */
int __init init_tracecounters(void)
{
	struct dentry *dir;
	struct dentry *file;
	int cpu, rc;

	dir = debugfs_create_dir("perf_debug_tp", NULL);
	if (!dir)
		return -ENOMEM;

	file = debugfs_create_file("enabled", 0660, dir,
			NULL, &fops_perftp);
	if (!file) {
		debugfs_remove(dir);
		return -ENOMEM;
	}

	/* -1 marks "no previous task seen yet" for each CPU. */
	for_each_possible_cpu(cpu)
		per_cpu(old_pid, cpu) = -1;

	rc = cpuhp_setup_state_nocalls(USE_CPUHP_STATE,
				       "tracectr_cpu_hotplug",
				       tracectr_cpu_hotplug_coming_up,
				       NULL);
	/* Dynamic states return a positive state number on success. */
	if (rc < 0) {
		debugfs_remove_recursive(dir);
		return rc;
	}

	return 0;
}
/*
 * Module exit: tear down the CPU hotplug callback registered by
 * init_tracecounters().  Always returns 0.
 *
 * NOTE(review): the debugfs "perf_debug_tp" directory is not removed
 * here — presumably acceptable because this is built-in (late_initcall)
 * and never unloaded; confirm before making this a loadable module.
 */
int __exit exit_tracecounters(void)
{
	cpuhp_remove_state_nocalls(USE_CPUHP_STATE);
	return 0;
}
late_initcall(init_tracecounters);

View File

@@ -1,110 +0,0 @@
/* Copyright (c) 2013-2014,2017 The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#undef TRACE_SYSTEM
#define TRACE_SYSTEM perf_trace_counters
#if !defined(_PERF_TRACE_COUNTERS_H_) || defined(TRACE_HEADER_MULTI_READ)
#define _PERF_TRACE_COUNTERS_H_
/* Ctr index for PMCNTENSET/CLR */
#define CC 0x80000000
#define C0 0x1
#define C1 0x2
#define C2 0x4
#define C3 0x8
#define C4 0x10
#define C5 0x20
#define C_ALL (CC | C0 | C1 | C2 | C3 | C4 | C5)
#define NUM_L1_CTRS 6
#include <linux/sched.h>
#include <linux/cpumask.h>
#include <linux/tracepoint.h>
DECLARE_PER_CPU(u32, cntenset_val);
DECLARE_PER_CPU(u32, previous_ccnt);
DECLARE_PER_CPU(u32[NUM_L1_CTRS], previous_l1_cnts);
TRACE_EVENT(sched_switch_with_ctrs,
TP_PROTO(pid_t prev, pid_t next),
TP_ARGS(prev, next),
TP_STRUCT__entry(
__field(pid_t, old_pid)
__field(pid_t, new_pid)
__field(u32, cctr)
__field(u32, ctr0)
__field(u32, ctr1)
__field(u32, ctr2)
__field(u32, ctr3)
__field(u32, ctr4)
__field(u32, ctr5)
),
TP_fast_assign(
u32 cpu = smp_processor_id();
u32 i;
u32 cnten_val;
u32 total_ccnt = 0;
u32 total_cnt = 0;
u32 delta_l1_cnts[NUM_L1_CTRS];
__entry->old_pid = prev;
__entry->new_pid = next;
cnten_val = per_cpu(cntenset_val, cpu);
if (cnten_val & CC) {
/* Read value */
total_ccnt = read_sysreg(pmccntr_el0);
__entry->cctr = total_ccnt -
per_cpu(previous_ccnt, cpu);
per_cpu(previous_ccnt, cpu) = total_ccnt;
}
for (i = 0; i < NUM_L1_CTRS; i++) {
if (cnten_val & (1 << i)) {
/* Select */
write_sysreg(i, pmselr_el0);
isb();
/* Read value */
total_cnt = read_sysreg(pmxevcntr_el0);
delta_l1_cnts[i] = total_cnt -
per_cpu(previous_l1_cnts[i], cpu);
per_cpu(previous_l1_cnts[i], cpu) =
total_cnt;
} else
delta_l1_cnts[i] = 0;
}
__entry->ctr0 = delta_l1_cnts[0];
__entry->ctr1 = delta_l1_cnts[1];
__entry->ctr2 = delta_l1_cnts[2];
__entry->ctr3 = delta_l1_cnts[3];
__entry->ctr4 = delta_l1_cnts[4];
__entry->ctr5 = delta_l1_cnts[5];
),
TP_printk("prev_pid=%d, next_pid=%d, CCNTR: %u, CTR0: %u, CTR1: %u, CTR2: %u, CTR3: %u, CTR4: %u, CTR5: %u",
__entry->old_pid, __entry->new_pid,
__entry->cctr,
__entry->ctr0, __entry->ctr1,
__entry->ctr2, __entry->ctr3,
__entry->ctr4, __entry->ctr5)
);
#endif
#undef TRACE_INCLUDE_PATH
#define TRACE_INCLUDE_PATH ../../arch/arm64/kernel
#define TRACE_INCLUDE_FILE perf_trace_counters
#include <trace/define_trace.h>

View File

@@ -1,96 +0,0 @@
/* Copyright (c) 2014,2017 The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#include <linux/perf_event.h>
#include <linux/types.h>
#include <linux/tracepoint.h>
#include <linux/fs.h>
#include <linux/debugfs.h>
#include <linux/preempt.h>
#include <linux/stat.h>
#include <asm/uaccess.h>
#define CREATE_TRACE_POINTS
#include "perf_trace_user.h"
#undef TRACE_SYSTEM
#define TRACE_SYSTEM perf_trace_counters
#define TRACE_USER_MAX_BUF_SIZE 100
/*
 * debugfs write handler for the "trace_marker" node: copies a
 * user-supplied string (truncated to TRACE_USER_MAX_BUF_SIZE) and emits
 * it through the perf_trace_user trace event together with a snapshot
 * of the PMU counters.  Counters are stopped around the trace call so
 * the reported values are consistent.
 *
 * Returns the number of bytes consumed, 0 for an empty write, or
 * -EFAULT if the user buffer could not be copied.
 */
static ssize_t perf_trace_write(struct file *file,
		const char __user *user_string_in,
		size_t len, loff_t *ppos)
{
	u32 cnten_val;
	/*
	 * NOTE(review): copy_from_user() returns unsigned long (bytes
	 * not copied); storing it in an int and printing with %d is
	 * harmless here but worth tidying.
	 */
	int rc;
	char buf[TRACE_USER_MAX_BUF_SIZE + 1];
	ssize_t length;
	if (len == 0)
		return 0;
	/* Clamp to the local buffer, leaving room for the terminator. */
	length = len > TRACE_USER_MAX_BUF_SIZE ? TRACE_USER_MAX_BUF_SIZE : len;
	rc = copy_from_user(buf, user_string_in, length);
	if (rc) {
		pr_err("%s copy_from_user failed, rc=%d\n", __func__, rc);
		return -EFAULT;
	}
	/* Remove any trailing newline and make sure string is terminated */
	if (buf[length - 1] == '\n')
		buf[length - 1] = '\0';
	else
		buf[length] = '\0';
	/*
	 * Disable preemption to ensure that all the performance counter
	 * accesses happen on the same cpu
	 */
	preempt_disable();
	/* stop counters, call the trace function, restart them */
	cnten_val = read_sysreg(pmcntenset_el0);
	/* Disable all the counters that were enabled */
	write_sysreg(cnten_val, pmcntenclr_el0);
	trace_perf_trace_user(buf, cnten_val);
	/* Enable all the counters that were disabled */
	write_sysreg(cnten_val, pmcntenset_el0);
	preempt_enable();
	return length;
}
static const struct file_operations perf_trace_fops = {
.write = perf_trace_write
};
/*
 * Module init: expose the "msm_perf/trace_marker" debugfs node.
 *
 * Fixes versus the previous version:
 *  - the address of the stack local `value` was stored as the debugfs
 *    file's private data and dangled after return; perf_trace_fops
 *    never reads private_data, so pass NULL;
 *  - the directory was leaked when file creation failed.
 *
 * Returns 0 on success or -ENOMEM.
 */
static int __init init_perf_trace(void)
{
	struct dentry *dir;
	struct dentry *file;

	dir = debugfs_create_dir("msm_perf", NULL);
	if (!dir)
		return -ENOMEM;

	file = debugfs_create_file("trace_marker", 0220, dir,
			NULL, &perf_trace_fops);
	if (!file) {
		debugfs_remove(dir);
		return -ENOMEM;
	}

	return 0;
}
late_initcall(init_perf_trace);

View File

@@ -1,84 +0,0 @@
/* Copyright (c) 2014,2017 The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#if !defined(_PERF_TRACE_USER_H_) || defined(TRACE_HEADER_MULTI_READ)
#define _PERF_TRACE_USER_H_
#undef TRACE_SYSTEM
#define TRACE_SYSTEM perf_trace_counters
#include <linux/tracepoint.h>
#define CNTENSET_CC 0x80000000
#define NUM_L1_CTRS 6
TRACE_EVENT(perf_trace_user,
TP_PROTO(char *string, u32 cnten_val),
TP_ARGS(string, cnten_val),
TP_STRUCT__entry(
__field(u32, cctr)
__field(u32, ctr0)
__field(u32, ctr1)
__field(u32, ctr2)
__field(u32, ctr3)
__field(u32, ctr4)
__field(u32, ctr5)
__string(user_string, string)
),
TP_fast_assign(
u32 cnt;
u32 l1_cnts[NUM_L1_CTRS];
int i;
if (cnten_val & CNTENSET_CC) {
/* Read value */
cnt = read_sysreg(pmccntr_el0);
__entry->cctr = cnt;
} else
__entry->cctr = 0;
for (i = 0; i < NUM_L1_CTRS; i++) {
if (cnten_val & (1 << i)) {
/* Select */
write_sysreg(i, pmselr_el0);
isb();
/* Read value */
cnt = read_sysreg(pmxevcntr_el0);
l1_cnts[i] = cnt;
} else {
l1_cnts[i] = 0;
}
}
__entry->ctr0 = l1_cnts[0];
__entry->ctr1 = l1_cnts[1];
__entry->ctr2 = l1_cnts[2];
__entry->ctr3 = l1_cnts[3];
__entry->ctr4 = l1_cnts[4];
__entry->ctr5 = l1_cnts[5];
__assign_str(user_string, string);
),
TP_printk("CCNTR: %u, CTR0: %u, CTR1: %u, CTR2: %u, CTR3: %u, CTR4: %u, CTR5: %u, MSG=%s",
__entry->cctr,
__entry->ctr0, __entry->ctr1,
__entry->ctr2, __entry->ctr3,
__entry->ctr4, __entry->ctr5,
__get_str(user_string)
)
);
#endif
#undef TRACE_INCLUDE_PATH
#define TRACE_INCLUDE_PATH ../../arch/arm64/kernel
#define TRACE_INCLUDE_FILE perf_trace_user
#include <trace/define_trace.h>

View File

@@ -653,6 +653,7 @@ void __kprobes __used *trampoline_probe_handler(struct pt_regs *regs)
return (void *)orig_ret_address;
}
#ifdef CONFIG_KRETPROBES
void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri,
struct pt_regs *regs)
{
@@ -666,6 +667,7 @@ int __kprobes arch_trampoline_kprobe(struct kprobe *p)
{
return 0;
}
#endif
int __init arch_init_kprobes(void)
{

View File

@@ -195,8 +195,7 @@ static int uprobe_single_step_handler(struct pt_regs *regs,
/* uprobe breakpoint handler hook */
static struct break_hook uprobes_break_hook = {
.esr_mask = BRK64_ESR_MASK,
.esr_val = BRK64_ESR_UPROBES,
.imm = BRK64_ESR_UPROBES,
.fn = uprobe_breakpoint_handler,
};
@@ -207,8 +206,8 @@ static struct step_hook uprobes_step_hook = {
static int __init arch_init_uprobes(void)
{
register_break_hook(&uprobes_break_hook);
register_step_hook(&uprobes_step_hook);
register_user_break_hook(&uprobes_break_hook);
register_user_step_hook(&uprobes_step_hook);
return 0;
}

View File

@@ -793,12 +793,10 @@ void arch_send_call_function_single_ipi(int cpu)
smp_cross_call_common(cpumask_of(cpu), IPI_CALL_FUNC);
}
#ifdef CONFIG_ARM64_ACPI_PARKING_PROTOCOL
void arch_send_wakeup_ipi_mask(const struct cpumask *mask)
{
smp_cross_call_common(mask, IPI_WAKEUP);
}
#endif
#ifdef CONFIG_IRQ_WORK
void arch_irq_work_raise(void)
@@ -916,13 +914,8 @@ void handle_IPI(int ipinr, struct pt_regs *regs)
break;
#endif
#ifdef CONFIG_ARM64_ACPI_PARKING_PROTOCOL
case IPI_WAKEUP:
WARN_ONCE(!acpi_parking_protocol_valid(cpu),
"CPU%u: Wake-up IPI outside the ACPI parking protocol\n",
cpu);
break;
#endif
default:
pr_crit("CPU%u: Unknown IPI message 0x%x\n", cpu, ipinr);

View File

@@ -858,9 +858,8 @@ static int bug_handler(struct pt_regs *regs, unsigned int esr)
}
static struct break_hook bug_break_hook = {
.esr_val = 0xf2000000 | BUG_BRK_IMM,
.esr_mask = 0xffffffff,
.fn = bug_handler,
.imm = BUG_BRK_IMM,
};
#ifdef CONFIG_KASAN_SW_TAGS
@@ -929,11 +928,48 @@ int __init early_brk64(unsigned long addr, unsigned int esr,
return bug_handler(regs, esr) != DBG_HOOK_HANDLED;
}
static int refcount_overflow_handler(struct pt_regs *regs, unsigned int esr)
{
u32 dummy_cbz = le32_to_cpup((__le32 *)(regs->pc + 4));
bool zero = regs->pstate & PSR_Z_BIT;
u32 rt;
/*
* Find the register that holds the counter address from the
* dummy 'cbz' instruction that follows the 'brk' instruction
* that sent us here.
*/
rt = aarch64_insn_decode_register(AARCH64_INSN_REGTYPE_RT, dummy_cbz);
/* First unconditionally saturate the refcount. */
*(int *)regs->regs[rt] = INT_MIN / 2;
/*
* This function has been called because either a negative refcount
* value was seen by any of the refcount functions, or a zero
* refcount value was seen by refcount_{add,dec}().
*/
/* point pc to the branch instruction that detected the overflow */
regs->pc += 4 + aarch64_get_branch_offset(dummy_cbz);
refcount_error_report(regs, zero ? "hit zero" : "overflow");
/* advance pc and proceed */
regs->pc += 4;
return DBG_HOOK_HANDLED;
}
static struct break_hook refcount_break_hook = {
.fn = refcount_overflow_handler,
.imm = REFCOUNT_BRK_IMM,
};
/* This registration must happen early, before debug_traps_init(). */
void __init trap_init(void)
{
register_break_hook(&bug_break_hook);
#ifdef CONFIG_KASAN_SW_TAGS
register_break_hook(&kasan_break_hook);
#endif
register_kernel_break_hook(&bug_break_hook);
register_kernel_break_hook(&refcount_break_hook);
}

View File

@@ -193,6 +193,8 @@ static int __init vdso_mappings_init(const char *name,
if (vdso_pagelist == NULL)
return -ENOMEM;
kmemleak_not_leak(vdso_pagelist);
/* Grab the vDSO data page. */
vdso_pagelist[0] = phys_to_page(__pa_symbol(vdso_data));

View File

@@ -5,7 +5,7 @@
# A mix between the arm64 and arm vDSO Makefiles.
ifeq ($(cc-name),clang)
CC_ARM32 := $(CC) $(CLANG_TARGET_ARM32) -no-integrated-as
CC_ARM32 := $(CC) $(CLANG_TARGET_ARM32) -no-integrated-as $(CLANG_GCC32_TC) $(CLANG_PREFIX32)
GCC_ARM32_TC := $(realpath $(dir $(shell which $(CROSS_COMPILE_ARM32)ld))/..)
ifneq ($(GCC_ARM32_TC),)
CC_ARM32 += --gcc-toolchain=$(GCC_ARM32_TC)

View File

@@ -107,6 +107,8 @@ SECTIONS
.head.text : {
_text = .;
PROVIDE(__efistub__text = .);
HEAD_TEXT
}
.text : { /* Real text segment */
@@ -222,6 +224,7 @@ SECTIONS
PECOFF_EDATA_PADDING
__pecoff_data_rawsize = ABSOLUTE(. - __initdata_begin);
_edata = .;
PROVIDE(__efistub__edata = .);
BSS_SECTION(0, 0, 0)
@@ -243,12 +246,15 @@ SECTIONS
__pecoff_data_size = ABSOLUTE(. - __initdata_begin);
_end = .;
PROVIDE(__efistub__end = .);
STABS_DEBUG
HEAD_SYMBOLS
}
PROVIDE(__efistub_stext_offset = stext - _text);
/*
* The HYP init code and ID map text can't be longer than a page each,
* and should not cross a page boundary.

View File

@@ -1,3 +1,15 @@
#include <asm/atomic.h>
#define __ARM64_IN_ATOMIC_IMPL
/*
* Disarm the refcount checks in the out-of-line LL/SC routines. These are
* redundant, given that the LSE callers already perform the same checks.
* We do have to make sure that we exit with a zero value if the pre-check
* detected a zero value.
*/
#undef REFCOUNT_POST_CHECK_NEG
#undef REFCOUNT_POST_CHECK_NEG_OR_ZERO
#define REFCOUNT_POST_CHECK_NEG
#define REFCOUNT_POST_CHECK_NEG_OR_ZERO "csel %w[val], wzr, %w[val], eq\n"
#include <asm/atomic_ll_sc.h>

View File

@@ -296,9 +296,6 @@ config ZONE_DMA32
config AUDIT_ARCH
def_bool y if X86_64
config ARCH_SUPPORTS_OPTIMIZED_INLINING
def_bool y
config ARCH_SUPPORTS_DEBUG_PAGEALLOC
def_bool y

View File

@@ -284,20 +284,6 @@ config CPA_DEBUG
---help---
Do change_page_attr() self-tests every 30 seconds.
config OPTIMIZE_INLINING
bool "Allow gcc to uninline functions marked 'inline'"
---help---
This option determines if the kernel forces gcc to inline the functions
developers have marked 'inline'. Doing so takes away freedom from gcc to
do what it thinks is best, which is desirable for the gcc 3.x series of
compilers. The gcc 4.x series have a rewritten inlining algorithm and
enabling this option will generate a smaller kernel there. Hopefully
this algorithm is so good that allowing gcc 4.x and above to make the
decision will become the default in the future. Until then this option
is there to test gcc for this.
If unsure, say N.
config DEBUG_ENTRY
bool "Debug low-level entry code"
depends on DEBUG_KERNEL

View File

@@ -5,7 +5,6 @@
#include <asm/unwind.h>
#include <asm/orc_types.h>
#include <asm/orc_lookup.h>
#include <asm/sections.h>
#define orc_warn(fmt, ...) \
printk_deferred_once(KERN_WARNING pr_fmt("WARNING: " fmt), ##__VA_ARGS__)
@@ -120,7 +119,7 @@ static struct orc_entry *orc_find(unsigned long ip)
}
/* vmlinux .init slow lookup: */
if (ip >= (unsigned long)_sinittext && ip < (unsigned long)_einittext)
if (init_kernel_text(ip))
return __orc_find(__start_orc_unwind_ip, __start_orc_unwind,
__stop_orc_unwind_ip - __start_orc_unwind_ip, ip);

View File

@@ -1429,7 +1429,11 @@ retry:
trace_block_sleeprq(q, bio, op);
spin_unlock_irq(q->queue_lock);
io_schedule();
/*
* FIXME: this should be io_schedule(). The timeout is there as a
* workaround for some io timeout problems.
*/
io_schedule_timeout(5*HZ);
/*
* After sleeping, we become a "batching" process and will be able

View File

@@ -265,9 +265,7 @@ extern int blk_update_nr_requests(struct request_queue *, unsigned int);
*/
static inline int blk_do_io_stat(struct request *rq)
{
return rq->rq_disk &&
(rq->rq_flags & RQF_IO_STAT) &&
!blk_rq_is_passthrough(rq);
return false;
}
static inline void req_set_nomerge(struct request_queue *q, struct request *req)

View File

@@ -23,12 +23,12 @@
* tunables
*/
/* max queue in one round of service */
static const int cfq_quantum = 8;
static const int cfq_quantum = 16;
static const u64 cfq_fifo_expire[2] = { NSEC_PER_SEC / 4, NSEC_PER_SEC / 8 };
/* maximum backwards seek, in KiB */
static const int cfq_back_max = 16 * 1024;
/* penalty of a backwards seek */
static const int cfq_back_penalty = 2;
static const int cfq_back_penalty = 1;
static const u64 cfq_slice_sync = NSEC_PER_SEC / 10;
static u64 cfq_slice_async = NSEC_PER_SEC / 25;
static const int cfq_slice_async_rq = 2;

View File

@@ -692,38 +692,9 @@ int scsi_verify_blk_ioctl(struct block_device *bd, unsigned int cmd)
if (bd && bd == bd->bd_contains)
return 0;
/* Actually none of these is particularly useful on a partition,
* but they are safe.
*/
switch (cmd) {
case SCSI_IOCTL_GET_IDLUN:
case SCSI_IOCTL_GET_BUS_NUMBER:
case SCSI_IOCTL_GET_PCI:
case SCSI_IOCTL_PROBE_HOST:
case SG_GET_VERSION_NUM:
case SG_SET_TIMEOUT:
case SG_GET_TIMEOUT:
case SG_GET_RESERVED_SIZE:
case SG_SET_RESERVED_SIZE:
case SG_EMULATED_HOST:
return 0;
case CDROM_GET_CAPABILITY:
/* Keep this until we remove the printk below. udev sends it
* and we do not want to spam dmesg about it. CD-ROMs do
* not have partitions, so we get here only for disks.
*/
return -ENOIOCTLCMD;
default:
break;
}
if (capable(CAP_SYS_RAWIO))
return 0;
/* In particular, rule out all resets and host-specific ioctls. */
printk_ratelimited(KERN_WARNING
"%s: sending ioctl %x to a partition!\n", current->comm, cmd);
return -ENOIOCTLCMD;
}
EXPORT_SYMBOL(scsi_verify_blk_ioctl);

View File

@@ -123,8 +123,7 @@ enum {
BINDER_DEBUG_PRIORITY_CAP = 1U << 13,
BINDER_DEBUG_SPINLOCKS = 1U << 14,
};
static uint32_t binder_debug_mask = BINDER_DEBUG_USER_ERROR |
BINDER_DEBUG_FAILED_TRANSACTION | BINDER_DEBUG_DEAD_TRANSACTION;
static uint32_t binder_debug_mask = 0;
module_param_named(debug_mask, binder_debug_mask, uint, 0644);
char *binder_devices_param = CONFIG_ANDROID_BINDER_DEVICES;
@@ -204,6 +203,14 @@ static inline void binder_stats_created(enum binder_stat_types type)
struct binder_transaction_log binder_transaction_log;
struct binder_transaction_log binder_transaction_log_failed;
static struct kmem_cache *binder_node_pool;
static struct kmem_cache *binder_proc_pool;
static struct kmem_cache *binder_ref_death_pool;
static struct kmem_cache *binder_ref_pool;
static struct kmem_cache *binder_thread_pool;
static struct kmem_cache *binder_transaction_pool;
static struct kmem_cache *binder_work_pool;
static struct binder_transaction_log_entry *binder_transaction_log_add(
struct binder_transaction_log *log)
{
@@ -1351,9 +1358,9 @@ static struct binder_node *binder_init_node_ilocked(
static struct binder_node *binder_new_node(struct binder_proc *proc,
struct flat_binder_object *fp)
{
struct binder_node *node;
struct binder_node *new_node = kzalloc(sizeof(*node), GFP_KERNEL);
struct binder_node *node, *new_node;
new_node = kmem_cache_zalloc(binder_node_pool, GFP_KERNEL);
if (!new_node)
return NULL;
binder_inner_proc_lock(proc);
@@ -1363,14 +1370,14 @@ static struct binder_node *binder_new_node(struct binder_proc *proc,
/*
* The node was already added by another thread
*/
kfree(new_node);
kmem_cache_free(binder_node_pool, new_node);
return node;
}
static void binder_free_node(struct binder_node *node)
{
kfree(node);
kmem_cache_free(binder_node_pool, node);
binder_stats_deleted(BINDER_STAT_NODE);
}
@@ -1857,8 +1864,9 @@ static void binder_free_ref(struct binder_ref *ref)
{
if (ref->node)
binder_free_node(ref->node);
kfree(ref->death);
kfree(ref);
if (ref->death)
kmem_cache_free(binder_ref_death_pool, ref->death);
kmem_cache_free(binder_ref_pool, ref);
}
/**
@@ -1951,7 +1959,7 @@ static int binder_inc_ref_for_node(struct binder_proc *proc,
ref = binder_get_ref_for_node_olocked(proc, node, NULL);
if (!ref) {
binder_proc_unlock(proc);
new_ref = kzalloc(sizeof(*ref), GFP_KERNEL);
new_ref = kmem_cache_zalloc(binder_ref_pool, GFP_KERNEL);
if (!new_ref)
return -ENOMEM;
binder_proc_lock(proc);
@@ -1965,7 +1973,7 @@ static int binder_inc_ref_for_node(struct binder_proc *proc,
* Another thread created the ref first so
* free the one we allocated
*/
kfree(new_ref);
kmem_cache_free(binder_ref_pool, new_ref);
return ret;
}
@@ -2100,7 +2108,7 @@ static void binder_free_transaction(struct binder_transaction *t)
* If the transaction has no target_proc, then
* t->buffer->transaction has already been cleared.
*/
kfree(t);
kmem_cache_free(binder_transaction_pool, t);
binder_stats_deleted(BINDER_STAT_TRANSACTION);
}
@@ -3130,7 +3138,7 @@ static void binder_transaction(struct binder_proc *proc,
e->to_proc = target_proc->pid;
/* TODO: reuse incoming transaction for reply */
t = kzalloc(sizeof(*t), GFP_KERNEL);
t = kmem_cache_zalloc(binder_transaction_pool, GFP_KERNEL);
if (t == NULL) {
return_error = BR_FAILED_REPLY;
return_error_param = -ENOMEM;
@@ -3140,7 +3148,7 @@ static void binder_transaction(struct binder_proc *proc,
binder_stats_created(BINDER_STAT_TRANSACTION);
spin_lock_init(&t->lock);
tcomplete = kzalloc(sizeof(*tcomplete), GFP_KERNEL);
tcomplete = kmem_cache_zalloc(binder_work_pool, GFP_KERNEL);
if (tcomplete == NULL) {
return_error = BR_FAILED_REPLY;
return_error_param = -ENOMEM;
@@ -3563,10 +3571,10 @@ err_bad_extra_size:
if (secctx)
security_release_secctx(secctx, secctx_sz);
err_get_secctx_failed:
kfree(tcomplete);
kmem_cache_free(binder_work_pool, tcomplete);
binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
err_alloc_tcomplete_failed:
kfree(t);
kmem_cache_free(binder_transaction_pool, t);
binder_stats_deleted(BINDER_STAT_TRANSACTION);
err_alloc_t_failed:
err_bad_call_stack:
@@ -3918,7 +3926,7 @@ static int binder_thread_write(struct binder_proc *proc,
* Allocate memory for death notification
* before taking lock
*/
death = kzalloc(sizeof(*death), GFP_KERNEL);
death = kmem_cache_zalloc(binder_ref_death_pool, GFP_KERNEL);
if (death == NULL) {
WARN_ON(thread->return_error.cmd !=
BR_OK);
@@ -3943,7 +3951,8 @@ static int binder_thread_write(struct binder_proc *proc,
"BC_CLEAR_DEATH_NOTIFICATION",
target);
binder_proc_unlock(proc);
kfree(death);
if (death)
kmem_cache_free(binder_ref_death_pool, death);
break;
}
@@ -3964,7 +3973,7 @@ static int binder_thread_write(struct binder_proc *proc,
proc->pid, thread->pid);
binder_node_unlock(ref->node);
binder_proc_unlock(proc);
kfree(death);
kmem_cache_free(binder_ref_death_pool, death);
break;
}
binder_stats_created(BINDER_STAT_DEATH);
@@ -4264,7 +4273,7 @@ retry:
case BINDER_WORK_TRANSACTION_COMPLETE: {
binder_inner_proc_unlock(proc);
cmd = BR_TRANSACTION_COMPLETE;
kfree(w);
kmem_cache_free(binder_work_pool, w);
binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
if (put_user(cmd, (uint32_t __user *)ptr))
return -EFAULT;
@@ -4385,7 +4394,7 @@ retry:
(u64)cookie);
if (w->type == BINDER_WORK_CLEAR_DEATH_NOTIFICATION) {
binder_inner_proc_unlock(proc);
kfree(death);
kmem_cache_free(binder_ref_death_pool, death);
binder_stats_deleted(BINDER_STAT_DEATH);
} else {
binder_enqueue_work_ilocked(
@@ -4555,7 +4564,7 @@ static void binder_release_work(struct binder_proc *proc,
case BINDER_WORK_TRANSACTION_COMPLETE: {
binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
"undelivered TRANSACTION_COMPLETE\n");
kfree(w);
kmem_cache_free(binder_work_pool, w);
binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
} break;
case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
@@ -4566,7 +4575,7 @@ static void binder_release_work(struct binder_proc *proc,
binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
"undelivered death notification, %016llx\n",
(u64)death->cookie);
kfree(death);
kmem_cache_free(binder_ref_death_pool, death);
binder_stats_deleted(BINDER_STAT_DEATH);
} break;
default:
@@ -4627,14 +4636,14 @@ static struct binder_thread *binder_get_thread(struct binder_proc *proc)
thread = binder_get_thread_ilocked(proc, NULL);
binder_inner_proc_unlock(proc);
if (!thread) {
new_thread = kzalloc(sizeof(*thread), GFP_KERNEL);
new_thread = kmem_cache_zalloc(binder_thread_pool, GFP_KERNEL);
if (new_thread == NULL)
return NULL;
binder_inner_proc_lock(proc);
thread = binder_get_thread_ilocked(proc, new_thread);
binder_inner_proc_unlock(proc);
if (thread != new_thread)
kfree(new_thread);
kmem_cache_free(binder_thread_pool, new_thread);
}
return thread;
}
@@ -4646,7 +4655,7 @@ static void binder_free_proc(struct binder_proc *proc)
binder_alloc_deferred_release(&proc->alloc);
put_task_struct(proc->tsk);
binder_stats_deleted(BINDER_STAT_PROC);
kfree(proc);
kmem_cache_free(binder_proc_pool, proc);
}
static void binder_free_thread(struct binder_thread *thread)
@@ -4655,7 +4664,7 @@ static void binder_free_thread(struct binder_thread *thread)
binder_stats_deleted(BINDER_STAT_THREAD);
binder_proc_dec_tmpref(thread->proc);
put_task_struct(thread->task);
kfree(thread);
kmem_cache_free(binder_thread_pool, thread);
}
static int binder_thread_release(struct binder_proc *proc,
@@ -5166,7 +5175,7 @@ static int binder_open(struct inode *nodp, struct file *filp)
binder_debug(BINDER_DEBUG_OPEN_CLOSE, "%s: %d:%d\n", __func__,
current->group_leader->pid, current->pid);
proc = kzalloc(sizeof(*proc), GFP_KERNEL);
proc = kmem_cache_zalloc(binder_proc_pool, GFP_KERNEL);
if (proc == NULL)
return -ENOMEM;
spin_lock_init(&proc->inner_lock);
@@ -6082,6 +6091,73 @@ static int __init init_binder_device(const char *name)
return ret;
}
static int __init binder_create_pools(void)
{
int ret;
ret = binder_buffer_pool_create();
if (ret)
return ret;
binder_node_pool = KMEM_CACHE(binder_node, SLAB_HWCACHE_ALIGN);
if (!binder_node_pool)
goto err_node_pool;
binder_proc_pool = KMEM_CACHE(binder_proc, SLAB_HWCACHE_ALIGN);
if (!binder_proc_pool)
goto err_proc_pool;
binder_ref_death_pool = KMEM_CACHE(binder_ref_death, SLAB_HWCACHE_ALIGN);
if (!binder_ref_death_pool)
goto err_ref_death_pool;
binder_ref_pool = KMEM_CACHE(binder_ref, SLAB_HWCACHE_ALIGN);
if (!binder_ref_pool)
goto err_ref_pool;
binder_thread_pool = KMEM_CACHE(binder_thread, SLAB_HWCACHE_ALIGN);
if (!binder_thread_pool)
goto err_thread_pool;
binder_transaction_pool = KMEM_CACHE(binder_transaction, SLAB_HWCACHE_ALIGN);
if (!binder_transaction_pool)
goto err_transaction_pool;
binder_work_pool = KMEM_CACHE(binder_work, SLAB_HWCACHE_ALIGN);
if (!binder_work_pool)
goto err_work_pool;
return 0;
err_work_pool:
kmem_cache_destroy(binder_transaction_pool);
err_transaction_pool:
kmem_cache_destroy(binder_thread_pool);
err_thread_pool:
kmem_cache_destroy(binder_ref_pool);
err_ref_pool:
kmem_cache_destroy(binder_ref_death_pool);
err_ref_death_pool:
kmem_cache_destroy(binder_proc_pool);
err_proc_pool:
kmem_cache_destroy(binder_node_pool);
err_node_pool:
binder_buffer_pool_destroy();
return -ENOMEM;
}
static void __init binder_destroy_pools(void)
{
binder_buffer_pool_destroy();
kmem_cache_destroy(binder_node_pool);
kmem_cache_destroy(binder_proc_pool);
kmem_cache_destroy(binder_ref_death_pool);
kmem_cache_destroy(binder_ref_pool);
kmem_cache_destroy(binder_thread_pool);
kmem_cache_destroy(binder_transaction_pool);
kmem_cache_destroy(binder_work_pool);
}
static int __init binder_init(void)
{
int ret;
@@ -6090,10 +6166,14 @@ static int __init binder_init(void)
struct hlist_node *tmp;
char *device_names = NULL;
ret = binder_alloc_shrinker_init();
ret = binder_create_pools();
if (ret)
return ret;
ret = binder_alloc_shrinker_init();
if (ret)
goto err_alloc_shrinker_failed;
atomic_set(&binder_transaction_log.cur, ~0U);
atomic_set(&binder_transaction_log_failed.cur, ~0U);
@@ -6168,6 +6248,9 @@ err_init_binder_device_failed:
err_alloc_device_names_failed:
debugfs_remove_recursive(binder_debugfs_dir_entry_root);
err_alloc_shrinker_failed:
binder_destroy_pools();
return ret;
}

View File

@@ -53,6 +53,22 @@ module_param_named(debug_mask, binder_alloc_debug_mask,
pr_info(x); \
} while (0)
static struct kmem_cache *binder_buffer_pool;
int binder_buffer_pool_create(void)
{
binder_buffer_pool = KMEM_CACHE(binder_buffer, SLAB_HWCACHE_ALIGN);
if (!binder_buffer_pool)
return -ENOMEM;
return 0;
}
void binder_buffer_pool_destroy(void)
{
kmem_cache_destroy(binder_buffer_pool);
}
static struct binder_buffer *binder_buffer_next(struct binder_buffer *buffer)
{
return list_entry(buffer->entry.next, struct binder_buffer, entry);
@@ -464,7 +480,7 @@ static struct binder_buffer *binder_alloc_new_buf_locked(
if (buffer_size != size) {
struct binder_buffer *new_buffer;
new_buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
new_buffer = kmem_cache_zalloc(binder_buffer_pool, GFP_KERNEL);
if (!new_buffer) {
pr_err("%s: %d failed to alloc new buffer struct\n",
__func__, alloc->pid);
@@ -588,7 +604,7 @@ static void binder_delete_free_buffer(struct binder_alloc *alloc,
buffer_start_page(buffer) + PAGE_SIZE);
}
list_del(&buffer->entry);
kfree(buffer);
kmem_cache_free(binder_buffer_pool, buffer);
}
static void binder_free_buf_locked(struct binder_alloc *alloc,
@@ -702,7 +718,7 @@ int binder_alloc_mmap_handler(struct binder_alloc *alloc,
}
alloc->buffer_size = vma->vm_end - vma->vm_start;
buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
buffer = kmem_cache_zalloc(binder_buffer_pool, GFP_KERNEL);
if (!buffer) {
ret = -ENOMEM;
failure_string = "alloc buffer struct";
@@ -760,7 +776,7 @@ void binder_alloc_deferred_release(struct binder_alloc *alloc)
list_del(&buffer->entry);
WARN_ON_ONCE(!list_empty(&alloc->buffers));
kfree(buffer);
kmem_cache_free(binder_buffer_pool, buffer);
}
page_count = 0;

View File

@@ -143,6 +143,8 @@ extern void binder_alloc_print_allocated(struct seq_file *m,
struct binder_alloc *alloc);
void binder_alloc_print_pages(struct seq_file *m,
struct binder_alloc *alloc);
extern int binder_buffer_pool_create(void);
extern void binder_buffer_pool_destroy(void);
/**
* binder_alloc_get_free_async_space() - get free space available for async

View File

@@ -7,3 +7,4 @@ obj-$(CONFIG_PM_GENERIC_DOMAINS) += domain.o domain_governor.o
obj-$(CONFIG_HAVE_CLK) += clock_ops.o
ccflags-$(CONFIG_DEBUG_DRIVER) := -DDEBUG
CFLAGS_wakeup.o += -DCONFIG_DEBUG_FS

View File

@@ -961,7 +961,7 @@ bool pm_wakeup_pending(void)
pm_get_active_wakeup_sources(suspend_abort,
MAX_SUSPEND_ABORT_LEN);
log_suspend_abort_reason(suspend_abort);
pr_info("PM: %s\n", suspend_abort);
pr_debug("PM: %s\n", suspend_abort);
}
return ret || atomic_read(&pm_abort_suspend) > 0;

View File

@@ -859,7 +859,7 @@ static inline void mhi_timesync_log(struct mhi_controller *mhi_cntrl)
if (mhi_tsync && mhi_cntrl->tsync_log)
mhi_cntrl->tsync_log(mhi_cntrl,
readq_no_log(mhi_tsync->time_reg));
readq(mhi_tsync->time_reg));
}
/* memory allocation methods */

View File

@@ -2619,7 +2619,7 @@ int mhi_get_remote_time_sync(struct mhi_device *mhi_dev,
local_irq_disable();
*t_host = mhi_cntrl->time_get(mhi_cntrl, mhi_cntrl->priv_data);
*t_dev = readq_relaxed_no_log(mhi_tsync->time_reg);
*t_dev = readq_relaxed(mhi_tsync->time_reg);
local_irq_enable();
preempt_enable();
@@ -2720,7 +2720,7 @@ int mhi_get_remote_time(struct mhi_device *mhi_dev,
mhi_tsync->local_time =
mhi_cntrl->time_get(mhi_cntrl, mhi_cntrl->priv_data);
writel_relaxed_no_log(mhi_tsync->int_sequence, mhi_cntrl->tsync_db);
writel_relaxed(mhi_tsync->int_sequence, mhi_cntrl->tsync_db);
/* write must go thru immediately */
wmb();

View File

@@ -36,39 +36,21 @@
#define MSG_VERB(fmt, ...) do { \
if (mhi_netdev->msg_lvl <= MHI_MSG_LVL_VERBOSE) \
pr_err("[D][%s] " fmt, __func__, ##__VA_ARGS__);\
if (mhi_netdev->ipc_log && (*mhi_netdev->ipc_log_lvl <= \
MHI_MSG_LVL_VERBOSE)) \
ipc_log_string(mhi_netdev->ipc_log, "[D][%s] " fmt, \
__func__, ##__VA_ARGS__); \
} while (0)
#else
#define MSG_VERB(fmt, ...) do { \
if (mhi_netdev->ipc_log && (*mhi_netdev->ipc_log_lvl <= \
MHI_MSG_LVL_VERBOSE)) \
ipc_log_string(mhi_netdev->ipc_log, "[D][%s] " fmt, \
__func__, ##__VA_ARGS__); \
} while (0)
#define MSG_VERB(fmt, ...) ((void)0)
#endif
#define MSG_LOG(fmt, ...) do { \
if (mhi_netdev->msg_lvl <= MHI_MSG_LVL_INFO) \
pr_err("[I][%s] " fmt, __func__, ##__VA_ARGS__);\
if (mhi_netdev->ipc_log && (*mhi_netdev->ipc_log_lvl <= \
MHI_MSG_LVL_INFO)) \
ipc_log_string(mhi_netdev->ipc_log, "[I][%s] " fmt, \
__func__, ##__VA_ARGS__); \
} while (0)
#define MSG_ERR(fmt, ...) do { \
if (mhi_netdev->msg_lvl <= MHI_MSG_LVL_ERROR) \
pr_err("[E][%s] " fmt, __func__, ##__VA_ARGS__); \
if (mhi_netdev->ipc_log && (*mhi_netdev->ipc_log_lvl <= \
MHI_MSG_LVL_ERROR)) \
ipc_log_string(mhi_netdev->ipc_log, "[E][%s] " fmt, \
__func__, ##__VA_ARGS__); \
} while (0)
#define MHI_ASSERT(cond, msg) do { \

View File

@@ -78,40 +78,22 @@ enum MHI_DEBUG_LEVEL msg_lvl = MHI_MSG_LVL_ERROR;
#define MSG_VERB(fmt, ...) do { \
if (msg_lvl <= MHI_MSG_LVL_VERBOSE) \
pr_err("[D][%s] " fmt, __func__, ##__VA_ARGS__); \
if (uci_dev->ipc_log && uci_dev->ipc_log_lvl && \
(*uci_dev->ipc_log_lvl <= MHI_MSG_LVL_VERBOSE)) \
ipc_log_string(uci_dev->ipc_log, \
"[D][%s] " fmt, __func__, ##__VA_ARGS__); \
} while (0)
#else
#define MHI_UCI_IPC_LOG_PAGES (1)
#define MSG_VERB(fmt, ...) do { \
if (uci_dev->ipc_log && uci_dev->ipc_log_lvl && \
(*uci_dev->ipc_log_lvl <= MHI_MSG_LVL_VERBOSE)) \
ipc_log_string(uci_dev->ipc_log, \
"[D][%s] " fmt, __func__, ##__VA_ARGS__); \
} while (0)
#define MSG_VERB(fmt, ...) ((void)0)
#endif
#define MSG_LOG(fmt, ...) do { \
if (msg_lvl <= MHI_MSG_LVL_INFO) \
pr_err("[I][%s] " fmt, __func__, ##__VA_ARGS__); \
if (uci_dev->ipc_log && uci_dev->ipc_log_lvl && \
(*uci_dev->ipc_log_lvl <= MHI_MSG_LVL_INFO)) \
ipc_log_string(uci_dev->ipc_log, "[I][%s] " fmt, \
__func__, ##__VA_ARGS__); \
} while (0)
#define MSG_ERR(fmt, ...) do { \
if (msg_lvl <= MHI_MSG_LVL_ERROR) \
pr_err("[E][%s] " fmt, __func__, ##__VA_ARGS__); \
if (uci_dev->ipc_log && uci_dev->ipc_log_lvl && \
(*uci_dev->ipc_log_lvl <= MHI_MSG_LVL_ERROR)) \
ipc_log_string(uci_dev->ipc_log, "[E][%s] " fmt, \
__func__, ##__VA_ARGS__); \
} while (0)
#define MAX_UCI_DEVICES (64)

View File

@@ -775,7 +775,9 @@ static ssize_t diag_dbgfs_write_debug(struct file *fp, const char __user *buf,
if (value < 0)
return -EINVAL;
#ifdef DIAG_DEBUG
diag_debug_mask = (uint16_t)value;
#endif
return count;
}
#endif

View File

@@ -30,16 +30,7 @@
#ifdef CONFIG_IPC_LOGGING
extern uint16_t diag_debug_mask;
extern void *diag_ipc_log;
#define DIAG_LOG(log_lvl, msg, ...) \
do { \
if (diag_ipc_log && (log_lvl & diag_debug_mask)) { \
ipc_log_string(diag_ipc_log, \
"[%s] " msg, __func__, ##__VA_ARGS__); \
} \
} while (0)
#else
#define DIAG_LOG(log_lvl, msg, ...)
#endif
#define DIAG_LOG(log_lvl, msg, ...) ((void)0)
#endif

View File

@@ -1321,7 +1321,7 @@ int diag_send_peripheral_buffering_mode(struct diag_buffering_mode_t *params)
}
if (!driver->buffering_flag[params->peripheral]) {
pr_err("diag: In %s, buffering flag not set for %d\n", __func__,
pr_debug("diag: In %s, buffering flag not set for %d\n", __func__,
params->peripheral);
return -EINVAL;
}

View File

@@ -86,18 +86,11 @@ enum {
SMD_PKT_INFO = 1U << 0,
};
#define SMD_PKT_INFO(x, ...) \
do { \
if (smd_pkt_debug_mask & SMD_PKT_INFO) { \
ipc_log_string(smd_pkt_ilctxt, \
"[%s]: "x, __func__, ##__VA_ARGS__); \
} \
} while (0)
#define SMD_PKT_INFO(x, ...) ((void)0)
#define SMD_PKT_ERR(x, ...) \
do { \
pr_err_ratelimited("[%s]: "x, __func__, ##__VA_ARGS__); \
ipc_log_string(smd_pkt_ilctxt, "[%s]: "x, __func__, ##__VA_ARGS__); \
} while (0)
#define SMD_PKT_IOCTL_QUEUE_RX_INTENT \

View File

@@ -470,7 +470,6 @@ struct entropy_store {
unsigned short add_ptr;
unsigned short input_rotate;
int entropy_count;
int entropy_total;
unsigned int initialized:1;
unsigned int last_data_init:1;
__u8 last_data[EXTRACT_SIZE];
@@ -643,7 +642,7 @@ static void process_random_ready_list(void)
*/
static void credit_entropy_bits(struct entropy_store *r, int nbits)
{
int entropy_count, orig;
int entropy_count, orig, has_initialized = 0;
const int pool_size = r->poolinfo->poolfracbits;
int nfrac = nbits << ENTROPY_SHIFT;
@@ -698,47 +697,53 @@ retry:
entropy_count = 0;
} else if (entropy_count > pool_size)
entropy_count = pool_size;
if ((r == &blocking_pool) && !r->initialized &&
(entropy_count >> ENTROPY_SHIFT) > 128)
has_initialized = 1;
if (cmpxchg(&r->entropy_count, orig, entropy_count) != orig)
goto retry;
r->entropy_total += nbits;
if (!r->initialized && r->entropy_total > 128) {
if (has_initialized) {
r->initialized = 1;
r->entropy_total = 0;
wake_up_interruptible(&random_read_wait);
kill_fasync(&fasync, SIGIO, POLL_IN);
}
trace_credit_entropy_bits(r->name, nbits,
entropy_count >> ENTROPY_SHIFT,
r->entropy_total, _RET_IP_);
entropy_count >> ENTROPY_SHIFT, _RET_IP_);
if (r == &input_pool) {
int entropy_bits = entropy_count >> ENTROPY_SHIFT;
struct entropy_store *other = &blocking_pool;
if (crng_init < 2 && entropy_bits >= 128) {
if (crng_init < 2) {
if (entropy_bits < 128)
return;
crng_reseed(&primary_crng, r);
entropy_bits = r->entropy_count >> ENTROPY_SHIFT;
}
/* initialize the blocking pool if necessary */
if (entropy_bits >= random_read_wakeup_bits &&
!other->initialized) {
schedule_work(&other->push_work);
return;
}
/* should we wake readers? */
if (entropy_bits >= random_read_wakeup_bits &&
wq_has_sleeper(&random_read_wait)) {
wake_up_interruptible(&random_read_wait);
kill_fasync(&fasync, SIGIO, POLL_IN);
}
/* If the input pool is getting full, send some
* entropy to the blocking pool until it is 75% full.
/* If the input pool is getting full, and the blocking
* pool has room, send some entropy to the blocking
* pool.
*/
if (entropy_bits > random_write_wakeup_bits &&
r->initialized &&
r->entropy_total >= 2*random_read_wakeup_bits) {
struct entropy_store *other = &blocking_pool;
if (other->entropy_count <=
3 * other->poolinfo->poolfracbits / 4) {
schedule_work(&other->push_work);
r->entropy_total = 0;
}
}
if (!work_pending(&other->push_work) &&
(ENTROPY_BITS(r) > 6 * r->poolinfo->poolbytes) &&
(ENTROPY_BITS(other) <= 6 * other->poolinfo->poolbytes))
schedule_work(&other->push_work);
}
}
@@ -1071,7 +1076,6 @@ static ssize_t extract_crng_user(void __user *buf, size_t nbytes)
struct timer_rand_state {
cycles_t last_time;
long last_delta, last_delta2;
unsigned dont_count_entropy:1;
};
#define INIT_TIMER_RAND_STATE { INITIAL_JIFFIES, };
@@ -1100,8 +1104,6 @@ void add_device_randomness(const void *buf, unsigned int size)
}
EXPORT_SYMBOL(add_device_randomness);
static struct timer_rand_state input_timer_state = INIT_TIMER_RAND_STATE;
/*
* This function adds entropy to the entropy "pool" by using timing
* delays. It uses the timer_rand_state structure to make an estimate
@@ -1122,8 +1124,6 @@ static void add_timer_randomness(struct timer_rand_state *state, unsigned num)
} sample;
long delta, delta2, delta3;
preempt_disable();
sample.jiffies = jiffies;
sample.cycles = random_get_entropy();
sample.num = num;
@@ -1135,51 +1135,38 @@ static void add_timer_randomness(struct timer_rand_state *state, unsigned num)
* We take into account the first, second and third-order deltas
* in order to make our estimate.
*/
delta = sample.jiffies - state->last_time;
state->last_time = sample.jiffies;
if (!state->dont_count_entropy) {
delta = sample.jiffies - state->last_time;
state->last_time = sample.jiffies;
delta2 = delta - state->last_delta;
state->last_delta = delta;
delta2 = delta - state->last_delta;
state->last_delta = delta;
delta3 = delta2 - state->last_delta2;
state->last_delta2 = delta2;
delta3 = delta2 - state->last_delta2;
state->last_delta2 = delta2;
if (delta < 0)
delta = -delta;
if (delta2 < 0)
delta2 = -delta2;
if (delta3 < 0)
delta3 = -delta3;
if (delta > delta2)
delta = delta2;
if (delta > delta3)
delta = delta3;
if (delta < 0)
delta = -delta;
if (delta2 < 0)
delta2 = -delta2;
if (delta3 < 0)
delta3 = -delta3;
if (delta > delta2)
delta = delta2;
if (delta > delta3)
delta = delta3;
/*
* delta is now minimum absolute delta.
* Round down by 1 bit on general principles,
* and limit entropy entimate to 12 bits.
*/
credit_entropy_bits(r, min_t(int, fls(delta>>1), 11));
}
preempt_enable();
/*
* delta is now minimum absolute delta.
* Round down by 1 bit on general principles,
* and limit entropy entimate to 12 bits.
*/
credit_entropy_bits(r, min_t(int, fls(delta>>1), 11));
}
void add_input_randomness(unsigned int type, unsigned int code,
unsigned int value)
{
static unsigned char last_value;
/* ignore autorepeat and the like */
if (value == last_value)
return;
last_value = value;
add_timer_randomness(&input_timer_state,
(type << 4) ^ code ^ (code >> 4) ^ value);
trace_add_input_randomness(ENTROPY_BITS(&input_pool));
return;
}
EXPORT_SYMBOL_GPL(add_input_randomness);
@@ -1546,6 +1533,11 @@ static ssize_t extract_entropy_user(struct entropy_store *r, void __user *buf,
int large_request = (nbytes > 256);
trace_extract_entropy_user(r->name, nbytes, ENTROPY_BITS(r), _RET_IP_);
if (!r->initialized && r->pull) {
xfer_secondary_pool(r, ENTROPY_BITS(r->pull)/8);
if (!r->initialized)
return 0;
}
xfer_secondary_pool(r, nbytes);
nbytes = account(r, nbytes, 0, 0);
@@ -1642,6 +1634,56 @@ void get_random_bytes(void *buf, int nbytes)
}
EXPORT_SYMBOL(get_random_bytes);
/*
* Each time the timer fires, we expect that we got an unpredictable
* jump in the cycle counter. Even if the timer is running on another
* CPU, the timer activity will be touching the stack of the CPU that is
* generating entropy..
*
* Note that we don't re-arm the timer in the timer itself - we are
* happy to be scheduled away, since that just makes the load more
* complex, but we do not want the timer to keep ticking unless the
* entropy loop is running.
*
* So the re-arming always happens in the entropy loop itself.
*/
static void entropy_timer(struct timer_list *t)
{
credit_entropy_bits(&input_pool, 1);
}
/*
* If we have an actual cycle counter, see if we can
* generate enough entropy with timing noise
*/
static void try_to_generate_entropy(void)
{
struct {
unsigned long now;
struct timer_list timer;
} stack;
stack.now = random_get_entropy();
/* Slow counter - or none. Don't even bother */
if (stack.now == random_get_entropy())
return;
timer_setup_on_stack(&stack.timer, entropy_timer, 0);
while (!crng_ready()) {
if (!timer_pending(&stack.timer))
mod_timer(&stack.timer, jiffies+1);
mix_pool_bytes(&input_pool, &stack.now, sizeof(stack.now));
schedule();
stack.now = random_get_entropy();
}
del_timer_sync(&stack.timer);
destroy_timer_on_stack(&stack.timer);
mix_pool_bytes(&input_pool, &stack.now, sizeof(stack.now));
}
/*
* Wait for the urandom pool to be seeded and thus guaranteed to supply
* cryptographically secure random numbers. This applies to: the /dev/urandom
@@ -1656,7 +1698,17 @@ int wait_for_random_bytes(void)
{
if (likely(crng_ready()))
return 0;
return wait_event_interruptible(crng_init_wait, crng_ready());
do {
int ret;
ret = wait_event_interruptible_timeout(crng_init_wait, crng_ready(), HZ);
if (ret)
return ret > 0 ? 0 : ret;
try_to_generate_entropy();
} while (!crng_ready());
return 0;
}
EXPORT_SYMBOL(wait_for_random_bytes);
@@ -1739,7 +1791,7 @@ void get_random_bytes_arch(void *buf, int nbytes)
if (!arch_get_random_long(&v))
break;
memcpy(p, &v, chunk);
p += chunk;
nbytes -= chunk;
@@ -1750,7 +1802,6 @@ void get_random_bytes_arch(void *buf, int nbytes)
}
EXPORT_SYMBOL(get_random_bytes_arch);
/*
* init_std_data - initialize pool with system data
*
@@ -1842,14 +1893,14 @@ _random_read(int nonblock, char __user *buf, size_t nbytes)
return -EAGAIN;
wait_event_interruptible(random_read_wait,
ENTROPY_BITS(&input_pool) >=
random_read_wakeup_bits);
blocking_pool.initialized &&
(ENTROPY_BITS(&input_pool) >= random_read_wakeup_bits));
if (signal_pending(current))
return -ERESTARTSYS;
}
}
static ssize_t
static ssize_t __maybe_unused
random_read(struct file *file, char __user *buf, size_t nbytes, loff_t *ppos)
{
return _random_read(file->f_flags & O_NONBLOCK, buf, nbytes);
@@ -1998,10 +2049,11 @@ static int random_fasync(int fd, struct file *filp, int on)
}
const struct file_operations random_fops = {
.read = random_read,
.read = urandom_read,
.write = random_write,
.poll = random_poll,
.unlocked_ioctl = random_ioctl,
.compat_ioctl = compat_ptr_ioctl,
.fasync = random_fasync,
.llseek = noop_llseek,
};
@@ -2010,6 +2062,7 @@ const struct file_operations urandom_fops = {
.read = urandom_read,
.write = random_write,
.unlocked_ioctl = random_ioctl,
.compat_ioctl = compat_ptr_ioctl,
.fasync = random_fasync,
.llseek = noop_llseek,
};

View File

@@ -106,14 +106,9 @@ static inline int clk_osm_read_reg(struct clk_osm *c, u32 offset)
return readl_relaxed(c->vbase + offset);
}
static inline int clk_osm_read_reg_no_log(struct clk_osm *c, u32 offset)
{
return readl_relaxed_no_log(c->vbase + offset);
}
static inline int clk_osm_mb(struct clk_osm *c)
{
return readl_relaxed_no_log(c->vbase + ENABLE_REG);
return readl_relaxed(c->vbase + ENABLE_REG);
}
static long clk_osm_list_rate(struct clk_hw *hw, unsigned int n,
@@ -914,7 +909,7 @@ static u64 clk_osm_get_cpu_cycle_counter(int cpu)
* core DCVS is disabled.
*/
core_num = parent->per_core_dcvs ? c->core_num : 0;
val = clk_osm_read_reg_no_log(parent,
val = clk_osm_read_reg(parent,
OSM_CYCLE_COUNTER_STATUS_REG(core_num));
if (val < c->prev_cycle_counter) {

View File

@@ -101,20 +101,20 @@ void arch_timer_reg_write(int access, enum arch_timer_reg reg, u32 val,
struct arch_timer *timer = to_arch_timer(clk);
switch (reg) {
case ARCH_TIMER_REG_CTRL:
writel_relaxed_no_log(val, timer->base + CNTP_CTL);
writel_relaxed(val, timer->base + CNTP_CTL);
break;
case ARCH_TIMER_REG_TVAL:
writel_relaxed_no_log(val, timer->base + CNTP_TVAL);
writel_relaxed(val, timer->base + CNTP_TVAL);
break;
}
} else if (access == ARCH_TIMER_MEM_VIRT_ACCESS) {
struct arch_timer *timer = to_arch_timer(clk);
switch (reg) {
case ARCH_TIMER_REG_CTRL:
writel_relaxed_no_log(val, timer->base + CNTV_CTL);
writel_relaxed(val, timer->base + CNTV_CTL);
break;
case ARCH_TIMER_REG_TVAL:
writel_relaxed_no_log(val, timer->base + CNTV_TVAL);
writel_relaxed(val, timer->base + CNTV_TVAL);
break;
}
} else {
@@ -132,20 +132,20 @@ u32 arch_timer_reg_read(int access, enum arch_timer_reg reg,
struct arch_timer *timer = to_arch_timer(clk);
switch (reg) {
case ARCH_TIMER_REG_CTRL:
val = readl_relaxed_no_log(timer->base + CNTP_CTL);
val = readl_relaxed(timer->base + CNTP_CTL);
break;
case ARCH_TIMER_REG_TVAL:
val = readl_relaxed_no_log(timer->base + CNTP_TVAL);
val = readl_relaxed(timer->base + CNTP_TVAL);
break;
}
} else if (access == ARCH_TIMER_MEM_VIRT_ACCESS) {
struct arch_timer *timer = to_arch_timer(clk);
switch (reg) {
case ARCH_TIMER_REG_CTRL:
val = readl_relaxed_no_log(timer->base + CNTV_CTL);
val = readl_relaxed(timer->base + CNTV_CTL);
break;
case ARCH_TIMER_REG_TVAL:
val = readl_relaxed_no_log(timer->base + CNTV_TVAL);
val = readl_relaxed(timer->base + CNTV_TVAL);
break;
}
} else {
@@ -893,11 +893,11 @@ void arch_timer_mem_get_cval(u32 *lo, u32 *hi)
if (!arch_counter_base)
return;
ctrl = readl_relaxed_no_log(arch_counter_base + CNTV_CTL);
ctrl = readl_relaxed(arch_counter_base + CNTV_CTL);
if (ctrl & ARCH_TIMER_CTRL_ENABLE) {
*lo = readl_relaxed_no_log(arch_counter_base + CNTCVAL_LO);
*hi = readl_relaxed_no_log(arch_counter_base + CNTCVAL_HI);
*lo = readl_relaxed(arch_counter_base + CNTCVAL_LO);
*hi = readl_relaxed(arch_counter_base + CNTCVAL_HI);
}
}
@@ -916,9 +916,9 @@ static u64 arch_counter_get_cntvct_mem(void)
u32 vct_lo, vct_hi, tmp_hi;
do {
vct_hi = readl_relaxed_no_log(arch_counter_base + CNTVCT_HI);
vct_lo = readl_relaxed_no_log(arch_counter_base + CNTVCT_LO);
tmp_hi = readl_relaxed_no_log(arch_counter_base + CNTVCT_HI);
vct_hi = readl_relaxed(arch_counter_base + CNTVCT_HI);
vct_lo = readl_relaxed(arch_counter_base + CNTVCT_LO);
tmp_hi = readl_relaxed(arch_counter_base + CNTVCT_HI);
} while (vct_hi != tmp_hi);
return ((u64) vct_hi << 32) | vct_lo;
@@ -1295,7 +1295,7 @@ arch_timer_mem_find_best_frame(struct arch_timer_mem *timer_mem)
return NULL;
}
cnttidr = readl_relaxed_no_log(cntctlbase + CNTTIDR);
cnttidr = readl_relaxed(cntctlbase + CNTTIDR);
/*
* Try to find a virtual capable frame. Otherwise fall back to a

View File

@@ -276,7 +276,7 @@ static void dbs_update_util_handler(struct update_util_data *data, u64 time,
struct policy_dbs_info *policy_dbs = cdbs->policy_dbs;
u64 delta_ns, lst;
if (!cpufreq_can_do_remote_dvfs(policy_dbs->policy))
if (!cpufreq_this_cpu_can_update(policy_dbs->policy))
return;
/*

View File

@@ -37,6 +37,24 @@ static int enabled_devices;
static int off __read_mostly;
static int initialized __read_mostly;
#ifdef CONFIG_SMP
static atomic_t idled = ATOMIC_INIT(0);
#if NR_CPUS > 32
#error idled CPU mask not big enough for NR_CPUS
#endif
void cpuidle_set_idle_cpu(unsigned int cpu)
{
atomic_or(BIT(cpu), &idled);
}
void cpuidle_clear_idle_cpu(unsigned int cpu)
{
atomic_andnot(BIT(cpu), &idled);
}
#endif
int cpuidle_disabled(void)
{
return off;
@@ -239,17 +257,17 @@ int cpuidle_enter_state(struct cpuidle_device *dev, struct cpuidle_driver *drv,
if (!cpuidle_state_is_coupled(drv, index))
local_irq_enable();
diff = ktime_us_delta(time_end, time_start);
if (diff > INT_MAX)
diff = INT_MAX;
dev->last_residency = (int) diff;
if (entered_state >= 0) {
/* Update cpuidle counters */
/* This can be moved to within driver enter routine
/*
* Update cpuidle counters
* This can be moved to within driver enter routine,
* but that results in multiple copies of same code.
*/
diff = ktime_us_delta(time_end, time_start);
if (diff > INT_MAX)
diff = INT_MAX;
dev->last_residency = (int)diff;
dev->states_usage[entered_state].time += dev->last_residency;
dev->states_usage[entered_state].usage++;
} else {
@@ -643,22 +661,12 @@ EXPORT_SYMBOL_GPL(cpuidle_register);
static void wake_up_idle_cpus(void *v)
{
int cpu;
struct cpumask cpus;
unsigned long cpus = atomic_read(&idled) & *cpumask_bits(to_cpumask(v));
preempt_disable();
if (v) {
cpumask_andnot(&cpus, v, cpu_isolated_mask);
cpumask_and(&cpus, &cpus, cpu_online_mask);
} else
cpumask_andnot(&cpus, cpu_online_mask, cpu_isolated_mask);
for_each_cpu(cpu, &cpus) {
if (cpu == smp_processor_id())
continue;
wake_up_if_idle(cpu);
}
preempt_enable();
/* Use READ_ONCE to get the isolated mask outside cpu_add_remove_lock */
cpus &= ~READ_ONCE(*cpumask_bits(cpu_isolated_mask));
if (cpus)
arch_send_wakeup_ipi_mask(to_cpumask(&cpus));
}
/*

View File

@@ -41,7 +41,6 @@
#include <soc/qcom/event_timer.h>
#include <soc/qcom/lpm_levels.h>
#include <soc/qcom/lpm-stats.h>
#include <soc/qcom/minidump.h>
#include <asm/arch_timer.h>
#include <asm/suspend.h>
#include <asm/cpuidle.h>
@@ -60,30 +59,6 @@
#define PSCI_AFFINITY_LEVEL(lvl) ((lvl & 0x3) << 24)
#define BIAS_HYST (bias_hyst * NSEC_PER_MSEC)
enum {
MSM_LPM_LVL_DBG_SUSPEND_LIMITS = BIT(0),
MSM_LPM_LVL_DBG_IDLE_LIMITS = BIT(1),
};
enum debug_event {
CPU_ENTER,
CPU_EXIT,
CLUSTER_ENTER,
CLUSTER_EXIT,
CPU_HP_STARTING,
CPU_HP_DYING,
};
struct lpm_debug {
u64 time;
enum debug_event evt;
int cpu;
uint32_t arg1;
uint32_t arg2;
uint32_t arg3;
uint32_t arg4;
};
static struct system_pm_ops *sys_pm_ops;
@@ -122,9 +97,6 @@ static bool suspend_in_progress;
static struct hrtimer lpm_hrtimer;
static DEFINE_PER_CPU(struct hrtimer, histtimer);
static DEFINE_PER_CPU(struct hrtimer, biastimer);
static struct lpm_debug *lpm_debug;
static phys_addr_t lpm_debug_phys;
static const int num_dbg_elements = 0x100;
static void cluster_unprepare(struct lpm_cluster *cluster,
const struct cpumask *cpu, int child_idx, bool from_idle,
@@ -306,38 +278,10 @@ int lpm_get_latency(struct latency_level *level, uint32_t *latency)
}
EXPORT_SYMBOL(lpm_get_latency);
static void update_debug_pc_event(enum debug_event event, uint32_t arg1,
uint32_t arg2, uint32_t arg3, uint32_t arg4)
{
struct lpm_debug *dbg;
int idx;
static DEFINE_SPINLOCK(debug_lock);
static int pc_event_index;
if (!lpm_debug)
return;
spin_lock(&debug_lock);
idx = pc_event_index++;
dbg = &lpm_debug[idx & (num_dbg_elements - 1)];
dbg->evt = event;
dbg->time = arch_counter_get_cntvct();
dbg->cpu = raw_smp_processor_id();
dbg->arg1 = arg1;
dbg->arg2 = arg2;
dbg->arg3 = arg3;
dbg->arg4 = arg4;
spin_unlock(&debug_lock);
}
static int lpm_dying_cpu(unsigned int cpu)
{
struct lpm_cluster *cluster = per_cpu(cpu_lpm, cpu)->parent;
update_debug_pc_event(CPU_HP_DYING, cpu,
cluster->num_children_in_sync.bits[0],
cluster->child_cpus.bits[0], false);
cluster_prepare(cluster, get_cpu_mask(cpu), NR_LPM_LEVELS, false, 0);
return 0;
}
@@ -346,9 +290,6 @@ static int lpm_starting_cpu(unsigned int cpu)
{
struct lpm_cluster *cluster = per_cpu(cpu_lpm, cpu)->parent;
update_debug_pc_event(CPU_HP_STARTING, cpu,
cluster->num_children_in_sync.bits[0],
cluster->child_cpus.bits[0], false);
cluster_unprepare(cluster, get_cpu_mask(cpu), NR_LPM_LEVELS, false,
0, true);
return 0;
@@ -734,7 +675,7 @@ static int cpu_power_select(struct cpuidle_device *dev,
min_residency = pwr_params->min_residency;
max_residency = pwr_params->max_residency;
if (latency_us < lvl_latency_us)
if (latency_us <= lvl_latency_us)
break;
if (next_event_us) {
@@ -1077,7 +1018,7 @@ static int cluster_select(struct lpm_cluster *cluster, bool from_idle,
&level->num_cpu_votes))
continue;
if (from_idle && latency_us < pwr_params->exit_latency)
if (from_idle && latency_us <= pwr_params->exit_latency)
break;
if (sleep_us < (pwr_params->exit_latency +
@@ -1137,9 +1078,6 @@ static int cluster_configure(struct lpm_cluster *cluster, int idx,
return -EPERM;
if (idx != cluster->default_level) {
update_debug_pc_event(CLUSTER_ENTER, idx,
cluster->num_children_in_sync.bits[0],
cluster->child_cpus.bits[0], from_idle);
trace_cluster_enter(cluster->cluster_name, idx,
cluster->num_children_in_sync.bits[0],
cluster->child_cpus.bits[0], from_idle);
@@ -1302,9 +1240,6 @@ static void cluster_unprepare(struct lpm_cluster *cluster,
if (sys_pm_ops && sys_pm_ops->exit)
sys_pm_ops->exit(success);
update_debug_pc_event(CLUSTER_EXIT, cluster->last_level,
cluster->num_children_in_sync.bits[0],
cluster->child_cpus.bits[0], from_idle);
trace_cluster_exit(cluster->cluster_name, cluster->last_level,
cluster->num_children_in_sync.bits[0],
cluster->child_cpus.bits[0], from_idle);
@@ -1418,15 +1353,11 @@ static bool psci_enter_sleep(struct lpm_cpu *cpu, int idx, bool from_idle)
affinity_level = PSCI_AFFINITY_LEVEL(affinity_level);
state_id += power_state + affinity_level + cpu->levels[idx].psci_id;
update_debug_pc_event(CPU_ENTER, state_id,
0xdeaffeed, 0xdeaffeed, from_idle);
stop_critical_timings();
success = !arm_cpuidle_suspend(state_id);
start_critical_timings();
update_debug_pc_event(CPU_EXIT, state_id,
success, 0xdeaffeed, from_idle);
if (from_idle && cpu->levels[idx].use_bc_timer)
tick_broadcast_exit();
@@ -1511,7 +1442,9 @@ static int lpm_cpuidle_enter(struct cpuidle_device *dev,
if (need_resched())
goto exit;
cpuidle_set_idle_cpu(dev->cpu);
success = psci_enter_sleep(cpu, idx, true);
cpuidle_clear_idle_cpu(dev->cpu);
exit:
end_time = ktime_to_ns(ktime_get());
@@ -1803,11 +1736,9 @@ static const struct platform_s2idle_ops lpm_s2idle_ops = {
static int lpm_probe(struct platform_device *pdev)
{
int ret;
int size;
unsigned int cpu;
struct hrtimer *cpu_histtimer;
struct kobject *module_kobj = NULL;
struct md_region md_entry;
get_online_cpus();
lpm_root_node = lpm_of_parse_cluster(pdev);
@@ -1839,10 +1770,6 @@ static int lpm_probe(struct platform_device *pdev)
cluster_timer_init(lpm_root_node);
size = num_dbg_elements * sizeof(struct lpm_debug);
lpm_debug = dma_alloc_coherent(&pdev->dev, size,
&lpm_debug_phys, GFP_KERNEL);
register_cluster_lpm_stats(lpm_root_node, NULL);
ret = cluster_cpuidle_register(lpm_root_node);
@@ -1871,14 +1798,6 @@ static int lpm_probe(struct platform_device *pdev)
goto failed;
}
/* Add lpm_debug to Minidump*/
strlcpy(md_entry.name, "KLPMDEBUG", sizeof(md_entry.name));
md_entry.virt_addr = (uintptr_t)lpm_debug;
md_entry.phys_addr = lpm_debug_phys;
md_entry.size = size;
if (msm_minidump_add_region(&md_entry))
pr_info("Failed to add lpm_debug in Minidump\n");
return 0;
failed:
free_cluster_node(lpm_root_node);

View File

@@ -141,9 +141,11 @@ struct qcota_stat {
u64 f9_op_fail;
};
static struct qcota_stat _qcota_stat;
#ifdef CONFIG_DEBUG_FS
static struct dentry *_debug_dent;
static char _debug_read_buf[DEBUG_MAX_RW_BUF];
static int _debug_qcota;
#endif
static struct ota_dev_control *qcota_control(void)
{
@@ -834,6 +836,7 @@ static struct platform_driver qcota_plat_driver = {
},
};
#ifdef CONFIG_DEBUG_FS
static int _disp_stats(void)
{
struct qcota_stat *pstat;
@@ -985,15 +988,15 @@ err:
debugfs_remove_recursive(_debug_dent);
return rc;
}
#endif
static int __init qcota_init(void)
{
int rc;
struct ota_dev_control *podev;
rc = _qcota_debug_init();
if (rc)
return rc;
#ifdef CONFIG_DEBUG_FS
_qcota_debug_init();
#endif
podev = &qcota_dev;
INIT_LIST_HEAD(&podev->ready_commands);
@@ -1007,7 +1010,9 @@ static int __init qcota_init(void)
}
static void __exit qcota_exit(void)
{
#ifdef CONFIG_DEBUG_FS
debugfs_remove_recursive(_debug_dent);
#endif
platform_driver_unregister(&qcota_plat_driver);
}

View File

@@ -233,9 +233,11 @@ struct qcedev_stat {
};
static struct qcedev_stat _qcedev_stat;
#ifdef CONFIG_DEBUG_FS
static struct dentry *_debug_dent;
static char _debug_read_buf[DEBUG_MAX_RW_BUF];
static int _debug_qcedev;
#endif
static struct qcedev_control *qcedev_minor_to_control(unsigned int n)
{
@@ -2231,6 +2233,7 @@ static struct platform_driver qcedev_plat_driver = {
},
};
#ifdef CONFIG_DEBUG_FS
static int _disp_stats(int id)
{
struct qcedev_stat *pstat;
@@ -2320,16 +2323,22 @@ err:
debugfs_remove_recursive(_debug_dent);
return rc;
}
#endif
static int qcedev_init(void)
{
#ifdef CONFIG_DEBUG_FS
_qcedev_debug_init();
#endif
return platform_driver_register(&qcedev_plat_driver);
}
static void qcedev_exit(void)
{
#ifdef CONFIG_DEBUG_FS
debugfs_remove_recursive(_debug_dent);
#endif
platform_driver_unregister(&qcedev_plat_driver);
}

View File

@@ -125,8 +125,10 @@ struct crypto_stat {
u64 ahash_op_fail;
};
static struct crypto_stat _qcrypto_stat;
#ifdef CONFIG_DEBUG_FS
static struct dentry *_debug_dent;
static char _debug_read_buf[DEBUG_MAX_RW_BUF];
#endif
static bool _qcrypto_init_assign;
struct crypto_priv;
struct qcrypto_req_control {
@@ -1147,6 +1149,7 @@ static void _qcrypto_cra_aead_aes_exit(struct crypto_aead *tfm)
ctx->ahash_aead_aes192_fb = NULL;
}
#ifdef CONFIG_DEBUG_FS
static int _disp_stats(int id)
{
struct crypto_stat *pstat;
@@ -1320,6 +1323,7 @@ static int _disp_stats(int id)
i, cp->cpu_req[i]);
return len;
}
#endif
static void _qcrypto_remove_engine(struct crypto_engine *pengine)
{
@@ -5462,6 +5466,7 @@ static struct platform_driver __qcrypto = {
},
};
#ifdef CONFIG_DEBUG_FS
static int _debug_qcrypto;
static int _debug_stats_open(struct inode *inode, struct file *file)
@@ -5550,12 +5555,16 @@ err:
debugfs_remove_recursive(_debug_dent);
return rc;
}
#endif
static int __init _qcrypto_init(void)
{
struct crypto_priv *pcp = &qcrypto_dev;
#ifdef CONFIG_DEBUG_FS
_qcrypto_debug_init();
#endif
INIT_LIST_HEAD(&pcp->alg_list);
INIT_LIST_HEAD(&pcp->engine_list);
init_llist_head(&pcp->ordered_resp_list);
@@ -5580,7 +5589,9 @@ static int __init _qcrypto_init(void)
static void __exit _qcrypto_exit(void)
{
pr_debug("%s Unregister QCRYPTO\n", __func__);
#ifdef CONFIG_DEBUG_FS
debugfs_remove_recursive(_debug_dent);
#endif
platform_driver_unregister(&__qcrypto);
}

View File

@@ -47,6 +47,18 @@
static atomic_long_t name_counter;
static struct kmem_cache *kmem_attach_pool;
static struct kmem_cache *kmem_dma_buf_pool;
void __init init_dma_buf_kmem_pool(void)
{
kmem_attach_pool = KMEM_CACHE(dma_buf_attachment, SLAB_HWCACHE_ALIGN | SLAB_PANIC);
kmem_dma_buf_pool = kmem_cache_create("dma_buf",
(sizeof(struct dma_buf) + sizeof(struct reservation_object)),
(sizeof(struct dma_buf) + sizeof(struct reservation_object)),
SLAB_HWCACHE_ALIGN | SLAB_PANIC, NULL);
}
static inline int is_dma_buf_file(struct file *);
struct dma_buf_list {
@@ -138,7 +150,10 @@ static int dma_buf_release(struct inode *inode, struct file *file)
module_put(dmabuf->owner);
kfree(dmabuf->buf_name);
kfree(dmabuf);
if (dmabuf->from_kmem)
kmem_cache_free(kmem_dma_buf_pool, dmabuf);
else
kfree(dmabuf);
return 0;
}
@@ -564,6 +579,7 @@ struct dma_buf *dma_buf_export(const struct dma_buf_export_info *exp_info)
char *bufname;
int ret;
long cnt;
bool from_kmem;
if (!exp_info->resv)
alloc_size += sizeof(struct reservation_object);
@@ -592,7 +608,16 @@ struct dma_buf *dma_buf_export(const struct dma_buf_export_info *exp_info)
goto err_module;
}
dmabuf = kzalloc(alloc_size, GFP_KERNEL);
from_kmem = (alloc_size ==
(sizeof(struct dma_buf) + sizeof(struct reservation_object)));
if (from_kmem) {
dmabuf = kmem_cache_zalloc(kmem_dma_buf_pool, GFP_KERNEL);
dmabuf->from_kmem = true;
} else {
dmabuf = kzalloc(alloc_size, GFP_KERNEL);
}
if (!dmabuf) {
ret = -ENOMEM;
goto err_name;
@@ -637,7 +662,10 @@ struct dma_buf *dma_buf_export(const struct dma_buf_export_info *exp_info)
return dmabuf;
err_dmabuf:
kfree(dmabuf);
if (from_kmem)
kmem_cache_free(kmem_dma_buf_pool, dmabuf);
else
kfree(dmabuf);
err_name:
kfree(bufname);
err_module:
@@ -744,8 +772,8 @@ struct dma_buf_attachment *dma_buf_attach(struct dma_buf *dmabuf,
if (WARN_ON(!dmabuf || !dev))
return ERR_PTR(-EINVAL);
attach = kzalloc(sizeof(*attach), GFP_KERNEL);
if (!attach)
attach = kmem_cache_zalloc(kmem_attach_pool, GFP_KERNEL);
if (attach == NULL)
return ERR_PTR(-ENOMEM);
attach->dev = dev;
@@ -764,7 +792,7 @@ struct dma_buf_attachment *dma_buf_attach(struct dma_buf *dmabuf,
return attach;
err_attach:
kfree(attach);
kmem_cache_free(kmem_attach_pool, attach);
mutex_unlock(&dmabuf->lock);
return ERR_PTR(ret);
}
@@ -789,7 +817,7 @@ void dma_buf_detach(struct dma_buf *dmabuf, struct dma_buf_attachment *attach)
dmabuf->ops->detach(dmabuf, attach);
mutex_unlock(&dmabuf->lock);
kfree(attach);
kmem_cache_free(kmem_attach_pool, attach);
}
EXPORT_SYMBOL_GPL(dma_buf_detach);

View File

@@ -27,12 +27,18 @@
#include <uapi/linux/sync_file.h>
static const struct file_operations sync_file_fops;
static struct kmem_cache *kmem_sync_file_pool;
void __init init_sync_kmem_pool(void)
{
kmem_sync_file_pool = KMEM_CACHE(sync_file, SLAB_HWCACHE_ALIGN | SLAB_PANIC);
}
static struct sync_file *sync_file_alloc(void)
{
struct sync_file *sync_file;
sync_file = kzalloc(sizeof(*sync_file), GFP_KERNEL);
sync_file = kmem_cache_zalloc(kmem_sync_file_pool, GFP_KERNEL);
if (!sync_file)
return NULL;
@@ -48,7 +54,7 @@ static struct sync_file *sync_file_alloc(void)
return sync_file;
err:
kfree(sync_file);
kmem_cache_free(kmem_sync_file_pool, sync_file);
return NULL;
}
@@ -307,7 +313,7 @@ static int sync_file_release(struct inode *inode, struct file *file)
if (test_bit(POLL_ENABLED, &sync_file->flags))
dma_fence_remove_callback(sync_file->fence, &sync_file->cb);
dma_fence_put(sync_file->fence);
kfree(sync_file);
kmem_cache_free(kmem_sync_file_pool, sync_file);
return 0;
}
@@ -408,6 +414,7 @@ static long sync_file_ioctl_fence_info(struct sync_file *sync_file,
{
struct sync_file_info info;
struct sync_fence_info *fence_info = NULL;
struct sync_fence_info fence_info_onstack[4] __aligned(8);
struct dma_fence **fences;
__u32 size;
int num_fences, ret, i;
@@ -437,9 +444,15 @@ static long sync_file_ioctl_fence_info(struct sync_file *sync_file,
return -EINVAL;
size = num_fences * sizeof(*fence_info);
fence_info = kzalloc(size, GFP_KERNEL);
if (!fence_info)
return -ENOMEM;
if (likely(size <= sizeof(fence_info_onstack))) {
memset(fence_info_onstack, 0, sizeof(fence_info_onstack));
fence_info = fence_info_onstack;
} else {
fence_info = kzalloc(size, GFP_KERNEL);
if (!fence_info)
return -ENOMEM;
}
for (i = 0; i < num_fences; i++) {
int status = sync_fill_fence_info(fences[i], &fence_info[i]);
@@ -462,7 +475,8 @@ no_fences:
ret = 0;
out:
kfree(fence_info);
if (unlikely(fence_info != fence_info_onstack))
kfree(fence_info);
return ret;
}

View File

@@ -1087,6 +1087,9 @@ int dma_async_device_register(struct dma_device *device)
dma_channel_rebalance();
mutex_unlock(&dma_list_mutex);
if (!chancnt)
kfree(idr_ref);
return 0;
err_out:

View File

@@ -43,16 +43,10 @@
#define GPI_LOG(gpi_dev, fmt, ...) do { \
if (gpi_dev->klog_lvl != LOG_LVL_MASK_ALL) \
dev_dbg(gpi_dev->dev, "%s: " fmt, __func__, ##__VA_ARGS__); \
if (gpi_dev->ilctxt && gpi_dev->ipc_log_lvl != LOG_LVL_MASK_ALL) \
ipc_log_string(gpi_dev->ilctxt, \
"%s: " fmt, __func__, ##__VA_ARGS__); \
} while (0)
#define GPI_ERR(gpi_dev, fmt, ...) do { \
if (gpi_dev->klog_lvl >= LOG_LVL_ERROR) \
dev_err(gpi_dev->dev, "%s: " fmt, __func__, ##__VA_ARGS__); \
if (gpi_dev->ilctxt && gpi_dev->ipc_log_lvl >= LOG_LVL_ERROR) \
ipc_log_string(gpi_dev->ilctxt, \
"%s: " fmt, __func__, ##__VA_ARGS__); \
} while (0)
/* gpii specific logging macros */
@@ -60,28 +54,16 @@
if (gpii->klog_lvl >= LOG_LVL_INFO) \
pr_info("%s:%u:%s: " fmt, gpii->label, ch, \
__func__, ##__VA_ARGS__); \
if (gpii->ilctxt && gpii->ipc_log_lvl >= LOG_LVL_INFO) \
ipc_log_string(gpii->ilctxt, \
"ch:%u %s: " fmt, ch, \
__func__, ##__VA_ARGS__); \
} while (0)
#define GPII_ERR(gpii, ch, fmt, ...) do { \
if (gpii->klog_lvl >= LOG_LVL_ERROR) \
pr_err("%s:%u:%s: " fmt, gpii->label, ch, \
__func__, ##__VA_ARGS__); \
if (gpii->ilctxt && gpii->ipc_log_lvl >= LOG_LVL_ERROR) \
ipc_log_string(gpii->ilctxt, \
"ch:%u %s: " fmt, ch, \
__func__, ##__VA_ARGS__); \
} while (0)
#define GPII_CRITIC(gpii, ch, fmt, ...) do { \
if (gpii->klog_lvl >= LOG_LVL_CRITICAL) \
pr_err("%s:%u:%s: " fmt, gpii->label, ch, \
__func__, ##__VA_ARGS__); \
if (gpii->ilctxt && gpii->ipc_log_lvl >= LOG_LVL_CRITICAL) \
ipc_log_string(gpii->ilctxt, \
"ch:%u %s: " fmt, ch, \
__func__, ##__VA_ARGS__); \
} while (0)
enum DEBUG_LOG_LVL {
@@ -109,19 +91,11 @@ enum EV_PRIORITY {
if (gpii->klog_lvl >= LOG_LVL_REG_ACCESS) \
pr_info("%s:%u:%s: " fmt, gpii->label, \
ch, __func__, ##__VA_ARGS__); \
if (gpii->ilctxt && gpii->ipc_log_lvl >= LOG_LVL_REG_ACCESS) \
ipc_log_string(gpii->ilctxt, \
"ch:%u %s: " fmt, ch, \
__func__, ##__VA_ARGS__); \
} while (0)
#define GPII_VERB(gpii, ch, fmt, ...) do { \
if (gpii->klog_lvl >= LOG_LVL_VERBOSE) \
pr_info("%s:%u:%s: " fmt, gpii->label, \
ch, __func__, ##__VA_ARGS__); \
if (gpii->ilctxt && gpii->ipc_log_lvl >= LOG_LVL_VERBOSE) \
ipc_log_string(gpii->ilctxt, \
"ch:%u %s: " fmt, ch, \
__func__, ##__VA_ARGS__); \
} while (0)
#else

View File

@@ -1,6 +1,5 @@
config MSM_TZ_LOG
tristate "MSM Trust Zone (TZ) Log Driver"
depends on DEBUG_FS
help
This option enables a driver with a debugfs interface for messages
produced by the Secure code (Trust zone). These messages provide

View File

@@ -1118,17 +1118,16 @@ static int tz_log_probe(struct platform_device *pdev)
tzdbg.diag_buf = (struct tzdbg_t *)ptr;
if (tzdbgfs_init(pdev))
goto err;
if (tzdbgfs_init(pdev)) {
kfree(tzdbg.diag_buf);
tzdbg.diag_buf = NULL;
}
tzdbg_register_qsee_log_buf(pdev);
tzdbg_get_tz_version();
return 0;
err:
kfree(tzdbg.diag_buf);
return -ENXIO;
}
static int tz_log_remove(struct platform_device *pdev)

View File

@@ -101,7 +101,7 @@
int drm_irq_install(struct drm_device *dev, int irq)
{
int ret;
unsigned long sh_flags = 0;
unsigned long sh_flags = IRQF_PERF_CRITICAL;
if (!drm_core_check_feature(dev, DRIVER_HAVE_IRQ))
return -EINVAL;

View File

@@ -73,7 +73,6 @@ config DRM_MSM_DSI_STAGING
config DSI_PARSER
bool "Enable DSI panel configuration parser"
depends on DYNAMIC_DEBUG
default y
help
Choose this option if you need text parser for a DSI panel
@@ -198,3 +197,7 @@ config DRM_SDE_RSC
avoids the display core power collapse. A client can also register
for display core power collapse events on rsc.
config FENCE_DEBUG
bool "Print fence name to userspace"
depends on DRM_MSM
default n

View File

@@ -221,3 +221,5 @@ msm_drm-$(CONFIG_DRM_MSM) += \
msm_debugfs.o
obj-$(CONFIG_DRM_MSM) += msm_drm.o
CFLAGS_sde_crtc.o += -Wframe-larger-than=8192

View File

@@ -14,6 +14,9 @@
#define pr_fmt(fmt) "[drm-dp] %s: " fmt, __func__
#if defined(CONFIG_ANDROID) && !defined(CONFIG_DEBUG_FS)
#define CONFIG_DEBUG_FS
#endif
#include <linux/debugfs.h>
#include <linux/slab.h>

View File

@@ -14,6 +14,9 @@
#define pr_fmt(fmt) "[drm-dp] %s: " fmt, __func__
#if defined(CONFIG_ANDROID) && !defined(CONFIG_DEBUG_FS)
#define CONFIG_DEBUG_FS
#endif
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/uaccess.h>

View File

@@ -14,6 +14,9 @@
#define pr_fmt(fmt) "[drm-dp]: %s: " fmt, __func__
#if defined(CONFIG_ANDROID) && !defined(CONFIG_DEBUG_FS)
#define CONFIG_DEBUG_FS
#endif
#include <drm/drm_atomic_helper.h>
#include <drm/drm_atomic.h>
#include <drm/drm_crtc.h>

View File

@@ -210,7 +210,7 @@ static int dsi_ctrl_debugfs_init(struct dsi_ctrl *dsi_ctrl,
dir = debugfs_create_dir(dsi_ctrl->name, parent);
if (IS_ERR_OR_NULL(dir)) {
rc = PTR_ERR(dir);
pr_err("[DSI_%d] debugfs create dir failed, rc=%d\n",
pr_debug("[DSI_%d] debugfs create dir failed, rc=%d\n",
dsi_ctrl->cell_index, rc);
goto error;
}
@@ -1923,7 +1923,7 @@ static struct platform_driver dsi_ctrl_driver = {
},
};
#if defined(CONFIG_DEBUG_FS)
#if 0
void dsi_ctrl_debug_dump(u32 *entries, u32 size)
{
@@ -2037,12 +2037,7 @@ int dsi_ctrl_drv_init(struct dsi_ctrl *dsi_ctrl, struct dentry *parent)
goto error;
}
rc = dsi_ctrl_debugfs_init(dsi_ctrl, parent);
if (rc) {
pr_err("[DSI_%d] failed to init debug fs, rc=%d\n",
dsi_ctrl->cell_index, rc);
goto error;
}
dsi_ctrl_debugfs_init(dsi_ctrl, parent);
error:
mutex_unlock(&dsi_ctrl->ctrl_lock);
@@ -2577,7 +2572,7 @@ static int _dsi_ctrl_setup_isr(struct dsi_ctrl *dsi_ctrl)
dsi_ctrl->irq_info.irq_num = irq_num;
disable_irq_nosync(irq_num);
pr_info("[DSI_%d] IRQ %d registered\n",
pr_debug("[DSI_%d] IRQ %d registered\n",
dsi_ctrl->cell_index, irq_num);
}
}

View File

@@ -390,36 +390,56 @@ end:
static irqreturn_t dsi_display_panel_te_irq_handler(int irq, void *data)
{
struct dsi_display *display = (struct dsi_display *)data;
struct dsi_display_te_listener *tl;
/*
* This irq handler is used for sole purpose of identifying
* ESD attacks on panel and we can safely assume IRQ_HANDLED
* in case of display not being initialized yet
*/
if (!display)
if (unlikely(!display))
return IRQ_HANDLED;
SDE_EVT32(SDE_EVTLOG_FUNC_CASE1);
complete_all(&display->esd_te_gate);
spin_lock(&display->te_lock);
list_for_each_entry(tl, &display->te_listeners, head)
tl->handler(tl);
spin_unlock(&display->te_lock);
return IRQ_HANDLED;
}
static void dsi_display_change_te_irq_status(struct dsi_display *display,
bool enable)
int dsi_display_add_te_listener(struct dsi_display *display,
struct dsi_display_te_listener *tl)
{
if (!display) {
pr_err("Invalid params\n");
return;
}
unsigned long flags;
bool needs_enable;
/* Handle unbalanced irq enable/disbale calls */
if (enable && !display->is_te_irq_enabled) {
if (!display || !tl)
return -ENOENT;
spin_lock_irqsave(&display->te_lock, flags);
needs_enable = list_empty(&display->te_listeners);
list_add(&tl->head, &display->te_listeners);
spin_unlock_irqrestore(&display->te_lock, flags);
if (needs_enable)
enable_irq(gpio_to_irq(display->disp_te_gpio));
display->is_te_irq_enabled = true;
} else if (!enable && display->is_te_irq_enabled) {
disable_irq(gpio_to_irq(display->disp_te_gpio));
display->is_te_irq_enabled = false;
}
return 0;
}
int dsi_display_remove_te_listener(struct dsi_display *display,
struct dsi_display_te_listener *tl)
{
unsigned long flags;
if (!display || !tl)
return -ENOENT;
spin_lock_irqsave(&display->te_lock, flags);
list_del(&tl->head);
if (list_empty(&display->te_listeners))
disable_irq_nosync(gpio_to_irq(display->disp_te_gpio));
spin_unlock_irqrestore(&display->te_lock, flags);
return 0;
}
static void dsi_display_register_te_irq(struct dsi_display *display)
@@ -446,15 +466,16 @@ static void dsi_display_register_te_irq(struct dsi_display *display)
goto error;
}
init_completion(&display->esd_te_gate);
te_irq = gpio_to_irq(display->disp_te_gpio);
spin_lock_init(&display->te_lock);
INIT_LIST_HEAD(&display->te_listeners);
/* Avoid deferred spurious irqs with disable_irq() */
irq_set_status_flags(te_irq, IRQ_DISABLE_UNLAZY);
rc = devm_request_irq(dev, te_irq, dsi_display_panel_te_irq_handler,
IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
"TE_GPIO", display);
IRQF_TRIGGER_RISING, "TE_GPIO", display);
if (rc) {
pr_err("TE request_irq failed for ESD rc:%d\n", rc);
irq_clear_status_flags(te_irq, IRQ_DISABLE_UNLAZY);
@@ -462,7 +483,6 @@ static void dsi_display_register_te_irq(struct dsi_display *display)
}
disable_irq(te_irq);
display->is_te_irq_enabled = false;
return;
@@ -756,21 +776,31 @@ static int dsi_display_status_bta_request(struct dsi_display *display)
return rc;
}
static void _handle_esd_te(struct dsi_display_te_listener *tl)
{
struct completion *esd_te_gate = tl->data;
complete(esd_te_gate);
}
static int dsi_display_status_check_te(struct dsi_display *display)
{
int rc = 1;
int const esd_te_timeout = msecs_to_jiffies(3*20);
DECLARE_COMPLETION(esd_te_gate);
struct dsi_display_te_listener tl = {
.handler = _handle_esd_te,
.data = &esd_te_gate,
};
dsi_display_change_te_irq_status(display, true);
dsi_display_add_te_listener(display, &tl);
reinit_completion(&display->esd_te_gate);
if (!wait_for_completion_timeout(&display->esd_te_gate,
esd_te_timeout)) {
if (!wait_for_completion_timeout(&esd_te_gate, esd_te_timeout)) {
pr_err("TE check failed\n");
rc = -EINVAL;
}
dsi_display_change_te_irq_status(display, false);
dsi_display_remove_te_listener(display, &tl);
return rc;
}
@@ -1417,7 +1447,6 @@ static ssize_t debugfs_alter_esd_check_mode(struct file *file,
if (!strcmp(buf, "te_signal_check\n")) {
pr_info("ESD check is switched to TE mode by user\n");
esd_config->status_mode = ESD_MODE_PANEL_TE;
dsi_display_change_te_irq_status(display, true);
}
if (!strcmp(buf, "reg_read\n")) {
@@ -1430,8 +1459,6 @@ static ssize_t debugfs_alter_esd_check_mode(struct file *file,
goto error;
}
esd_config->status_mode = ESD_MODE_REG_READ;
if (dsi_display_is_te_based_esd(display))
dsi_display_change_te_irq_status(display, false);
}
if (!strcmp(buf, "esd_sw_sim_success\n"))
@@ -1552,7 +1579,7 @@ static int dsi_display_debugfs_init(struct dsi_display *display)
dir = debugfs_create_dir(display->name, NULL);
if (IS_ERR_OR_NULL(dir)) {
rc = PTR_ERR(dir);
pr_err("[%s] debugfs create dir failed, rc = %d\n",
pr_debug("[%s] debugfs create dir failed, rc = %d\n",
display->name, rc);
goto error;
}
@@ -5283,11 +5310,7 @@ static int dsi_display_bind(struct device *dev,
goto error;
}
rc = dsi_display_debugfs_init(display);
if (rc) {
pr_err("[%s] debugfs init failed, rc=%d\n", display->name, rc);
goto error;
}
dsi_display_debugfs_init(display);
atomic_set(&display->clkrate_change_pending, 0);
display->cached_clk_rate = 0;

View File

@@ -158,8 +158,8 @@ struct dsi_display_ext_bridge {
* @sw_te_using_wd: Is software te enabled
* @display_lock: Mutex for dsi_display interface.
* @disp_te_gpio: GPIO for panel TE interrupt.
* @is_te_irq_enabled:bool to specify whether TE interrupt is enabled.
* @esd_te_gate: completion gate to signal TE interrupt.
* @te_listeners: List of listeners registered for TE callbacks.
* @te_lock: Lock protecting te_listeners list.
* @ctrl_count: Number of DSI interfaces required by panel.
* @ctrl: Controller information for DSI display.
* @panel: Handle to DSI panel.
@@ -209,8 +209,8 @@ struct dsi_display {
bool sw_te_using_wd;
struct mutex display_lock;
int disp_te_gpio;
bool is_te_irq_enabled;
struct completion esd_te_gate;
struct list_head te_listeners;
spinlock_t te_lock;
u32 ctrl_count;
struct dsi_display_ctrl ctrl[MAX_DSI_CTRLS_PER_DISPLAY];
@@ -285,6 +285,48 @@ struct dsi_display {
atomic_t fod_ui;
};
/**
* struct dsi_display_te_listener - data for TE listener
* @head: List node pointer.
* @handler: TE callback function, called in atomic context.
* @data: Private data that is not modified by add/remove API
*/
struct dsi_display_te_listener {
struct list_head head;
void (*handler)(struct dsi_display_te_listener *);
void *data;
};
/**
* dsi_display_add_te_listener - adds a new listener for TE events
* @display: Handle to display
* @tl: TE listener struct
*
* Adds a new TE listener and enables TE irq if there are no other listeners.
* Upon TE interrupt, the handler passed in will be called back in atomic
* context.
*
* Note: caller is responsible for lifetime of @tl which should be available
* until dsi_display_remove_te_listener() is called.
*
* Returns: 0 on success, otherwise errno on failure
*/
int dsi_display_add_te_listener(struct dsi_display *display,
struct dsi_display_te_listener *tl);
/**
* dsi_display_add_te_listener - removes listener for TE events
* @display: Handle to display
* @tl: TE listener struct
*
* Removes TE listener and disables TE irq if there are no other listeners.
*
* Returns: 0 on success, otherwise errno on failure
*/
int dsi_display_remove_te_listener(struct dsi_display *display,
struct dsi_display_te_listener *tl);
int dsi_display_dev_probe(struct platform_device *pdev);
int dsi_display_dev_remove(struct platform_device *pdev);

View File

@@ -61,6 +61,8 @@
#define MSM_VERSION_MINOR 2
#define MSM_VERSION_PATCHLEVEL 0
static struct kmem_cache *kmem_vblank_work_pool;
static void msm_fb_output_poll_changed(struct drm_device *dev)
{
struct msm_drm_private *priv = NULL;
@@ -254,7 +256,7 @@ static void vblank_ctrl_worker(struct kthread_work *work)
else
kms->funcs->disable_vblank(kms, priv->crtcs[cur_work->crtc_id]);
kfree(cur_work);
kmem_cache_free(kmem_vblank_work_pool, cur_work);
}
static int vblank_ctrl_queue_work(struct msm_drm_private *priv,
@@ -265,7 +267,7 @@ static int vblank_ctrl_queue_work(struct msm_drm_private *priv,
if (!priv || crtc_id >= priv->num_crtcs)
return -EINVAL;
cur_work = kzalloc(sizeof(*cur_work), GFP_ATOMIC);
cur_work = kmem_cache_zalloc(kmem_vblank_work_pool, GFP_ATOMIC);
if (!cur_work)
return -ENOMEM;
@@ -784,10 +786,21 @@ static int msm_drm_init(struct device *dev, struct drm_driver *drv)
priv->disp_thread[i].crtc_id = priv->crtcs[i]->base.id;
kthread_init_worker(&priv->disp_thread[i].worker);
priv->disp_thread[i].dev = ddev;
priv->disp_thread[i].thread =
kthread_run(kthread_worker_fn,
&priv->disp_thread[i].worker,
"crtc_commit:%d", priv->disp_thread[i].crtc_id);
/* Only pin actual display thread to big cluster */
if (i == 0) {
priv->disp_thread[i].thread =
kthread_run_perf_critical(kthread_worker_fn,
&priv->disp_thread[i].worker,
"crtc_commit:%d", priv->disp_thread[i].crtc_id);
pr_info("%i to big cluster", priv->disp_thread[i].crtc_id);
} else {
priv->disp_thread[i].thread =
kthread_run(kthread_worker_fn,
&priv->disp_thread[i].worker,
"crtc_commit:%d", priv->disp_thread[i].crtc_id);
pr_info("%i to little cluster", priv->disp_thread[i].crtc_id);
}
ret = sched_setscheduler(priv->disp_thread[i].thread,
SCHED_FIFO, &param);
if (ret)
@@ -803,10 +816,20 @@ static int msm_drm_init(struct device *dev, struct drm_driver *drv)
priv->event_thread[i].crtc_id = priv->crtcs[i]->base.id;
kthread_init_worker(&priv->event_thread[i].worker);
priv->event_thread[i].dev = ddev;
priv->event_thread[i].thread =
kthread_run(kthread_worker_fn,
&priv->event_thread[i].worker,
"crtc_event:%d", priv->event_thread[i].crtc_id);
/* Only pin first event thread to big cluster */
if (i == 0) {
priv->event_thread[i].thread =
kthread_run_perf_critical(kthread_worker_fn,
&priv->event_thread[i].worker,
"crtc_event:%d", priv->event_thread[i].crtc_id);
pr_info("%i to big cluster", priv->event_thread[i].crtc_id);
} else {
priv->event_thread[i].thread =
kthread_run(kthread_worker_fn,
&priv->event_thread[i].worker,
"crtc_event:%d", priv->event_thread[i].crtc_id);
pr_info("%i to little cluster", priv->event_thread[i].crtc_id);
}
/**
* event thread should also run at same priority as disp_thread
* because it is handling frame_done events. A lower priority
@@ -851,7 +874,7 @@ static int msm_drm_init(struct device *dev, struct drm_driver *drv)
* other important events.
*/
kthread_init_worker(&priv->pp_event_worker);
priv->pp_event_thread = kthread_run(kthread_worker_fn,
priv->pp_event_thread = kthread_run_perf_critical(kthread_worker_fn,
&priv->pp_event_worker, "pp_event");
ret = sched_setscheduler(priv->pp_event_thread,
@@ -2315,6 +2338,7 @@ static int __init msm_drm_register(void)
return -EINVAL;
DBG("init");
kmem_vblank_work_pool = KMEM_CACHE(vblank_work, SLAB_HWCACHE_ALIGN | SLAB_PANIC);
msm_smmu_driver_init();
msm_dsi_register();
msm_edp_register();

View File

@@ -1,4 +1,4 @@
/* Copyright (c) 2016-2020, The Linux Foundation. All rights reserved.
/* Copyright (c) 2016-2019, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -551,8 +551,8 @@ void sde_core_perf_crtc_update(struct drm_crtc *crtc,
/* display rsc override during solver mode */
if (kms->perf.bw_vote_mode == DISP_RSC_MODE &&
get_sde_rsc_current_state(SDE_RSC_INDEX) ==
SDE_RSC_CMD_STATE) {
get_sde_rsc_current_state(SDE_RSC_INDEX) !=
SDE_RSC_CLK_STATE) {
/* update new bandwidth in all cases */
if (params_changed && ((new->bw_ctl[i] !=
old->bw_ctl[i]) ||
@@ -602,10 +602,9 @@ void sde_core_perf_crtc_update(struct drm_crtc *crtc,
}
if (kms->perf.bw_vote_mode == DISP_RSC_MODE &&
((get_sde_rsc_version(SDE_RSC_INDEX) != SDE_RSC_REV_3) ||
(get_sde_rsc_current_state(SDE_RSC_INDEX) != SDE_RSC_CLK_STATE
((get_sde_rsc_current_state(SDE_RSC_INDEX) != SDE_RSC_CLK_STATE
&& params_changed) ||
(get_sde_rsc_current_state(SDE_RSC_INDEX) == SDE_RSC_CLK_STATE
(get_sde_rsc_current_state(SDE_RSC_INDEX) == SDE_RSC_CLK_STATE
&& update_bus)))
sde_rsc_client_trigger_vote(sde_cstate->rsc_client,
update_bus ? true : false);

View File

@@ -716,6 +716,47 @@ static int _sde_debugfs_fps_status(struct inode *inode, struct file *file)
}
#endif
static ssize_t early_wakeup_store(struct device *device,
struct device_attribute *attr, const char *buf, size_t count)
{
struct drm_crtc *crtc;
struct sde_crtc *sde_crtc;
struct msm_drm_private *priv;
u32 crtc_id;
bool trigger;
if (!device || !buf || !count) {
SDE_ERROR("invalid input param(s)\n");
return -EINVAL;
}
if (kstrtobool(buf, &trigger) < 0)
return -EINVAL;
if (!trigger)
return count;
crtc = dev_get_drvdata(device);
if (!crtc || !crtc->dev || !crtc->dev->dev_private) {
SDE_ERROR("invalid crtc\n");
return -EINVAL;
}
sde_crtc = to_sde_crtc(crtc);
priv = crtc->dev->dev_private;
crtc_id = drm_crtc_index(crtc);
if (crtc_id >= ARRAY_SIZE(priv->disp_thread)) {
SDE_ERROR("invalid crtc index[%d]\n", crtc_id);
return -EINVAL;
}
kthread_queue_work(&priv->disp_thread[crtc_id].worker,
&sde_crtc->early_wakeup_work);
return count;
}
static ssize_t set_fps_periodicity(struct device *device,
struct device_attribute *attr, const char *buf, size_t count)
{
@@ -885,10 +926,12 @@ static DEVICE_ATTR_RO(vsync_event);
static DEVICE_ATTR(measured_fps, 0444, measured_fps_show, NULL);
static DEVICE_ATTR(fps_periodicity_ms, 0644, fps_periodicity_show,
set_fps_periodicity);
static DEVICE_ATTR_WO(early_wakeup);
static struct attribute *sde_crtc_dev_attrs[] = {
&dev_attr_vsync_event.attr,
&dev_attr_measured_fps.attr,
&dev_attr_fps_periodicity_ms.attr,
&dev_attr_early_wakeup.attr,
NULL
};
@@ -2009,7 +2052,7 @@ static void _sde_crtc_blend_setup_mixer(struct drm_crtc *crtc,
struct drm_plane_state *state;
struct sde_crtc_state *cstate;
struct sde_plane_state *pstate = NULL;
struct plane_state *pstates = NULL;
struct plane_state pstates[SDE_PSTATES_MAX];
struct sde_format *format;
struct sde_hw_ctl *ctl;
struct sde_hw_mixer *lm;
@@ -2036,10 +2079,7 @@ static void _sde_crtc_blend_setup_mixer(struct drm_crtc *crtc,
sde_crtc->sbuf_rot_id = 0x0;
sde_crtc->sbuf_rot_id_delta = 0x0;
pstates = kcalloc(SDE_PSTATES_MAX,
sizeof(struct plane_state), GFP_KERNEL);
if (!pstates)
return;
memset(pstates, 0, SDE_PSTATES_MAX * sizeof(struct plane_state));
drm_atomic_crtc_for_each_plane(plane, crtc) {
state = plane->state;
@@ -2080,7 +2120,7 @@ static void _sde_crtc_blend_setup_mixer(struct drm_crtc *crtc,
format = to_sde_format(msm_framebuffer_format(pstate->base.fb));
if (!format) {
SDE_ERROR("invalid format\n");
goto end;
return;
}
if (pstate->stage == SDE_STAGE_BASE && format->alpha_enable)
@@ -2135,7 +2175,6 @@ static void _sde_crtc_blend_setup_mixer(struct drm_crtc *crtc,
_sde_crtc_set_src_split_order(crtc, pstates, cnt);
if (lm && lm->ops.setup_dim_layer) {
cstate = to_sde_crtc_state(crtc->state);
for (i = 0; i < cstate->num_dim_layers; i++)
_sde_crtc_setup_dim_layer_cfg(crtc, sde_crtc,
mixer, &cstate->dim_layer[i]);
@@ -2146,9 +2185,6 @@ static void _sde_crtc_blend_setup_mixer(struct drm_crtc *crtc,
}
_sde_crtc_program_lm_output_roi(crtc);
end:
kfree(pstates);
}
static void _sde_crtc_swap_mixers_for_right_partial_update(
@@ -2257,9 +2293,11 @@ static void _sde_crtc_blend_setup(struct drm_crtc *crtc,
mixer[i].hw_ctl);
/* clear dim_layer settings */
lm = mixer[i].hw_lm;
if (lm->ops.clear_dim_layer)
lm->ops.clear_dim_layer(lm);
if (sde_crtc_state->num_dim_layers) {
lm = mixer[i].hw_lm;
if (lm->ops.clear_dim_layer)
lm->ops.clear_dim_layer(lm);
}
}
_sde_crtc_swap_mixers_for_right_partial_update(crtc);
@@ -5342,7 +5380,7 @@ static int sde_crtc_atomic_check(struct drm_crtc *crtc,
{
struct drm_device *dev;
struct sde_crtc *sde_crtc;
struct plane_state *pstates = NULL;
struct plane_state pstates[SDE_PSTATES_MAX] __aligned(8);
struct sde_crtc_state *cstate;
struct sde_kms *kms;
@@ -5352,7 +5390,7 @@ static int sde_crtc_atomic_check(struct drm_crtc *crtc,
int cnt = 0, rc = 0, mixer_width, i, z_pos, mixer_height;
struct sde_multirect_plane_states *multirect_plane = NULL;
struct sde_multirect_plane_states multirect_plane[SDE_MULTIRECT_PLANE_MAX] __aligned(8);
int multirect_count = 0;
const struct drm_plane_state *pipe_staged[SSPP_MAX];
int left_zpos_cnt = 0, right_zpos_cnt = 0;
@@ -5383,17 +5421,8 @@ static int sde_crtc_atomic_check(struct drm_crtc *crtc,
goto end;
}
pstates = kcalloc(SDE_PSTATES_MAX,
sizeof(struct plane_state), GFP_KERNEL);
multirect_plane = kcalloc(SDE_MULTIRECT_PLANE_MAX,
sizeof(struct sde_multirect_plane_states),
GFP_KERNEL);
if (!pstates || !multirect_plane) {
rc = -ENOMEM;
goto end;
}
memset(pstates, 0, sizeof(pstates));
memset(multirect_plane, 0, sizeof(multirect_plane));
mode = &state->adjusted_mode;
SDE_DEBUG("%s: check", sde_crtc->name);
@@ -5635,8 +5664,6 @@ static int sde_crtc_atomic_check(struct drm_crtc *crtc,
}
end:
kfree(pstates);
kfree(multirect_plane);
_sde_crtc_rp_free_unused(&cstate->rp);
return rc;
}
@@ -6924,6 +6951,40 @@ static void __sde_crtc_idle_notify_work(struct kthread_work *work)
}
}
/*
* __sde_crtc_early_wakeup_work - trigger early wakeup from user space
*/
static void __sde_crtc_early_wakeup_work(struct kthread_work *work)
{
struct sde_crtc *sde_crtc = container_of(work, struct sde_crtc,
early_wakeup_work);
struct drm_crtc *crtc;
struct drm_device *dev;
struct msm_drm_private *priv;
struct sde_kms *sde_kms;
if (!sde_crtc) {
SDE_ERROR("invalid sde crtc\n");
return;
}
if (!sde_crtc->enabled) {
SDE_INFO("sde crtc is not enabled\n");
return;
}
crtc = &sde_crtc->base;
dev = crtc->dev;
if (!dev) {
SDE_ERROR("invalid drm device\n");
return;
}
priv = dev->dev_private;
sde_kms = to_sde_kms(priv->kms);
sde_kms_trigger_early_wakeup(sde_kms, crtc);
}
/* initialize crtc */
struct drm_crtc *sde_crtc_init(struct drm_device *dev, struct drm_plane *plane)
{
@@ -7015,6 +7076,8 @@ struct drm_crtc *sde_crtc_init(struct drm_device *dev, struct drm_plane *plane)
kthread_init_delayed_work(&sde_crtc->idle_notify_work,
__sde_crtc_idle_notify_work);
kthread_init_work(&sde_crtc->early_wakeup_work,
__sde_crtc_early_wakeup_work);
SDE_DEBUG("%s: successfully initialized crtc\n", sde_crtc->name);
return crtc;
@@ -7088,6 +7151,7 @@ static int _sde_crtc_event_enable(struct sde_kms *kms,
if (!node)
return -ENOMEM;
INIT_LIST_HEAD(&node->list);
INIT_LIST_HEAD(&node->irq.list);
node->func = custom_events[i].func;
node->event = event;
node->state = IRQ_NOINIT;
@@ -7113,8 +7177,6 @@ static int _sde_crtc_event_enable(struct sde_kms *kms,
return ret;
}
INIT_LIST_HEAD(&node->irq.list);
mutex_lock(&crtc->crtc_lock);
ret = node->func(crtc_drm, true, &node->irq);
if (!ret) {

View File

@@ -222,6 +222,7 @@ struct sde_crtc_fps_info {
* @sbuf_rot_id_old: inline rotator id for previous commit
* @sbuf_rot_id_delta: inline rotator id for current delta state
* @idle_notify_work: delayed worker to notify idle timeout to user space
* @early_wakeup_work: work to trigger early wakeup
* @power_event : registered power event handle
* @cur_perf : current performance committed to clock/bandwidth driver
* @rp_lock : serialization lock for resource pool
@@ -292,6 +293,7 @@ struct sde_crtc {
u32 sbuf_rot_id_old;
u32 sbuf_rot_id_delta;
struct kthread_delayed_work idle_notify_work;
struct kthread_work early_wakeup_work;
struct sde_power_event *power_event;

View File

@@ -1,5 +1,5 @@
/*
* Copyright (c) 2014-2020, The Linux Foundation. All rights reserved.
* Copyright (c) 2014-2019, The Linux Foundation. All rights reserved.
* Copyright (C) 2013 Red Hat
* Author: Rob Clark <robdclark@gmail.com>
*
@@ -1973,35 +1973,15 @@ static int _sde_encoder_update_rsc_client(
qsync_mode = sde_connector_get_qsync_mode(
sde_enc->cur_master->connector);
if (IS_SDE_MAJOR_SAME(sde_kms->core_rev, SDE_HW_VER_620)) {
if (sde_encoder_in_clone_mode(drm_enc) ||
!disp_info->is_primary || (disp_info->is_primary &&
qsync_mode))
rsc_state = enable ? SDE_RSC_CLK_STATE :
SDE_RSC_IDLE_STATE;
else if (sde_encoder_check_curr_mode(drm_enc,
MSM_DISPLAY_CMD_MODE))
rsc_state = enable ? SDE_RSC_CMD_STATE :
SDE_RSC_IDLE_STATE;
else if (sde_encoder_check_curr_mode(drm_enc,
MSM_DISPLAY_VIDEO_MODE))
rsc_state = enable ? SDE_RSC_VID_STATE :
SDE_RSC_IDLE_STATE;
} else {
if (sde_encoder_in_clone_mode(drm_enc))
rsc_state = enable ? SDE_RSC_CLK_STATE :
SDE_RSC_IDLE_STATE;
else
rsc_state = enable ? ((disp_info->is_primary &&
(sde_encoder_check_curr_mode(drm_enc,
MSM_DISPLAY_CMD_MODE)) && !qsync_mode) ?
SDE_RSC_CMD_STATE : SDE_RSC_VID_STATE) :
SDE_RSC_IDLE_STATE;
}
if (sde_encoder_in_clone_mode(drm_enc) || !disp_info->is_primary ||
(disp_info->is_primary && qsync_mode))
rsc_state = enable ? SDE_RSC_CLK_STATE : SDE_RSC_IDLE_STATE;
else if (disp_info->capabilities & MSM_DISPLAY_CAP_CMD_MODE)
rsc_state = enable ? SDE_RSC_CMD_STATE : SDE_RSC_IDLE_STATE;
else if (disp_info->capabilities & MSM_DISPLAY_CAP_VID_MODE)
rsc_state = enable ? SDE_RSC_VID_STATE : SDE_RSC_IDLE_STATE;
if (IS_SDE_MAJOR_SAME(sde_kms->core_rev, SDE_HW_VER_620) &&
(rsc_state == SDE_RSC_VID_STATE))
rsc_state = SDE_RSC_CLK_STATE;
rsc_state = SDE_RSC_CLK_STATE;
SDE_EVT32(rsc_state, qsync_mode);

View File

@@ -405,6 +405,7 @@ struct sde_encoder_phys_cmd {
atomic_t pending_vblank_cnt;
wait_queue_head_t pending_vblank_wq;
u32 ctl_start_threshold;
struct work_struct ctl_wait_work;
};
/**

View File

@@ -1471,6 +1471,15 @@ static int _sde_encoder_phys_cmd_wait_for_ctl_start(
return ret;
}
static void sde_encoder_phys_cmd_ctl_start_work(struct work_struct *work)
{
struct sde_encoder_phys_cmd *cmd_enc = container_of(work,
typeof(*cmd_enc),
ctl_wait_work);
_sde_encoder_phys_cmd_wait_for_ctl_start(&cmd_enc->base);
}
static int sde_encoder_phys_cmd_wait_for_tx_complete(
struct sde_encoder_phys *phys_enc)
{
@@ -1505,9 +1514,9 @@ static int sde_encoder_phys_cmd_wait_for_commit_done(
/* only required for master controller */
if (sde_encoder_phys_cmd_is_master(phys_enc))
rc = _sde_encoder_phys_cmd_wait_for_ctl_start(phys_enc);
queue_work(system_unbound_wq, &cmd_enc->ctl_wait_work);
if (!rc && sde_encoder_phys_cmd_is_master(phys_enc) &&
if (sde_encoder_phys_cmd_is_master(phys_enc) &&
cmd_enc->autorefresh.cfg.enable)
rc = _sde_encoder_phys_cmd_wait_for_autorefresh_done(phys_enc);
@@ -1592,6 +1601,9 @@ static void sde_encoder_phys_cmd_prepare_commit(
if (!sde_encoder_phys_cmd_is_master(phys_enc))
return;
/* Wait for ctl_start interrupt for the previous commit if needed */
flush_work(&cmd_enc->ctl_wait_work);
SDE_EVT32(DRMID(phys_enc->parent), phys_enc->intf_idx - INTF_0,
cmd_enc->autorefresh.cfg.enable);
@@ -1798,6 +1810,7 @@ struct sde_encoder_phys *sde_encoder_phys_cmd_init(
init_waitqueue_head(&cmd_enc->pending_vblank_wq);
atomic_set(&cmd_enc->autorefresh.kickoff_cnt, 0);
init_waitqueue_head(&cmd_enc->autorefresh.kickoff_wq);
INIT_WORK(&cmd_enc->ctl_wait_work, sde_encoder_phys_cmd_ctl_start_work);
SDE_DEBUG_CMDENC(cmd_enc, "created\n");

Some files were not shown because too many files have changed in this diff Show More