diff --git a/Documentation/cgroup-v1/freezer-subsystem.txt b/Documentation/cgroup-v1/freezer-subsystem.txt index e831cb2b8394..22958ef93cd9 100644 --- a/Documentation/cgroup-v1/freezer-subsystem.txt +++ b/Documentation/cgroup-v1/freezer-subsystem.txt @@ -90,6 +90,18 @@ The following cgroupfs files are created by cgroup freezer. Shows the parent-state. 0 if none of the cgroup's ancestors is frozen; otherwise, 1. +* freezer.killable: Read-write + + When read, returns the killable state of a cgroup - "1" if frozen + tasks will respond to fatal signals, or "0" if they won't. + + When written, this property sets the killable state of the cgroup. + A value equal to "1" will switch the state of all frozen tasks in + the cgroup to TASK_INTERRUPTIBLE (similarly to cgroup v2) and will + make them react to fatal signals. A value of "0" will switch the + state of frozen tasks to TASK_UNINTERRUPTIBLE and they won't respond + to signals unless thawed or unfrozen. + The root cgroup is non-freezable and the above interface files don't exist. diff --git a/Documentation/devicetree/bindings/arm/msm/msm_rtb.txt b/Documentation/devicetree/bindings/arm/msm/msm_rtb.txt deleted file mode 100644 index ae61ebf771e4..000000000000 --- a/Documentation/devicetree/bindings/arm/msm/msm_rtb.txt +++ /dev/null @@ -1,22 +0,0 @@ -Register Trace Buffer (RTB) - -The RTB is used to log discrete events in the system in an uncached buffer that -can be post processed from RAM dumps. The RTB must reserve memory using -the msm specific memory reservation bindings (see -Documentation/devicetree/bindings/arm/msm/memory-reserve.txt). 
- -Required properties - -- compatible: "qcom,msm-rtb" -- qcom,rtb-size: size of the RTB buffer in bytes - -Optional properties: - -- linux,contiguous-region: phandle reference to a CMA region - -Example: - - qcom,msm-rtb { - compatible = "qcom,msm-rtb"; - qcom,rtb-size = <0x100000>; - }; diff --git a/Documentation/sysctl/vm.txt b/Documentation/sysctl/vm.txt index 5a75fd7e0c27..34f26c5d3ca2 100644 --- a/Documentation/sysctl/vm.txt +++ b/Documentation/sysctl/vm.txt @@ -50,7 +50,6 @@ Currently, these files are in /proc/sys/vm: - nr_trim_pages (only if CONFIG_MMU=n) - numa_zonelist_order - oom_dump_tasks -- reap_mem_on_sigkill - oom_kill_allocating_task - overcommit_kbytes - overcommit_memory @@ -658,24 +657,6 @@ The default value is 1 (enabled). ============================================================== -reap_mem_on_sigkill - -This enables or disables the memory reaping for a SIGKILL received -process and that the sending process must have the CAP_KILL capabilities. - -If this is set to 1, when a process receives SIGKILL from a process -that has the capability, CAP_KILL, the process is added into the oom_reaper -queue which can be picked up by the oom_reaper thread to reap the memory of -that process. This reaps for the process which received SIGKILL through -either sys_kill from user or kill_pid from kernel. - -If this is set to 0, we are not reaping memory of a SIGKILL, sent through -either sys_kill from user or kill_pid from kernel, received process. - -The default value is 0 (disabled). 
- -============================================================== - oom_kill_allocating_task This enables or disables killing the OOM-triggering task in diff --git a/Makefile b/Makefile index 92e396de366c..00a649fcd5f8 100644 --- a/Makefile +++ b/Makefile @@ -506,6 +506,10 @@ CLANG_FLAGS += $(call cc-option, -Wno-bool-operation) CLANG_FLAGS += $(call cc-option, -Wno-unsequenced) KBUILD_CFLAGS += $(CLANG_FLAGS) KBUILD_AFLAGS += $(CLANG_FLAGS) +ifeq ($(ld-name),lld) +KBUILD_CFLAGS += -fuse-ld=lld +endif +KBUILD_CPPFLAGS += -Qunused-arguments export CLANG_FLAGS ifeq ($(ld-name),lld) KBUILD_CFLAGS += -fuse-ld=lld @@ -681,6 +685,8 @@ endif LLVM_AR := llvm-ar LLVM_NM := llvm-nm export LLVM_AR LLVM_NM +# Set O3 optimization level for LTO +LDFLAGS += --plugin-opt=O3 endif # The arch Makefile can set ARCH_{CPP,A,C}FLAGS to override the default diff --git a/arch/Kconfig b/arch/Kconfig index 0a3517cacbe8..4b7d70826e40 100644 --- a/arch/Kconfig +++ b/arch/Kconfig @@ -175,7 +175,7 @@ config ARCH_USE_BUILTIN_BSWAP config KRETPROBES def_bool y - depends on KPROBES && HAVE_KRETPROBES + depends on KPROBES && HAVE_KRETPROBES && ROP_PROTECTION_NONE config USER_RETURN_NOTIFIER bool @@ -333,6 +333,9 @@ config HAVE_PERF_USER_STACK_DUMP config HAVE_ARCH_JUMP_LABEL bool +config HAVE_ARCH_JUMP_LABEL_RELATIVE + bool + config HAVE_RCU_TABLE_FREE bool @@ -1102,6 +1105,16 @@ config STRICT_MODULE_RWX and non-text memory will be made non-executable. This provides protection against certain security exploits (e.g. writing to text) +config ARCH_HAS_REFCOUNT_FULL + bool + select ARCH_HAS_REFCOUNT + help + An architecture selects this when the optimized refcount_t + implementation it provides covers all the cases that + CONFIG_REFCOUNT_FULL covers as well, in which case it makes no + sense to even offer CONFIG_REFCOUNT_FULL as a user selectable + option. + config ARCH_HAS_REFCOUNT bool help @@ -1115,7 +1128,7 @@ config ARCH_HAS_REFCOUNT against bugs in reference counts. 
config REFCOUNT_FULL - bool "Perform full reference count validation at the expense of speed" + bool "Perform full reference count validation at the expense of speed" if !ARCH_HAS_REFCOUNT_FULL help Enabling this switches the refcounting infrastructure from a fast unchecked atomic_t implementation to a fully state checked diff --git a/arch/arm/include/asm/io.h b/arch/arm/include/asm/io.h index c61b1db9e4c9..6527a81d30b5 100644 --- a/arch/arm/include/asm/io.h +++ b/arch/arm/include/asm/io.h @@ -28,7 +28,6 @@ #include #include #include -#include #include /* @@ -62,24 +61,23 @@ void __raw_readsl(const volatile void __iomem *addr, void *data, int longlen); * the bus. Rather than special-case the machine, just let the compiler * generate the access for CPUs prior to ARMv6. */ -#define __raw_readw_no_log(a) (__chk_io_ptr(a), \ - *(volatile unsigned short __force *)(a)) -#define __raw_writew_no_log(v, a) ((void)(__chk_io_ptr(a), \ - *(volatile unsigned short __force *)\ - (a) = (v))) +#define __raw_readw(a) (__chk_io_ptr(a), *(volatile unsigned short __force *)(a)) +#define __raw_writew(v,a) ((void)(__chk_io_ptr(a), *(volatile unsigned short __force *)(a) = (v))) #else /* * When running under a hypervisor, we want to avoid I/O accesses with * writeback addressing modes as these incur a significant performance * overhead (the address generation must be emulated in software). 
*/ -static inline void __raw_writew_no_log(u16 val, volatile void __iomem *addr) +#define __raw_writew __raw_writew +static inline void __raw_writew(u16 val, volatile void __iomem *addr) { asm volatile("strh %1, %0" : : "Q" (*(volatile u16 __force *)addr), "r" (val)); } -static inline u16 __raw_readw_no_log(const volatile void __iomem *addr) +#define __raw_readw __raw_readw +static inline u16 __raw_readw(const volatile void __iomem *addr) { u16 val; asm volatile("ldrh %0, %1" @@ -89,19 +87,22 @@ static inline u16 __raw_readw_no_log(const volatile void __iomem *addr) } #endif -static inline void __raw_writeb_no_log(u8 val, volatile void __iomem *addr) +#define __raw_writeb __raw_writeb +static inline void __raw_writeb(u8 val, volatile void __iomem *addr) { asm volatile("strb %1, %0" : : "Qo" (*(volatile u8 __force *)addr), "r" (val)); } -static inline void __raw_writel_no_log(u32 val, volatile void __iomem *addr) +#define __raw_writel __raw_writel +static inline void __raw_writel(u32 val, volatile void __iomem *addr) { asm volatile("str %1, %0" : : "Qo" (*(volatile u32 __force *)addr), "r" (val)); } -static inline void __raw_writeq_no_log(u64 val, volatile void __iomem *addr) +#define __raw_writeq __raw_writeq +static inline void __raw_writeq(u64 val, volatile void __iomem *addr) { register u64 v asm ("r2"); @@ -112,7 +113,8 @@ static inline void __raw_writeq_no_log(u64 val, volatile void __iomem *addr) : "r" (v)); } -static inline u8 __raw_readb_no_log(const volatile void __iomem *addr) +#define __raw_readb __raw_readb +static inline u8 __raw_readb(const volatile void __iomem *addr) { u8 val; asm volatile("ldrb %0, %1" @@ -121,7 +123,8 @@ static inline u8 __raw_readb_no_log(const volatile void __iomem *addr) return val; } -static inline u32 __raw_readl_no_log(const volatile void __iomem *addr) +#define __raw_readl __raw_readl +static inline u32 __raw_readl(const volatile void __iomem *addr) { u32 val; asm volatile("ldr %0, %1" @@ -130,7 +133,8 @@ static inline u32 
__raw_readl_no_log(const volatile void __iomem *addr) return val; } -static inline u64 __raw_readq_no_log(const volatile void __iomem *addr) +#define __raw_readq __raw_readq +static inline u64 __raw_readq(const volatile void __iomem *addr) { register u64 val asm ("r2"); @@ -140,48 +144,6 @@ static inline u64 __raw_readq_no_log(const volatile void __iomem *addr) return val; } -/* - * There may be cases when clients don't want to support or can't support the - * logging. The appropriate functions can be used but clients should carefully - * consider why they can't support the logging. - */ - -#define __raw_write_logged(v, a, _t) ({ \ - int _ret; \ - volatile void __iomem *_a = (a); \ - void *_addr = (void __force *)(_a); \ - _ret = uncached_logk(LOGK_WRITEL, _addr); \ - ETB_WAYPOINT; \ - __raw_write##_t##_no_log((v), _a); \ - if (_ret) \ - LOG_BARRIER; \ - }) - - -#define __raw_writeb(v, a) __raw_write_logged((v), (a), b) -#define __raw_writew(v, a) __raw_write_logged((v), (a), w) -#define __raw_writel(v, a) __raw_write_logged((v), (a), l) -#define __raw_writeq(v, a) __raw_write_logged((v), (a), q) - -#define __raw_read_logged(a, _l, _t) ({ \ - unsigned _t __a; \ - const volatile void __iomem *_a = (a); \ - void *_addr = (void __force *)(_a); \ - int _ret; \ - _ret = uncached_logk(LOGK_READL, _addr); \ - ETB_WAYPOINT; \ - __a = __raw_read##_l##_no_log(_a);\ - if (_ret) \ - LOG_BARRIER; \ - __a; \ - }) - - -#define __raw_readb(a) __raw_read_logged((a), b, char) -#define __raw_readw(a) __raw_read_logged((a), w, short) -#define __raw_readl(a) __raw_read_logged((a), l, int) -#define __raw_readq(a) __raw_read_logged((a), q, long long) - /* * Architecture ioremap implementation. 
*/ @@ -363,24 +325,12 @@ extern void _memset_io(volatile void __iomem *, int, size_t); __raw_readl(c)); __r; }) #define readq_relaxed(c) ({ u64 __r = le64_to_cpu((__force __le64) \ __raw_readq(c)); __r; }) -#define readb_relaxed_no_log(c) ({ u8 __r = __raw_readb_no_log(c); __r; }) -#define readl_relaxed_no_log(c) ({ u32 __r = le32_to_cpu((__force __le32) \ - __raw_readl_no_log(c)); __r; }) -#define readq_relaxed_no_log(c) ({ u64 __r = le64_to_cpu((__force __le64) \ - __raw_readq_no_log(c)); __r; }) #define writeb_relaxed(v, c) __raw_writeb(v, c) #define writew_relaxed(v, c) __raw_writew((__force u16) cpu_to_le16(v), c) #define writel_relaxed(v, c) __raw_writel((__force u32) cpu_to_le32(v), c) #define writeq_relaxed(v, c) __raw_writeq((__force u64) cpu_to_le64(v), c) -#define writeb_relaxed_no_log(v, c) ((void)__raw_writeb_no_log((v), (c))) -#define writew_relaxed_no_log(v, c) __raw_writew_no_log((__force u16) \ - cpu_to_le16(v), c) -#define writel_relaxed_no_log(v, c) __raw_writel_no_log((__force u32) \ - cpu_to_le32(v), c) -#define writeq_relaxed_no_log(v, c) __raw_writeq_no_log((__force u64) \ - cpu_to_le64(v), c) #define readb(c) ({ u8 __v = readb_relaxed(c); __iormb(); __v; }) #define readw(c) ({ u16 __v = readw_relaxed(c); __iormb(); __v; }) @@ -401,24 +351,6 @@ extern void _memset_io(volatile void __iomem *, int, size_t); #define writesw(p,d,l) __raw_writesw(p,d,l) #define writesl(p,d,l) __raw_writesl(p,d,l) -#define readb_no_log(c) \ - ({ u8 __v = readb_relaxed_no_log(c); __iormb(); __v; }) -#define readw_no_log(c) \ - ({ u16 __v = readw_relaxed_no_log(c); __iormb(); __v; }) -#define readl_no_log(c) \ - ({ u32 __v = readl_relaxed_no_log(c); __iormb(); __v; }) -#define readq_no_log(c) \ - ({ u64 __v = readq_relaxed_no_log(c); __iormb(); __v; }) - -#define writeb_no_log(v, c) \ - ({ __iowmb(); writeb_relaxed_no_log((v), (c)); }) -#define writew_no_log(v, c) \ - ({ __iowmb(); writew_relaxed_no_log((v), (c)); }) -#define writel_no_log(v, c) \ - ({ __iowmb(); 
writel_relaxed_no_log((v), (c)); }) -#define writeq_no_log(v, c) \ - ({ __iowmb(); writeq_relaxed_no_log((v), (c)); }) - #ifndef __ARMBE__ static inline void memset_io(volatile void __iomem *dst, unsigned c, size_t count) diff --git a/arch/arm/kernel/io.c b/arch/arm/kernel/io.c index a20e48c50d85..c894fdddf592 100644 --- a/arch/arm/kernel/io.c +++ b/arch/arm/kernel/io.c @@ -46,21 +46,21 @@ EXPORT_SYMBOL(atomic_io_modify); void _memcpy_fromio(void *to, const volatile void __iomem *from, size_t count) { while (count && (!IO_CHECK_ALIGN(from, 8) || !IO_CHECK_ALIGN(to, 8))) { - *(u8 *)to = readb_relaxed_no_log(from); + *(u8 *)to = readb_relaxed(from); from++; to++; count--; } while (count >= 8) { - *(u64 *)to = readq_relaxed_no_log(from); + *(u64 *)to = readq_relaxed(from); from += 8; to += 8; count -= 8; } while (count) { - *(u8 *)to = readb_relaxed_no_log(from); + *(u8 *)to = readb_relaxed(from); from++; to++; count--; @@ -76,21 +76,21 @@ void _memcpy_toio(volatile void __iomem *to, const void *from, size_t count) void *p = (void __force *)to; while (count && (!IO_CHECK_ALIGN(p, 8) || !IO_CHECK_ALIGN(from, 8))) { - writeb_relaxed_no_log(*(volatile u8 *)from, p); + writeb_relaxed(*(volatile u8 *)from, p); from++; p++; count--; } while (count >= 8) { - writeq_relaxed_no_log(*(volatile u64 *)from, p); + writeq_relaxed(*(volatile u64 *)from, p); from += 8; p += 8; count -= 8; } while (count) { - writeb_relaxed_no_log(*(volatile u8 *)from, p); + writeb_relaxed(*(volatile u8 *)from, p); from++; p++; count--; @@ -111,19 +111,19 @@ void _memset_io(volatile void __iomem *dst, int c, size_t count) qc |= qc << 32; while (count && !IO_CHECK_ALIGN(p, 8)) { - writeb_relaxed_no_log(c, p); + writeb_relaxed(c, p); p++; count--; } while (count >= 8) { - writeq_relaxed_no_log(qc, p); + writeq_relaxed(qc, p); p += 8; count -= 8; } while (count) { - writeb_relaxed_no_log(c, p); + writeb_relaxed(c, p); p++; count--; } diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig index 
8e57ee6b48d9..65f69948d85d 100644 --- a/arch/arm64/Kconfig +++ b/arch/arm64/Kconfig @@ -12,11 +12,13 @@ config ARM64 select ARCH_HAS_DEVMEM_IS_ALLOWED select ARCH_HAS_ACPI_TABLE_UPGRADE if ACPI select ARCH_HAS_ELF_RANDOMIZE + select ARCH_HAS_FAST_MULTIPLIER select ARCH_HAS_FORTIFY_SOURCE select ARCH_HAS_GCOV_PROFILE_ALL select ARCH_HAS_GIGANTIC_PAGE if (MEMORY_ISOLATION && COMPACTION) || CMA select ARCH_HAS_KCOV select ARCH_HAS_SET_DIRECT_MAP + select ARCH_HAS_REFCOUNT_FULL select ARCH_HAS_SET_MEMORY select ARCH_HAS_SG_CHAIN select ARCH_HAS_STRICT_KERNEL_RWX @@ -93,6 +95,7 @@ config ARM64 select HAVE_ARCH_JUMP_LABEL select HAVE_ARCH_KASAN if !(ARM64_16K_PAGES && ARM64_VA_BITS_48) select HAVE_ARCH_KASAN_SW_TAGS if HAVE_ARCH_KASAN + select HAVE_ARCH_JUMP_LABEL_RELATIVE select HAVE_ARCH_KGDB select HAVE_ARCH_MMAP_RND_BITS select HAVE_ARCH_MMAP_RND_COMPAT_BITS if COMPAT diff --git a/arch/arm64/Makefile b/arch/arm64/Makefile index bbe57e00794c..706e11c89935 100644 --- a/arch/arm64/Makefile +++ b/arch/arm64/Makefile @@ -84,6 +84,10 @@ ifeq ($(CONFIG_COMPAT_VDSO), y) else ifeq ($(cc-name),clang) export CLANG_TRIPLE_ARM32 ?= $(CROSS_COMPILE_ARM32) export CLANG_TARGET_ARM32 := --target=$(notdir $(CLANG_TRIPLE_ARM32:%-=%)) + export GCC_TOOLCHAIN32_DIR := $(dir $(shell which $(CROSS_COMPILE_ARM32)ld)) + export GCC_TOOLCHAIN32 := $(realpath $(GCC_TOOLCHAIN32_DIR)/..) 
+ export CLANG_PREFIX32 := --prefix=$(GCC_TOOLCHAIN32_DIR) + export CLANG_GCC32_TC := --gcc-toolchain=$(GCC_TOOLCHAIN32) export CONFIG_VDSO32 := y vdso32 := -DCONFIG_VDSO32=1 else ifeq ($(shell which $(CROSS_COMPILE_ARM32)$(cc-name) 2> /dev/null),) diff --git a/arch/arm64/boot/dts/qcom/dsi-panel-ss-fhd-ea8076-cmd.dtsi b/arch/arm64/boot/dts/qcom/dsi-panel-ss-fhd-ea8076-cmd.dtsi index 7560fd9f8b69..e70a4363314e 100644 --- a/arch/arm64/boot/dts/qcom/dsi-panel-ss-fhd-ea8076-cmd.dtsi +++ b/arch/arm64/boot/dts/qcom/dsi-panel-ss-fhd-ea8076-cmd.dtsi @@ -62,7 +62,6 @@ qcom,mdss-panel-on-dimming-delay = <120>; /* IRQF_ONESHOT | IRQF_TRIGGER_FALLING */ /* trig-flags: falling-0x0002 rasing-0x0001 */ - qcom,esd-err-irq-gpio = <&tlmm 5 0x2002>; qcom,disp-doze-lpm-backlight = <20>; qcom,disp-doze-hbm-backlight = <266>; diff --git a/arch/arm64/boot/dts/qcom/dsi-panel-ss-fhd-ea8076-global-cmd.dtsi b/arch/arm64/boot/dts/qcom/dsi-panel-ss-fhd-ea8076-global-cmd.dtsi index c12cb9d993e5..7667326d350a 100644 --- a/arch/arm64/boot/dts/qcom/dsi-panel-ss-fhd-ea8076-global-cmd.dtsi +++ b/arch/arm64/boot/dts/qcom/dsi-panel-ss-fhd-ea8076-global-cmd.dtsi @@ -62,7 +62,6 @@ qcom,mdss-panel-on-dimming-delay = <120>; /* IRQF_ONESHOT | IRQF_TRIGGER_FALLING */ /* trig-flags: falling-0x0002 rasing-0x0001 */ - qcom,esd-err-irq-gpio = <&tlmm 5 0x2002>; qcom,disp-doze-lpm-backlight = <20>; qcom,disp-doze-hbm-backlight = <266>; diff --git a/arch/arm64/boot/dts/qcom/raphael-sm8150.dtsi b/arch/arm64/boot/dts/qcom/raphael-sm8150.dtsi index 45f69abba51c..adac3b93cdf7 100644 --- a/arch/arm64/boot/dts/qcom/raphael-sm8150.dtsi +++ b/arch/arm64/boot/dts/qcom/raphael-sm8150.dtsi @@ -115,6 +115,7 @@ And public attribution of xiaomi platforms(like F1 and so and) qcom,sw-jeita-enable; qcom,step-charging-enable; qcom,wd-bark-time-secs = <16>; + google,wdog_snarl_disable; }; &qupv3_se1_i2c { diff --git a/arch/arm64/boot/dts/qcom/sm8150-gpu.dtsi b/arch/arm64/boot/dts/qcom/sm8150-gpu.dtsi index 651e361a2fcd..095f49b74932 
100644 --- a/arch/arm64/boot/dts/qcom/sm8150-gpu.dtsi +++ b/arch/arm64/boot/dts/qcom/sm8150-gpu.dtsi @@ -82,8 +82,7 @@ qcom,gpu-quirk-secvid-set-once; qcom,gpu-quirk-cx-gdsc; - qcom,idle-timeout = <80>; //msecs - qcom,no-nap; + qcom,idle-timeout = <64>; //msecs qcom,highest-bank-bit = <15>; @@ -100,8 +99,6 @@ tzone-names = "gpuss-0-usr", "gpuss-1-usr"; - qcom,pm-qos-active-latency = <44>; - clocks = <&clock_gpucc GPU_CC_CXO_CLK>, <&clock_gcc GCC_DDRSS_GPU_AXI_CLK>, <&clock_gcc GCC_GPU_MEMNOC_GFX_CLK>, diff --git a/arch/arm64/boot/dts/qcom/sm8150-v2.dtsi b/arch/arm64/boot/dts/qcom/sm8150-v2.dtsi index a085ef95734f..d9da79994ba1 100644 --- a/arch/arm64/boot/dts/qcom/sm8150-v2.dtsi +++ b/arch/arm64/boot/dts/qcom/sm8150-v2.dtsi @@ -93,6 +93,11 @@ #include "msm-arm-smmu-sm8150-v2.dtsi" &pcie0 { + + qcom,msm-bus,vectors-KBps = + <100 512 0 0>, + <100 512 500 2000000>; + reg = <0x1c00000 0x4000>, <0x1c06000 0x1000>, <0x60000000 0xf1d>, @@ -203,6 +208,11 @@ }; &pcie1 { + + qcom,msm-bus,vectors-KBps = + <100 512 0 0>, + <100 512 500 2000000>; + reg = <0x1c08000 0x4000>, <0x1c0e000 0x2000>, <0x40000000 0xf1d>, @@ -1047,8 +1057,8 @@ < 1401600 998400000 >, < 1708800 1267200000 >, < 2016000 1344000000 >, - < 2419200 1536000000 >, - < 2841600 1612800000 >; + < 2227200 1536000000 >, + < 2419200 1612800000 >; }; &cpu7_cpu_l3_latmon { diff --git a/arch/arm64/boot/dts/qcom/sm8150.dtsi b/arch/arm64/boot/dts/qcom/sm8150.dtsi index e3537d75f942..c83cb2f932e6 100644 --- a/arch/arm64/boot/dts/qcom/sm8150.dtsi +++ b/arch/arm64/boot/dts/qcom/sm8150.dtsi @@ -549,7 +549,7 @@ }; chosen { - bootargs = "rcupdate.rcu_expedited=1 rcu_nocbs=0-7 cgroup.memory=nokmem,nosocket"; + bootargs = "rcu_nocbs=0-7 cgroup.memory=nokmem,nosocket noirqdebug"; }; soc: soc { }; diff --git a/arch/arm64/boot/dts/qcom/xiaomi-sm8150-common.dtsi b/arch/arm64/boot/dts/qcom/xiaomi-sm8150-common.dtsi index e615a202658a..1854f5b40945 100644 --- a/arch/arm64/boot/dts/qcom/xiaomi-sm8150-common.dtsi +++ 
b/arch/arm64/boot/dts/qcom/xiaomi-sm8150-common.dtsi @@ -449,6 +449,16 @@ status = "ok"; }; +&thermal_zones { + gpuss-1-usr { + status = "disabled"; + }; +}; + +&msm_gpu { + tzone-names = "gpuss-0-usr"; +}; + &usb2_phy1 { status = "ok"; }; diff --git a/arch/arm64/configs/raphael_defconfig b/arch/arm64/configs/raphael_defconfig index 1fbb13114b42..6d86e8afee00 100644 --- a/arch/arm64/configs/raphael_defconfig +++ b/arch/arm64/configs/raphael_defconfig @@ -12,6 +12,7 @@ CONFIG_ARCH_MMAP_RND_BITS_MIN=18 CONFIG_ARCH_MMAP_RND_BITS_MAX=24 CONFIG_ARCH_MMAP_RND_COMPAT_BITS_MIN=11 CONFIG_ARCH_MMAP_RND_COMPAT_BITS_MAX=16 +CONFIG_NO_IOPORT_MAP=y CONFIG_STACKTRACE_SUPPORT=y CONFIG_ILLEGAL_POINTER_VALUE=0xdead000000000000 CONFIG_LOCKDEP_SUPPORT=y @@ -28,7 +29,6 @@ CONFIG_ARCH_DMA_ADDR_T_64BIT=y CONFIG_NEED_DMA_MAP_STATE=y CONFIG_NEED_SG_DMA_LENGTH=y CONFIG_SMP=y -CONFIG_HOTPLUG_SIZE_BITS=29 CONFIG_SWIOTLB=y CONFIG_IOMMU_HELPER=y CONFIG_KERNEL_MODE_NEON=y @@ -61,9 +61,8 @@ CONFIG_POSIX_MQUEUE_SYSCTL=y CONFIG_CROSS_MEMORY_ATTACH=y # CONFIG_FHANDLE is not set # CONFIG_USELIB is not set -CONFIG_AUDIT=y +# CONFIG_AUDIT is not set CONFIG_HAVE_ARCH_AUDITSYSCALL=y -# CONFIG_AUDITSYSCALL is not set # # IRQ subsystem @@ -76,13 +75,9 @@ CONFIG_GENERIC_IRQ_MIGRATION=y CONFIG_HARDIRQS_SW_RESEND=y CONFIG_IRQ_DOMAIN=y CONFIG_IRQ_DOMAIN_HIERARCHY=y -CONFIG_GENERIC_MSI_IRQ=y -CONFIG_GENERIC_MSI_IRQ_DOMAIN=y CONFIG_HANDLE_DOMAIN_IRQ=y -# CONFIG_IRQ_DOMAIN_DEBUG is not set CONFIG_IRQ_FORCED_THREADING=y CONFIG_SPARSE_IRQ=y -# CONFIG_GENERIC_IRQ_DEBUGFS is not set CONFIG_ARCH_CLOCKSOURCE_DATA=y CONFIG_GENERIC_TIME_VSYSCALL=y CONFIG_GENERIC_CLOCKEVENTS=y @@ -143,14 +138,18 @@ CONFIG_GENERIC_SCHED_CLOCK=y # # FAIR Scheuler tunables # -# CONFIG_PELT_UTIL_HALFLIFE_32 is not set -CONFIG_PELT_UTIL_HALFLIFE_16=y +CONFIG_PELT_UTIL_HALFLIFE_32=y +# CONFIG_PELT_UTIL_HALFLIFE_16 is not set # CONFIG_PELT_UTIL_HALFLIFE_8 is not set CONFIG_ARCH_SUPPORTS_NUMA_BALANCING=y CONFIG_CGROUPS=y -# CONFIG_MEMCG is not 
set +CONFIG_PAGE_COUNTER=y +CONFIG_MEMCG=y +CONFIG_MEMCG_SWAP=y +CONFIG_MEMCG_SWAP_ENABLED=y CONFIG_BLK_CGROUP=y # CONFIG_DEBUG_BLK_CGROUP is not set +CONFIG_CGROUP_WRITEBACK=y CONFIG_CGROUP_SCHED=y CONFIG_FAIR_GROUP_SCHED=y CONFIG_RT_GROUP_SCHED=y @@ -163,7 +162,6 @@ CONFIG_PROC_PID_CPUSET=y CONFIG_CGROUP_CPUACCT=y # CONFIG_CGROUP_PERF is not set CONFIG_CGROUP_BPF=y -# CONFIG_CGROUP_DEBUG is not set CONFIG_SOCK_CGROUP_DATA=y CONFIG_SCHED_CORE_CTL=y # CONFIG_CHECKPOINT_RESTORE is not set @@ -200,7 +198,6 @@ CONFIG_MULTIUSER=y # CONFIG_SYSCTL_SYSCALL is not set CONFIG_POSIX_TIMERS=y CONFIG_KALLSYMS=y -CONFIG_KALLSYMS_ALL=y # CONFIG_KALLSYMS_ABSOLUTE_PERCPU is not set CONFIG_KALLSYMS_BASE_RELATIVE=y CONFIG_PRINTK=y @@ -219,7 +216,6 @@ CONFIG_SHMEM=y CONFIG_AIO=y CONFIG_ADVISE_SYSCALLS=y # CONFIG_USERFAULTFD is not set -CONFIG_PCI_QUIRKS=y CONFIG_MEMBARRIER=y CONFIG_EMBEDDED=y CONFIG_HAVE_PERF_EVENTS=y @@ -230,9 +226,9 @@ CONFIG_HAVE_PERF_EVENTS=y # CONFIG_PERF_EVENTS=y # CONFIG_PERF_USER_SHARE is not set -# CONFIG_DEBUG_PERF_USE_VMALLOC is not set CONFIG_VM_EVENT_COUNTERS=y # CONFIG_SLUB_DEBUG is not set +# CONFIG_SLUB_MEMCG_SYSFS_ON is not set # CONFIG_COMPAT_BRK is not set # CONFIG_SLAB is not set CONFIG_SLUB=y @@ -242,11 +238,10 @@ CONFIG_SLAB_MERGE_DEFAULT=y # CONFIG_SLAB_FREELIST_HARDENED is not set CONFIG_SLUB_CPU_PARTIAL=y # CONFIG_SYSTEM_DATA_VERIFICATION is not set -CONFIG_PROFILING=y -CONFIG_TRACEPOINTS=y +# CONFIG_PROFILING is not set CONFIG_JUMP_LABEL=y # CONFIG_STATIC_KEYS_SELFTEST is not set -CONFIG_UPROBES=y +# CONFIG_UPROBES is not set # CONFIG_HAVE_64BIT_ALIGNED_ACCESS is not set CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS=y CONFIG_HAVE_KPROBES=y @@ -264,13 +259,13 @@ CONFIG_HAVE_HW_BREAKPOINT=y CONFIG_HAVE_PERF_REGS=y CONFIG_HAVE_PERF_USER_STACK_DUMP=y CONFIG_HAVE_ARCH_JUMP_LABEL=y +CONFIG_HAVE_ARCH_JUMP_LABEL_RELATIVE=y CONFIG_HAVE_RCU_TABLE_FREE=y CONFIG_HAVE_ALIGNED_STRUCT_PAGE=y CONFIG_HAVE_CMPXCHG_LOCAL=y CONFIG_HAVE_CMPXCHG_DOUBLE=y 
CONFIG_ARCH_WANT_COMPAT_IPC_PARSE_VERSION=y CONFIG_HAVE_ARCH_SECCOMP_FILTER=y -CONFIG_SECCOMP_FILTER=y CONFIG_HAVE_GCC_PLUGINS=y # CONFIG_GCC_PLUGINS is not set CONFIG_HAVE_CC_STACKPROTECTOR=y @@ -313,14 +308,13 @@ CONFIG_VMAP_STACK=y CONFIG_ARCH_HAS_STRICT_KERNEL_RWX=y CONFIG_STRICT_KERNEL_RWX=y CONFIG_ARCH_HAS_STRICT_MODULE_RWX=y -CONFIG_REFCOUNT_FULL=y -# CONFIG_PANIC_ON_REFCOUNT_ERROR is not set +CONFIG_ARCH_HAS_REFCOUNT_FULL=y +CONFIG_ARCH_HAS_REFCOUNT=y CONFIG_ARCH_HAS_RELR=y # # GCOV-based kernel profiling # -# CONFIG_GCOV_KERNEL is not set CONFIG_ARCH_HAS_GCOV_PROFILE_ALL=y CONFIG_HAVE_GENERIC_DMA_COHERENT=y CONFIG_RT_MUTEXES=y @@ -329,14 +323,13 @@ CONFIG_BASE_SMALL=0 CONFIG_MODULES_TREE_LOOKUP=y CONFIG_BLOCK=y CONFIG_BLK_SCSI_REQUEST=y -CONFIG_BLK_DEV_BSG=y +# CONFIG_BLK_DEV_BSG is not set # CONFIG_BLK_DEV_BSGLIB is not set # CONFIG_BLK_DEV_INTEGRITY is not set # CONFIG_BLK_DEV_ZONED is not set # CONFIG_BLK_DEV_THROTTLING is not set # CONFIG_BLK_CMDLINE_PARSER is not set # CONFIG_BLK_WBT is not set -CONFIG_BLK_DEBUG_FS=y # CONFIG_BLK_SED_OPAL is not set # @@ -363,7 +356,6 @@ CONFIG_EFI_PARTITION=y # CONFIG_SYSV68_PARTITION is not set # CONFIG_CMDLINE_PARTITION is not set CONFIG_BLOCK_COMPAT=y -CONFIG_BLK_MQ_PCI=y # # IO Schedulers @@ -437,50 +429,20 @@ CONFIG_ARCH_SM8150=y # # Bus support # -CONFIG_PCI=y -CONFIG_PCI_DOMAINS=y -CONFIG_PCI_DOMAINS_GENERIC=y -CONFIG_PCI_SYSCALL=y -# CONFIG_PCIEPORTBUS is not set -CONFIG_PCI_BUS_ADDR_T_64BIT=y -CONFIG_PCI_MSI=y -CONFIG_PCI_MSI_IRQ_DOMAIN=y -# CONFIG_PCI_DEBUG is not set -# CONFIG_PCI_REALLOC_ENABLE_AUTO is not set -# CONFIG_PCI_STUB is not set -# CONFIG_PCI_IOV is not set -# CONFIG_PCI_PRI is not set -# CONFIG_PCI_PASID is not set -# CONFIG_HOTPLUG_PCI is not set +# CONFIG_PCI is not set +# CONFIG_PCI_DOMAINS is not set +# CONFIG_PCI_DOMAINS_GENERIC is not set +# CONFIG_PCI_SYSCALL is not set # # DesignWare PCI Core Support # -# CONFIG_PCIE_DW_PLAT is not set -# CONFIG_PCI_HISI is not set -# CONFIG_PCIE_QCOM 
is not set -# CONFIG_PCIE_KIRIN is not set - -# -# PCI host controller drivers -# -CONFIG_PCI_MSM=y -CONFIG_PCI_MSM_MSI=y -# CONFIG_PCI_HOST_GENERIC is not set -# CONFIG_PCI_XGENE is not set -# CONFIG_PCI_HOST_THUNDER_PEM is not set -# CONFIG_PCI_HOST_THUNDER_ECAM is not set # # PCI Endpoint # # CONFIG_PCI_ENDPOINT is not set -# -# PCI switch controller drivers -# -# CONFIG_PCI_SW_SWITCHTEC is not set - # # Kernel Features # @@ -556,11 +518,7 @@ CONFIG_HAVE_MEMBLOCK=y CONFIG_NO_BOOTMEM=y CONFIG_MEMORY_ISOLATION=y # CONFIG_HAVE_BOOTMEM_INFO_NODE is not set -CONFIG_MEMORY_HOTPLUG=y -CONFIG_MEMORY_HOTPLUG_SPARSE=y -CONFIG_MEMORY_HOTPLUG_DEFAULT_ONLINE=y -CONFIG_MEMORY_HOTPLUG_MOVABLE_NODE=y -CONFIG_MEMORY_HOTREMOVE=y +# CONFIG_MEMORY_HOTPLUG is not set CONFIG_SPLIT_PTLOCK_CPUS=4 CONFIG_COMPACTION=y CONFIG_MIGRATION=y @@ -572,11 +530,9 @@ CONFIG_ARCH_SUPPORTS_MEMORY_FAILURE=y # CONFIG_MEMORY_FAILURE is not set # CONFIG_TRANSPARENT_HUGEPAGE is not set # CONFIG_ARCH_WANTS_THP_SWAP is not set -# CONFIG_CLEANCACHE is not set +CONFIG_CLEANCACHE=y # CONFIG_FRONTSWAP is not set CONFIG_CMA=y -# CONFIG_CMA_DEBUG is not set -# CONFIG_CMA_DEBUGFS is not set CONFIG_CMA_AREAS=7 # CONFIG_ZPOOL is not set # CONFIG_ZBUD is not set @@ -586,7 +542,6 @@ CONFIG_ZSMALLOC=y CONFIG_GENERIC_EARLY_IOREMAP=y CONFIG_BALANCE_ANON_FILE_RECLAIM=y # CONFIG_IDLE_PAGE_TRACKING is not set -CONFIG_FRAME_VECTOR=y # CONFIG_PERCPU_STATS is not set CONFIG_ARCH_SUPPORTS_SPECULATIVE_PAGE_FAULT=y CONFIG_SPECULATIVE_PAGE_FAULT=y @@ -595,8 +550,7 @@ CONFIG_PROCESS_RECLAIM=y # CONFIG_FORCE_ALLOC_FROM_DMA_ZONE is not set CONFIG_ARM64_DMA_USE_IOMMU=y CONFIG_ARM64_DMA_IOMMU_ALIGNMENT=9 -CONFIG_ARCH_MEMORY_PROBE=y -CONFIG_SECCOMP=y +# CONFIG_SECCOMP is not set # CONFIG_PARAVIRT is not set # CONFIG_PARAVIRT_TIME_ACCOUNTING is not set # CONFIG_KEXEC is not set @@ -684,7 +638,7 @@ CONFIG_PM_SLEEP_MONITOR=y CONFIG_PM_OPP=y CONFIG_PM_CLK=y CONFIG_PM_GENERIC_DOMAINS=y -CONFIG_WQ_POWER_EFFICIENT_DEFAULT=y +# 
CONFIG_WQ_POWER_EFFICIENT_DEFAULT is not set CONFIG_PM_GENERIC_DOMAINS_SLEEP=y CONFIG_PM_GENERIC_DOMAINS_OF=y CONFIG_CPU_PM=y @@ -919,7 +873,6 @@ CONFIG_NETFILTER_XT_CONNMARK=y # # Xtables targets # -# CONFIG_NETFILTER_XT_TARGET_AUDIT is not set # CONFIG_NETFILTER_XT_TARGET_CHECKSUM is not set CONFIG_NETFILTER_XT_TARGET_CLASSIFY=y CONFIG_NETFILTER_XT_TARGET_CONNMARK=y @@ -1099,7 +1052,6 @@ CONFIG_INET_SCTP_DIAG=y # CONFIG_TIPC is not set # CONFIG_ATM is not set CONFIG_L2TP=y -# CONFIG_L2TP_DEBUGFS is not set CONFIG_L2TP_V3=y CONFIG_L2TP_IP=y CONFIG_L2TP_ETH=y @@ -1148,7 +1100,10 @@ CONFIG_NET_SCH_FQ_CODEL=y CONFIG_NET_SCH_PIE=y CONFIG_NET_SCH_INGRESS=y # CONFIG_NET_SCH_PLUG is not set -# CONFIG_NET_SCH_DEFAULT is not set +CONFIG_NET_SCH_DEFAULT=y +CONFIG_DEFAULT_FQ_CODEL=y +# CONFIG_DEFAULT_PFIFO_FAST is not set +CONFIG_DEFAULT_NET_SCH="fq_codel" # # Classification @@ -1196,7 +1151,7 @@ CONFIG_NET_ACT_SKBEDIT=y # CONFIG_NET_CLS_IND is not set CONFIG_NET_SCH_FIFO=y # CONFIG_DCB is not set -# CONFIG_DNS_RESOLVER is not set +CONFIG_DNS_RESOLVER=y # CONFIG_BATMAN_ADV is not set # CONFIG_OPENVSWITCH is not set # CONFIG_VSOCKETS is not set @@ -1209,9 +1164,8 @@ CONFIG_NET_SWITCHDEV=y CONFIG_QRTR=y CONFIG_QRTR_NODE_ID=1 CONFIG_QRTR_SMD=y -CONFIG_QRTR_MHI=y # CONFIG_QRTR_USB is not set -CONFIG_QRTR_FIFO=y +# CONFIG_QRTR_FIFO is not set # CONFIG_NET_NCSI is not set # CONFIG_RMNET_DATA is not set # CONFIG_RMNET_USB is not set @@ -1233,7 +1187,6 @@ CONFIG_SOCKEV_NLMCAST=y # Network testing # # CONFIG_NET_PKTGEN is not set -# CONFIG_NET_DROP_MONITOR is not set # CONFIG_HAMRADIO is not set # CONFIG_CAN is not set CONFIG_BT=y @@ -1244,8 +1197,6 @@ CONFIG_BT_BREDR=y CONFIG_BT_HS=y CONFIG_BT_LE=y # CONFIG_BT_LEDS is not set -# CONFIG_BT_SELFTEST is not set -# CONFIG_BT_DEBUGFS is not set # # Bluetooth device drivers @@ -1276,7 +1227,6 @@ CONFIG_CFG80211_CERTIFICATION_ONUS=y CONFIG_CFG80211_REG_CELLULAR_HINTS=y # CONFIG_CFG80211_REG_RELAX_NO_IR is not set 
CONFIG_CFG80211_DEFAULT_PS=y -# CONFIG_CFG80211_DEBUGFS is not set CONFIG_CFG80211_INTERNAL_REGDB=y CONFIG_CFG80211_CRDA_SUPPORT=y # CONFIG_CFG80211_WEXT is not set @@ -1325,10 +1275,7 @@ CONFIG_EXTRA_FIRMWARE="" CONFIG_FW_LOADER_USER_HELPER=y CONFIG_FW_LOADER_USER_HELPER_FALLBACK=y # CONFIG_FW_CACHE is not set -CONFIG_ALLOW_DEV_COREDUMP=y -# CONFIG_DEBUG_DRIVER is not set -# CONFIG_DEBUG_DEVRES is not set -# CONFIG_DEBUG_TEST_DRIVER_REMOVE is not set +# CONFIG_ALLOW_DEV_COREDUMP is not set # CONFIG_SYS_HYPERVISOR is not set # CONFIG_GENERIC_CPU_DEVICES is not set CONFIG_GENERIC_CPU_AUTOPROBE=y @@ -1340,7 +1287,6 @@ CONFIG_REGMAP_SPI=y CONFIG_REGMAP_SPMI=y CONFIG_REGMAP_MMIO=y # CONFIG_REGMAP_WCD_IRQ is not set -# CONFIG_REGMAP_ALLOW_WRITE_DEBUGFS is not set CONFIG_DMA_SHARED_BUFFER=y # CONFIG_DMA_FENCE_TRACE is not set CONFIG_DMA_CMA=y @@ -1363,22 +1309,18 @@ CONFIG_GENERIC_ARCH_TOPOLOGY=y # CONFIG_ARM_CCI5xx_PMU is not set # CONFIG_ARM_CCN is not set # CONFIG_BRCMSTB_GISB_ARB is not set -CONFIG_QCOM_EBI2=y +# CONFIG_QCOM_EBI2 is not set # CONFIG_SIMPLE_PM_BUS is not set # CONFIG_VEXPRESS_CONFIG is not set -CONFIG_MHI_BUS=y -# CONFIG_MHI_DEBUG is not set +# CONFIG_MHI_BUS is not set # # MHI controllers # -CONFIG_MHI_QCOM=y # # MHI device support # -CONFIG_MHI_NETDEV=y -CONFIG_MHI_UCI=y # CONFIG_CONNECTOR is not set # CONFIG_GNSS is not set # CONFIG_MTD is not set @@ -1389,12 +1331,9 @@ CONFIG_OF_FLATTREE=y CONFIG_OF_EARLY_FLATTREE=y CONFIG_OF_KOBJ=y CONFIG_OF_ADDRESS=y -CONFIG_OF_ADDRESS_PCI=y CONFIG_OF_IRQ=y CONFIG_OF_NET=y CONFIG_OF_MDIO=y -CONFIG_OF_PCI=y -CONFIG_OF_PCI_IRQ=y CONFIG_OF_RESERVED_MEM=y CONFIG_OF_SLIMBUS=y # CONFIG_OF_OVERLAY is not set @@ -1402,29 +1341,21 @@ CONFIG_OF_BATTERYDATA=y # CONFIG_PARPORT is not set CONFIG_BLK_DEV=y # CONFIG_BLK_DEV_NULL_BLK is not set -# CONFIG_BLK_DEV_PCIESSD_MTIP32XX is not set CONFIG_ZRAM=y CONFIG_ZRAM_WRITEBACK=y -# CONFIG_ZRAM_MEMORY_TRACKING is not set -CONFIG_ZRAM_DEFAULT_COMP_ALGORITHM="lzo" -# 
CONFIG_BLK_DEV_DAC960 is not set -# CONFIG_BLK_DEV_UMEM is not set +CONFIG_ZRAM_DEFAULT_COMP_ALGORITHM="lz4" # CONFIG_BLK_DEV_COW_COMMON is not set CONFIG_BLK_DEV_LOOP=y CONFIG_BLK_DEV_LOOP_MIN_COUNT=16 # CONFIG_BLK_DEV_CRYPTOLOOP is not set # CONFIG_BLK_DEV_DRBD is not set # CONFIG_BLK_DEV_NBD is not set -# CONFIG_BLK_DEV_SKD is not set -# CONFIG_BLK_DEV_SX8 is not set CONFIG_BLK_DEV_RAM=y CONFIG_BLK_DEV_RAM_COUNT=16 CONFIG_BLK_DEV_RAM_SIZE=8192 # CONFIG_CDROM_PKTCDVD is not set # CONFIG_ATA_OVER_ETH is not set # CONFIG_BLK_DEV_RBD is not set -# CONFIG_BLK_DEV_RSXX is not set -# CONFIG_BLK_DEV_NVME is not set # CONFIG_NVME_FC is not set # CONFIG_NVME_TARGET is not set @@ -1434,14 +1365,10 @@ CONFIG_BLK_DEV_RAM_SIZE=8192 # CONFIG_SENSORS_LIS3LV02D is not set # CONFIG_AD525X_DPOT is not set # CONFIG_DUMMY_IRQ is not set -# CONFIG_PHANTOM is not set -# CONFIG_SGI_IOC4 is not set CONFIG_TI_DRV8846=y CONFIG_SIMTRAY_STATUS=y -# CONFIG_TIFM_CORE is not set # CONFIG_ICS932S401 is not set # CONFIG_ENCLOSURE_SERVICES is not set -# CONFIG_HP_ILO is not set # CONFIG_QCOM_COINCELL is not set # CONFIG_APDS9802ALS is not set # CONFIG_ISL29003 is not set @@ -1457,12 +1384,10 @@ CONFIG_SIMTRAY_STATUS=y # CONFIG_SRAM is not set CONFIG_HDCP_QSEECOM=y CONFIG_QSEECOM=y -# CONFIG_PCI_ENDPOINT_TEST is not set CONFIG_UID_SYS_STATS=y # CONFIG_UID_SYS_STATS_DEBUG is not set # CONFIG_MEMORY_STATE_TIME is not set # CONFIG_QPNP_MISC is not set -# CONFIG_TEST_IRQ_REQUESTER is not set CONFIG_AKM09970=y # CONFIG_C2PORT is not set @@ -1476,7 +1401,6 @@ CONFIG_AKM09970=y # CONFIG_EEPROM_93CX6 is not set # CONFIG_EEPROM_93XX46 is not set # CONFIG_EEPROM_IDT_89HPESX is not set -# CONFIG_CB710_CORE is not set # # Texas Instruments shared transport line discipline @@ -1521,7 +1445,6 @@ CONFIG_AKM09970=y # # VOP Driver # -# CONFIG_GENWQE is not set # CONFIG_ECHO is not set # CONFIG_CXL_BASE is not set # CONFIG_CXL_AFU_DRIVER_OPS is not set @@ -1568,54 +1491,13 @@ CONFIG_SCSI_SCAN_ASYNC=y 
CONFIG_SCSI_LOWLEVEL=y # CONFIG_ISCSI_TCP is not set # CONFIG_ISCSI_BOOT_SYSFS is not set -# CONFIG_SCSI_CXGB3_ISCSI is not set -# CONFIG_SCSI_CXGB4_ISCSI is not set -# CONFIG_SCSI_BNX2_ISCSI is not set -# CONFIG_BE2ISCSI is not set -# CONFIG_BLK_DEV_3W_XXXX_RAID is not set -# CONFIG_SCSI_HPSA is not set -# CONFIG_SCSI_3W_9XXX is not set -# CONFIG_SCSI_3W_SAS is not set -# CONFIG_SCSI_ACARD is not set -# CONFIG_SCSI_AACRAID is not set -# CONFIG_SCSI_AIC7XXX is not set -# CONFIG_SCSI_AIC79XX is not set -# CONFIG_SCSI_AIC94XX is not set -# CONFIG_SCSI_MVSAS is not set -# CONFIG_SCSI_MVUMI is not set -# CONFIG_SCSI_ADVANSYS is not set -# CONFIG_SCSI_ARCMSR is not set -# CONFIG_SCSI_ESAS2R is not set -# CONFIG_MEGARAID_NEWGEN is not set -# CONFIG_MEGARAID_LEGACY is not set -# CONFIG_MEGARAID_SAS is not set -# CONFIG_SCSI_MPT3SAS is not set -# CONFIG_SCSI_MPT2SAS is not set -# CONFIG_SCSI_SMARTPQI is not set CONFIG_SCSI_UFSHCD=y -# CONFIG_SCSI_UFSHCD_PCI is not set CONFIG_SCSI_UFSHCD_PLATFORM=y # CONFIG_SCSI_UFS_DWC_TC_PLATFORM is not set CONFIG_SCSI_UFS_QCOM=y CONFIG_SCSI_UFS_QCOM_ICE=y # CONFIG_SCSI_UFSHCD_CMD_LOGGING is not set -# CONFIG_SCSI_HPTIOP is not set -# CONFIG_SCSI_SNIC is not set -# CONFIG_SCSI_DMX3191D is not set -# CONFIG_SCSI_FUTURE_DOMAIN is not set -# CONFIG_SCSI_IPS is not set -# CONFIG_SCSI_INITIO is not set -# CONFIG_SCSI_INIA100 is not set -# CONFIG_SCSI_STEX is not set -# CONFIG_SCSI_SYM53C8XX_2 is not set -# CONFIG_SCSI_QLOGIC_1280 is not set -# CONFIG_SCSI_QLA_ISCSI is not set -# CONFIG_SCSI_DC395x is not set -# CONFIG_SCSI_AM53C974 is not set -# CONFIG_SCSI_WD719X is not set # CONFIG_SCSI_DEBUG is not set -# CONFIG_SCSI_PMCRAID is not set -# CONFIG_SCSI_PM8001 is not set # CONFIG_SCSI_LOWLEVEL_PCMCIA is not set # CONFIG_SCSI_DH is not set # CONFIG_SCSI_OSD_INITIATOR is not set @@ -1652,20 +1534,12 @@ CONFIG_DM_VERITY_FEC=y # CONFIG_DM_ANDROID_VERITY_AT_MOST_ONCE_DEFAULT_ENABLED is not set CONFIG_DM_BOW=y # CONFIG_TARGET_CORE is not set -# 
CONFIG_FUSION is not set - -# -# IEEE 1394 (FireWire) support -# -# CONFIG_FIREWIRE is not set -# CONFIG_FIREWIRE_NOSY is not set CONFIG_NETDEVICES=y CONFIG_MII=y CONFIG_NET_CORE=y CONFIG_BONDING=y CONFIG_DUMMY=y # CONFIG_EQUALIZER is not set -# CONFIG_NET_FC is not set # CONFIG_IFB is not set # CONFIG_NET_TEAM is not set # CONFIG_MACVLAN is not set @@ -1681,7 +1555,6 @@ CONFIG_TUN=y # CONFIG_TUN_VNET_CROSS_LE is not set # CONFIG_VETH is not set # CONFIG_NLMON is not set -# CONFIG_ARCNET is not set # # CAIF transport drivers @@ -1691,194 +1564,41 @@ CONFIG_TUN=y # Distributed Switch Architecture drivers # CONFIG_ETHERNET=y -CONFIG_NET_VENDOR_3COM=y -# CONFIG_VORTEX is not set -# CONFIG_TYPHOON is not set -CONFIG_NET_VENDOR_ADAPTEC=y -# CONFIG_ADAPTEC_STARFIRE is not set -CONFIG_NET_VENDOR_AGERE=y -# CONFIG_ET131X is not set -CONFIG_NET_VENDOR_ALACRITECH=y -# CONFIG_SLICOSS is not set -CONFIG_NET_VENDOR_ALTEON=y -# CONFIG_ACENIC is not set +# CONFIG_NET_VENDOR_ALACRITECH is not set # CONFIG_ALTERA_TSE is not set -CONFIG_NET_VENDOR_AMAZON=y -# CONFIG_ENA_ETHERNET is not set -CONFIG_NET_VENDOR_AMD=y -# CONFIG_AMD8111_ETH is not set -# CONFIG_PCNET32 is not set -# CONFIG_AMD_XGBE is not set -# CONFIG_AMD_XGBE_HAVE_ECC is not set -CONFIG_NET_VENDOR_AQUANTIA=y -# CONFIG_AQFWD is not set -CONFIG_NET_VENDOR_ARC=y -CONFIG_NET_VENDOR_ATHEROS=y -# CONFIG_ATL2 is not set -# CONFIG_ATL1 is not set -# CONFIG_ATL1E is not set -# CONFIG_ATL1C is not set -# CONFIG_ALX is not set +# CONFIG_NET_VENDOR_AMAZON is not set +# CONFIG_NET_VENDOR_AMD is not set +# CONFIG_NET_VENDOR_AQUANTIA is not set +# CONFIG_NET_VENDOR_ARC is not set # CONFIG_NET_VENDOR_AURORA is not set -CONFIG_NET_CADENCE=y -# CONFIG_MACB is not set -CONFIG_NET_VENDOR_BROADCOM=y -# CONFIG_B44 is not set -# CONFIG_BCMGENET is not set -# CONFIG_BNX2 is not set -# CONFIG_CNIC is not set -# CONFIG_TIGON3 is not set -# CONFIG_BNX2X is not set -# CONFIG_SYSTEMPORT is not set -# CONFIG_BNXT is not set 
-CONFIG_NET_VENDOR_BROCADE=y -# CONFIG_BNA is not set -CONFIG_NET_VENDOR_CAVIUM=y -# CONFIG_THUNDER_NIC_PF is not set -# CONFIG_THUNDER_NIC_VF is not set -# CONFIG_THUNDER_NIC_BGX is not set -# CONFIG_THUNDER_NIC_RGX is not set -# CONFIG_LIQUIDIO is not set -# CONFIG_LIQUIDIO_VF is not set -CONFIG_NET_VENDOR_CHELSIO=y -# CONFIG_CHELSIO_T1 is not set -# CONFIG_CHELSIO_T3 is not set -# CONFIG_CHELSIO_T4 is not set -# CONFIG_CHELSIO_T4VF is not set -CONFIG_NET_VENDOR_CISCO=y -# CONFIG_ENIC is not set +# CONFIG_NET_CADENCE is not set +# CONFIG_NET_VENDOR_BROADCOM is not set # CONFIG_DNET is not set -CONFIG_NET_VENDOR_DEC=y -# CONFIG_NET_TULIP is not set -CONFIG_NET_VENDOR_DLINK=y -# CONFIG_DL2K is not set -# CONFIG_SUNDANCE is not set -CONFIG_NET_VENDOR_EMULEX=y -# CONFIG_BE2NET is not set -CONFIG_NET_VENDOR_EZCHIP=y -# CONFIG_EZCHIP_NPS_MANAGEMENT_ENET is not set -CONFIG_NET_VENDOR_EXAR=y -# CONFIG_S2IO is not set -# CONFIG_VXGE is not set -CONFIG_NET_VENDOR_HISILICON=y -# CONFIG_HIX5HD2_GMAC is not set -# CONFIG_HISI_FEMAC is not set -# CONFIG_HIP04_ETH is not set -# CONFIG_HNS is not set -# CONFIG_HNS_DSAF is not set -# CONFIG_HNS_ENET is not set -# CONFIG_HNS3 is not set -CONFIG_NET_VENDOR_HP=y -# CONFIG_HP100 is not set -CONFIG_NET_VENDOR_HUAWEI=y -CONFIG_NET_VENDOR_INTEL=y -# CONFIG_E100 is not set -# CONFIG_E1000 is not set -# CONFIG_E1000E is not set -# CONFIG_IGB is not set -# CONFIG_IGBVF is not set -# CONFIG_IXGB is not set -# CONFIG_IXGBE is not set -# CONFIG_IXGBEVF is not set -# CONFIG_I40E is not set -# CONFIG_I40EVF is not set -# CONFIG_FM10K is not set -CONFIG_NET_VENDOR_I825XX=y -# CONFIG_JME is not set -CONFIG_NET_VENDOR_MARVELL=y -# CONFIG_MVMDIO is not set -# CONFIG_SKGE is not set -CONFIG_SKY2=y -# CONFIG_SKY2_DEBUG is not set -CONFIG_NET_VENDOR_MELLANOX=y -# CONFIG_MLX4_EN is not set -# CONFIG_MLX4_CORE is not set -# CONFIG_MLX5_CORE is not set -# CONFIG_MLXSW_CORE is not set -# CONFIG_MLXFW is not set -CONFIG_NET_VENDOR_MICREL=y -# CONFIG_KS8842 
is not set -# CONFIG_KS8851 is not set -# CONFIG_KS8851_MLL is not set -# CONFIG_KSZ884X_PCI is not set -CONFIG_NET_VENDOR_MICROCHIP=y -# CONFIG_ENC28J60 is not set -# CONFIG_ENCX24J600 is not set -CONFIG_NET_VENDOR_MYRI=y -# CONFIG_MYRI10GE is not set -# CONFIG_FEALNX is not set -CONFIG_NET_VENDOR_NATSEMI=y -# CONFIG_NATSEMI is not set -# CONFIG_NS83820 is not set -CONFIG_NET_VENDOR_NETRONOME=y -# CONFIG_NFP is not set -CONFIG_NET_VENDOR_8390=y -# CONFIG_NE2K_PCI is not set -CONFIG_NET_VENDOR_NVIDIA=y -# CONFIG_FORCEDETH is not set -CONFIG_NET_VENDOR_OKI=y +# CONFIG_NET_VENDOR_EZCHIP is not set +# CONFIG_NET_VENDOR_HISILICON is not set +# CONFIG_NET_VENDOR_HUAWEI is not set +# CONFIG_NET_VENDOR_INTEL is not set +# CONFIG_NET_VENDOR_MARVELL is not set +# CONFIG_NET_VENDOR_MELLANOX is not set +# CONFIG_NET_VENDOR_MICREL is not set +# CONFIG_NET_VENDOR_MICROCHIP is not set +# CONFIG_NET_VENDOR_NATSEMI is not set +# CONFIG_NET_VENDOR_NETRONOME is not set # CONFIG_ETHOC is not set -CONFIG_NET_PACKET_ENGINE=y -# CONFIG_HAMACHI is not set -# CONFIG_YELLOWFIN is not set -CONFIG_NET_VENDOR_QLOGIC=y -# CONFIG_QLA3XXX is not set -# CONFIG_QLCNIC is not set -# CONFIG_QLGE is not set -# CONFIG_NETXEN_NIC is not set -# CONFIG_QED is not set CONFIG_NET_VENDOR_QUALCOMM=y # CONFIG_QCA7000_SPI is not set # CONFIG_QCOM_EMAC is not set CONFIG_RMNET=y -CONFIG_NET_VENDOR_REALTEK=y -# CONFIG_8139CP is not set -# CONFIG_8139TOO is not set -# CONFIG_R8169 is not set -CONFIG_NET_VENDOR_RENESAS=y -CONFIG_NET_VENDOR_RDC=y -# CONFIG_R6040 is not set -CONFIG_NET_VENDOR_ROCKER=y -# CONFIG_ROCKER is not set -CONFIG_NET_VENDOR_SAMSUNG=y -# CONFIG_SXGBE_ETH is not set -CONFIG_NET_VENDOR_SEEQ=y -CONFIG_NET_VENDOR_SILAN=y -# CONFIG_SC92031 is not set -CONFIG_NET_VENDOR_SIS=y -# CONFIG_SIS900 is not set -# CONFIG_SIS190 is not set -CONFIG_NET_VENDOR_SOLARFLARE=y -# CONFIG_SFC is not set -# CONFIG_SFC_FALCON is not set -CONFIG_NET_VENDOR_SMSC=y -# CONFIG_SMC91X is not set -# CONFIG_EPIC100 is not set 
-CONFIG_SMSC911X=y -# CONFIG_SMSC911X_ARCH_HOOKS is not set -# CONFIG_SMSC9420 is not set -CONFIG_NET_VENDOR_STMICRO=y -# CONFIG_STMMAC_ETH is not set -CONFIG_NET_VENDOR_SUN=y -# CONFIG_HAPPYMEAL is not set -# CONFIG_SUNGEM is not set -# CONFIG_CASSINI is not set -# CONFIG_NIU is not set -CONFIG_NET_VENDOR_TEHUTI=y -# CONFIG_TEHUTI is not set -CONFIG_NET_VENDOR_TI=y -# CONFIG_TI_CPSW_ALE is not set -# CONFIG_TLAN is not set -CONFIG_NET_VENDOR_VIA=y -# CONFIG_VIA_RHINE is not set -# CONFIG_VIA_VELOCITY is not set -CONFIG_NET_VENDOR_WIZNET=y -# CONFIG_WIZNET_W5100 is not set -# CONFIG_WIZNET_W5300 is not set -CONFIG_NET_VENDOR_SYNOPSYS=y -# CONFIG_DWC_XLGMAC is not set -# CONFIG_FDDI is not set -# CONFIG_HIPPI is not set +# CONFIG_NET_VENDOR_RENESAS is not set +# CONFIG_NET_VENDOR_ROCKER is not set +# CONFIG_NET_VENDOR_SAMSUNG is not set +# CONFIG_NET_VENDOR_SEEQ is not set +# CONFIG_NET_VENDOR_SOLARFLARE is not set +# CONFIG_NET_VENDOR_SMSC is not set +# CONFIG_NET_VENDOR_STMICRO is not set +# CONFIG_NET_VENDOR_VIA is not set +# CONFIG_NET_VENDOR_WIZNET is not set +# CONFIG_NET_VENDOR_SYNOPSYS is not set CONFIG_MDIO_DEVICE=y CONFIG_MDIO_BUS=y # CONFIG_MDIO_BCM_UNIMAC is not set @@ -1887,7 +1607,6 @@ CONFIG_MDIO_BUS=y # CONFIG_MDIO_BUS_MUX_MMIOREG is not set # CONFIG_MDIO_HISI_FEMAC is not set # CONFIG_MDIO_OCTEON is not set -# CONFIG_MDIO_THUNDER is not set CONFIG_PHYLIB=y CONFIG_SWPHY=y # CONFIG_LED_TRIGGER_PHY is not set @@ -1914,7 +1633,7 @@ CONFIG_FIXED_PHY=y # CONFIG_MARVELL_PHY is not set # CONFIG_MARVELL_10G_PHY is not set # CONFIG_MICREL_PHY is not set -CONFIG_MICROCHIP_PHY=y +# CONFIG_MICROCHIP_PHY is not set # CONFIG_MICROSEMI_PHY is not set # CONFIG_NATIONAL_PHY is not set # CONFIG_QSEMI_PHY is not set @@ -1947,7 +1666,7 @@ CONFIG_USB_NET_DRIVERS=y # CONFIG_USB_PEGASUS is not set # CONFIG_USB_RTL8150 is not set CONFIG_USB_RTL8152=y -CONFIG_USB_LAN78XX=y +# CONFIG_USB_LAN78XX is not set CONFIG_USB_USBNET=y CONFIG_USB_NET_AX8817X=y 
CONFIG_USB_NET_AX88179_178A=y @@ -1962,19 +1681,12 @@ CONFIG_USB_NET_CDC_NCM=y # CONFIG_USB_NET_SMSC75XX is not set # CONFIG_USB_NET_SMSC95XX is not set # CONFIG_USB_NET_GL620A is not set -CONFIG_USB_NET_NET1080=y +# CONFIG_USB_NET_NET1080 is not set # CONFIG_USB_NET_PLUSB is not set # CONFIG_USB_NET_MCS7830 is not set # CONFIG_USB_NET_RNDIS_HOST is not set -CONFIG_USB_NET_CDC_SUBSET_ENABLE=y -CONFIG_USB_NET_CDC_SUBSET=y -# CONFIG_USB_ALI_M5632 is not set -# CONFIG_USB_AN2720 is not set -CONFIG_USB_BELKIN=y -CONFIG_USB_ARMLINUX=y -# CONFIG_USB_EPSON2888 is not set -# CONFIG_USB_KC2190 is not set -CONFIG_USB_NET_ZAURUS=y +# CONFIG_USB_NET_CDC_SUBSET is not set +# CONFIG_USB_NET_ZAURUS is not set # CONFIG_USB_NET_CX82310_ETH is not set # CONFIG_USB_NET_KALMIA is not set # CONFIG_USB_NET_QMI_WWAN is not set @@ -1990,9 +1702,7 @@ CONFIG_WLAN=y CONFIG_WLAN_VENDOR_ATH=y # CONFIG_ATH_DEBUG is not set # CONFIG_ATH_REG_DYNAMIC_USER_REG_HINTS is not set -# CONFIG_ATH5K_PCI is not set # CONFIG_ATH6KL is not set -# CONFIG_WIL6210 is not set # CONFIG_WLAN_VENDOR_ATMEL is not set # CONFIG_WLAN_VENDOR_BROADCOM is not set # CONFIG_WLAN_VENDOR_CISCO is not set @@ -2011,7 +1721,6 @@ CONFIG_WLAN_VENDOR_ATH=y CONFIG_WCNSS_MEM_PRE_ALLOC=y # CONFIG_CNSS_CRYPTO is not set CONFIG_CLD_LL_CORE=y -# CONFIG_CNSS2 is not set # CONFIG_CNSS_QCA6290 is not set # CONFIG_CNSS_QCA6390 is not set # CONFIG_CNSS_EMULATION is not set @@ -2028,7 +1737,6 @@ CONFIG_CNSS_GENL=y # Enable WiMAX (Networking options) to see the WiMAX drivers # # CONFIG_WAN is not set -# CONFIG_VMXNET3 is not set # CONFIG_ISDN is not set # CONFIG_NVM is not set @@ -2174,7 +1882,6 @@ CONFIG_TOUCHSCREEN_PROPERTIES=y # CONFIG_TOUCHSCREEN_SIS_I2C is not set # CONFIG_TOUCHSCREEN_ST1232 is not set # CONFIG_TOUCHSCREEN_STMFTS is not set -# CONFIG_TOUCHSCREEN_SUR40 is not set # CONFIG_TOUCHSCREEN_SURFACE3_SPI is not set # CONFIG_TOUCHSCREEN_SX8654 is not set # CONFIG_TOUCHSCREEN_TPS6507X is not set @@ -2230,7 +1937,6 @@ 
CONFIG_INPUT_UINPUT=y CONFIG_SERIO=y # CONFIG_SERIO_SERPORT is not set # CONFIG_SERIO_AMBAKMI is not set -# CONFIG_SERIO_PCIPS2 is not set CONFIG_SERIO_LIBPS2=y # CONFIG_SERIO_RAW is not set # CONFIG_SERIO_ALTERA_PS2 is not set @@ -2249,7 +1955,6 @@ CONFIG_TTY=y CONFIG_UNIX98_PTYS=y # CONFIG_LEGACY_PTYS is not set # CONFIG_SERIAL_NONSTANDARD is not set -# CONFIG_NOZOMI is not set # CONFIG_N_GSM is not set # CONFIG_TRACE_SINK is not set CONFIG_LDISC_AUTOLOAD=y @@ -2270,7 +1975,6 @@ CONFIG_LDISC_AUTOLOAD=y # CONFIG_SERIAL_MAX310X is not set # CONFIG_SERIAL_UARTLITE is not set CONFIG_SERIAL_CORE=y -# CONFIG_SERIAL_JSM is not set # CONFIG_SERIAL_MSM is not set CONFIG_SERIAL_MSM_GENI=y # CONFIG_SERIAL_MSM_GENI_CONSOLE is not set @@ -2283,7 +1987,6 @@ CONFIG_SERIAL_MSM_GENI=y # CONFIG_SERIAL_IFX6X60 is not set # CONFIG_SERIAL_XILINX_PS_UART is not set # CONFIG_SERIAL_ARC is not set -# CONFIG_SERIAL_RP2 is not set # CONFIG_SERIAL_FSL_LPUART is not set # CONFIG_SERIAL_CONEXANT_DIGICOLOR is not set # CONFIG_SERIAL_DEV_BUS is not set @@ -2294,15 +1997,12 @@ CONFIG_HW_RANDOM=y # CONFIG_HW_RANDOM_TIMERIOMEM is not set # CONFIG_HW_RANDOM_MSM is not set CONFIG_HW_RANDOM_MSM_LEGACY=y -CONFIG_HW_RANDOM_CAVIUM=y -# CONFIG_APPLICOM is not set # # PCMCIA character devices # # CONFIG_RAW_DRIVER is not set # CONFIG_TCG_TPM is not set -# CONFIG_DEVPORT is not set # CONFIG_XILLYBUS is not set # @@ -2310,7 +2010,6 @@ CONFIG_HW_RANDOM_CAVIUM=y # CONFIG_DIAG_CHAR=y CONFIG_DIAG_OVER_USB=y -CONFIG_DIAGFWD_BRIDGE_CODE=y CONFIG_MSM_FASTCVPD=y CONFIG_MSM_ADSPRPC=y # CONFIG_MSM_RDBG is not set @@ -2344,31 +2043,12 @@ CONFIG_I2C_ALGOBIT=y # I2C Hardware Bus support # -# -# PC SMBus host controller drivers -# -# CONFIG_I2C_ALI1535 is not set -# CONFIG_I2C_ALI1563 is not set -# CONFIG_I2C_ALI15X3 is not set -# CONFIG_I2C_AMD756 is not set -# CONFIG_I2C_AMD8111 is not set -# CONFIG_I2C_I801 is not set -# CONFIG_I2C_ISCH is not set -# CONFIG_I2C_PIIX4 is not set -# CONFIG_I2C_NFORCE2 is not set -# 
CONFIG_I2C_SIS5595 is not set -# CONFIG_I2C_SIS630 is not set -# CONFIG_I2C_SIS96X is not set -# CONFIG_I2C_VIA is not set -# CONFIG_I2C_VIAPRO is not set - # # I2C system bus drivers (mostly embedded / system-on-chip) # # CONFIG_I2C_CADENCE is not set # CONFIG_I2C_CBUS_GPIO is not set # CONFIG_I2C_DESIGNWARE_PLATFORM is not set -# CONFIG_I2C_DESIGNWARE_PCI is not set # CONFIG_I2C_EMEV2 is not set # CONFIG_I2C_GPIO is not set # CONFIG_I2C_NOMADIK is not set @@ -2379,7 +2059,6 @@ CONFIG_I2C_ALGOBIT=y CONFIG_I2C_QCOM_GENI=y # CONFIG_I2C_RK3X is not set # CONFIG_I2C_SIMTEC is not set -# CONFIG_I2C_THUNDERX is not set # CONFIG_I2C_XILINX is not set # @@ -2400,7 +2079,6 @@ CONFIG_I2C_QCOM_GENI=y # CONFIG_I2C_DEBUG_ALGO is not set # CONFIG_I2C_DEBUG_BUS is not set CONFIG_SPI=y -# CONFIG_SPI_DEBUG is not set CONFIG_SPI_MASTER=y # @@ -2415,13 +2093,11 @@ CONFIG_SPI_MASTER=y # CONFIG_SPI_FSL_SPI is not set # CONFIG_SPI_OC_TINY is not set # CONFIG_SPI_PL022 is not set -# CONFIG_SPI_PXA2XX is not set # CONFIG_SPI_PXA2XX_PCI is not set # CONFIG_SPI_ROCKCHIP is not set # CONFIG_SPI_QUP is not set CONFIG_SPI_QCOM_GENI=y # CONFIG_SPI_SC18IS602 is not set -# CONFIG_SPI_THUNDERX is not set # CONFIG_SPI_XCOMM is not set # CONFIG_SPI_XILINX is not set # CONFIG_SPI_ZYNQMP_GQSPI is not set @@ -2458,7 +2134,6 @@ CONFIG_PINCTRL=y CONFIG_PINMUX=y CONFIG_PINCONF=y CONFIG_GENERIC_PINCONF=y -# CONFIG_DEBUG_PINCTRL is not set # CONFIG_PINCTRL_AMD is not set # CONFIG_PINCTRL_MCP23S08 is not set # CONFIG_PINCTRL_SINGLE is not set @@ -2493,7 +2168,6 @@ CONFIG_PINCTRL_SM8150=y CONFIG_GPIOLIB=y CONFIG_OF_GPIO=y CONFIG_GPIOLIB_IRQCHIP=y -# CONFIG_DEBUG_GPIO is not set CONFIG_GPIO_SYSFS=y # @@ -2527,13 +2201,6 @@ CONFIG_GPIO_SYSFS=y # MFD GPIO expanders # -# -# PCI GPIO expanders -# -# CONFIG_GPIO_BT8XX is not set -# CONFIG_GPIO_PCI_IDIO_16 is not set -# CONFIG_GPIO_RDC321X is not set - # # SPI GPIO expanders # @@ -2641,7 +2308,6 @@ CONFIG_HWMON=y # CONFIG_SENSORS_ATXP1 is not set # 
CONFIG_SENSORS_DS620 is not set # CONFIG_SENSORS_DS1621 is not set -# CONFIG_SENSORS_I5K_AMB is not set # CONFIG_SENSORS_F71805F is not set # CONFIG_SENSORS_F71882FG is not set # CONFIG_SENSORS_F75375S is not set @@ -2708,7 +2374,6 @@ CONFIG_HWMON=y # CONFIG_SENSORS_SHT21 is not set # CONFIG_SENSORS_SHT3x is not set # CONFIG_SENSORS_SHTC1 is not set -# CONFIG_SENSORS_SIS5595 is not set # CONFIG_SENSORS_DME1737 is not set # CONFIG_SENSORS_EMC1403 is not set # CONFIG_SENSORS_EMC2103 is not set @@ -2734,9 +2399,7 @@ CONFIG_HWMON=y # CONFIG_SENSORS_TMP108 is not set # CONFIG_SENSORS_TMP401 is not set # CONFIG_SENSORS_TMP421 is not set -# CONFIG_SENSORS_VIA686A is not set # CONFIG_SENSORS_VT1211 is not set -# CONFIG_SENSORS_VT8231 is not set # CONFIG_SENSORS_W83781D is not set # CONFIG_SENSORS_W83791D is not set # CONFIG_SENSORS_W83792D is not set @@ -2826,9 +2489,6 @@ CONFIG_MFD_CORE=y # CONFIG_MFD_HI6421_PMIC is not set # CONFIG_HTC_PASIC3 is not set # CONFIG_HTC_I2CPLD is not set -# CONFIG_LPC_ICH is not set -# CONFIG_LPC_SCH is not set -# CONFIG_MFD_JANZ_CMODIO is not set # CONFIG_MFD_KEMPLD is not set # CONFIG_MFD_88PM800 is not set # CONFIG_MFD_88PM805 is not set @@ -2852,8 +2512,6 @@ CONFIG_MFD_CORE=y CONFIG_MFD_I2C_PMIC=y # CONFIG_MFD_QCOM_RPM is not set CONFIG_MFD_SPMI_PMIC=y -# CONFIG_MFD_RDC321X is not set -# CONFIG_MFD_RTSX_PCI is not set # CONFIG_MFD_RT5033 is not set # CONFIG_MFD_RTSX_USB is not set # CONFIG_MFD_RC5T583 is not set @@ -2892,7 +2550,6 @@ CONFIG_MFD_SYSCON=y # CONFIG_MFD_LM3533 is not set # CONFIG_MFD_TC3589X is not set # CONFIG_MFD_TMIO is not set -# CONFIG_MFD_VX855 is not set # CONFIG_MFD_ARIZONA_I2C is not set # CONFIG_MFD_ARIZONA_SPI is not set # CONFIG_MFD_WM8400 is not set @@ -2963,12 +2620,11 @@ CONFIG_MEDIA_SUPPORT=y # CONFIG_MEDIA_CAMERA_SUPPORT=y # CONFIG_MEDIA_ANALOG_TV_SUPPORT is not set -CONFIG_MEDIA_DIGITAL_TV_SUPPORT=y +# CONFIG_MEDIA_DIGITAL_TV_SUPPORT is not set # CONFIG_MEDIA_RADIO_SUPPORT is not set # 
CONFIG_MEDIA_SDR_SUPPORT is not set # CONFIG_MEDIA_CEC_SUPPORT is not set CONFIG_MEDIA_CONTROLLER=y -# CONFIG_MEDIA_CONTROLLER_DVB is not set CONFIG_VIDEO_DEV=y CONFIG_VIDEO_V4L2_SUBDEV_API=y CONFIG_VIDEO_V4L2=y @@ -2976,56 +2632,14 @@ CONFIG_VIDEO_V4L2=y CONFIG_VIDEO_FIXED_MINOR_RANGES=y CONFIG_V4L2_MEM2MEM_DEV=y CONFIG_VIDEOBUF2_CORE=y -CONFIG_VIDEOBUF2_MEMOPS=y -CONFIG_VIDEOBUF2_VMALLOC=y # CONFIG_V4L2_LOOPBACK is not set -CONFIG_DVB_CORE=y -CONFIG_DVB_NET=y # CONFIG_TTPCI_EEPROM is not set -CONFIG_DVB_MAX_ADAPTERS=16 -# CONFIG_DVB_DYNAMIC_MINORS is not set -# CONFIG_DVB_DEMUX_SECTION_LOSS_LOG is not set # # Media drivers # -CONFIG_MEDIA_USB_SUPPORT=y - -# -# Webcam devices -# -CONFIG_USB_VIDEO_CLASS=y -CONFIG_USB_VIDEO_CLASS_INPUT_EVDEV=y -# CONFIG_USB_GSPCA is not set -# CONFIG_USB_PWC is not set -# CONFIG_VIDEO_CPIA2 is not set -# CONFIG_USB_ZR364XX is not set -# CONFIG_USB_STKWEBCAM is not set -# CONFIG_USB_S2255 is not set -# CONFIG_VIDEO_USBTV is not set - -# -# Analog/digital TV USB devices -# -# CONFIG_VIDEO_AU0828 is not set - -# -# Digital TV USB devices -# -# CONFIG_DVB_USB_V2 is not set -# CONFIG_DVB_TTUSB_BUDGET is not set -# CONFIG_DVB_TTUSB_DEC is not set -# CONFIG_SMS_USB_DRV is not set -# CONFIG_DVB_B2C2_FLEXCOP_USB is not set -# CONFIG_DVB_AS102 is not set - -# -# Webcam, TV (analog/digital) USB devices -# -# CONFIG_VIDEO_EM28XX is not set -# CONFIG_MEDIA_PCI_SUPPORT is not set +# CONFIG_MEDIA_USB_SUPPORT is not set CONFIG_V4L_PLATFORM_DRIVERS=y -# CONFIG_VIDEO_CAFE_CCIC is not set # CONFIG_VIDEO_MUX is not set # CONFIG_VIDEO_QCOM_CAMSS is not set # CONFIG_SOC_CAMERA is not set @@ -3046,7 +2660,7 @@ CONFIG_MSM_VIDC_GOVERNORS=y CONFIG_MSM_SDE_ROTATOR=y # CONFIG_MSM_SDE_ROTATOR_EVTLOG_DEBUG is not set CONFIG_MSM_NPU=y -# CONFIG_DVB_MPQ is not set +# CONFIG_MSM_NPU_DEBUG_FS is not set # # Qualcomm Technologies, Inc. 
Demux device config @@ -3054,7 +2668,6 @@ CONFIG_MSM_NPU=y # CONFIG_TSPP is not set # CONFIG_V4L_MEM2MEM_DRIVERS is not set # CONFIG_V4L_TEST_DRIVERS is not set -# CONFIG_DVB_PLATFORM_DRIVERS is not set # # Qualcomm Technologies, Inc. MSM Camera And Video @@ -3129,75 +2742,18 @@ CONFIG_MEDIA_SUBDRV_AUTOSELECT=y # # Sensors used on soc_camera driver # -CONFIG_MEDIA_TUNER=y -CONFIG_MEDIA_TUNER_SIMPLE=y -CONFIG_MEDIA_TUNER_TDA8290=y -CONFIG_MEDIA_TUNER_TDA827X=y -CONFIG_MEDIA_TUNER_TDA18271=y -CONFIG_MEDIA_TUNER_TDA9887=y -CONFIG_MEDIA_TUNER_MT20XX=y -CONFIG_MEDIA_TUNER_XC2028=y -CONFIG_MEDIA_TUNER_XC5000=y -CONFIG_MEDIA_TUNER_XC4000=y -CONFIG_MEDIA_TUNER_MC44S803=y - -# -# Multistandard (satellite) frontends -# - -# -# Multistandard (cable + terrestrial) frontends -# - -# -# DVB-S (satellite) frontends -# - -# -# DVB-T (terrestrial) frontends -# -# CONFIG_DVB_AS102_FE is not set -# CONFIG_DVB_GP8PSK_FE is not set - -# -# DVB-C (cable) frontends -# - -# -# ATSC (North American/Korean Terrestrial/Cable DTV) frontends -# - -# -# ISDB-T (terrestrial) frontends -# - -# -# ISDB-S (satellite) & ISDB-T (terrestrial) frontends -# - -# -# Digital terrestrial only tuners/PLL -# - -# -# SEC control devices for DVB-S -# # # Tools to develop new frontends # -# CONFIG_DVB_DUMMY_FE is not set # # Graphics support # -CONFIG_VGA_ARB=y -CONFIG_VGA_ARB_MAX_GPUS=16 CONFIG_DRM=y CONFIG_DRM_MIPI_DSI=y # CONFIG_DRM_DP_AUX_CHARDEV is not set # CONFIG_DRM_DEBUG_MM is not set -# CONFIG_DRM_DEBUG_MM_SELFTEST is not set CONFIG_DRM_KMS_HELPER=y # CONFIG_DRM_FBDEV_EMULATION is not set # CONFIG_DRM_LOAD_EDID_FIRMWARE is not set @@ -3210,27 +2766,20 @@ CONFIG_DRM_KMS_HELPER=y # CONFIG_DRM_I2C_NXP_TDA998X is not set # CONFIG_DRM_HDLCD is not set # CONFIG_DRM_MALI_DISPLAY is not set -# CONFIG_DRM_RADEON is not set -# CONFIG_DRM_AMDGPU is not set # # ACP (Audio CoProcessor) Configuration # -# CONFIG_DRM_NOUVEAU is not set # CONFIG_DRM_VGEM is not set # CONFIG_DRM_UDL is not set -# CONFIG_DRM_AST is not 
set -# CONFIG_DRM_MGAG200 is not set -# CONFIG_DRM_CIRRUS_QEMU is not set # CONFIG_DRM_RCAR_DW_HDMI is not set -# CONFIG_DRM_QXL is not set -# CONFIG_DRM_BOCHS is not set CONFIG_DRM_MSM=y # CONFIG_DRM_MSM_REGISTER_LOGGING is not set # CONFIG_DRM_MSM_HDMI_HDCP is not set # CONFIG_DRM_MSM_HDMI is not set # CONFIG_DRM_MSM_DSI is not set CONFIG_DRM_MSM_DSI_STAGING=y +CONFIG_DSI_PARSER=y # CONFIG_DRM_MSM_MDP5 is not set # CONFIG_DRM_MSM_MDP4 is not set # CONFIG_DRM_MSM_HDCP is not set @@ -3239,6 +2788,7 @@ CONFIG_DRM_SDE_WB=y # CONFIG_DRM_SDE_SHP is not set # CONFIG_DRM_SDE_EVTLOG_DEBUG is not set CONFIG_DRM_SDE_RSC=y +# CONFIG_FENCE_DEBUG is not set # CONFIG_DRM_MSM_LEASE is not set CONFIG_DRM_PANEL=y @@ -3274,10 +2824,9 @@ CONFIG_DRM_PANEL_BRIDGE=y # CONFIG_DRM_SII902X is not set # CONFIG_DRM_TOSHIBA_TC358767 is not set # CONFIG_DRM_TI_TFP410 is not set -CONFIG_DRM_LT_LT9611=y +# CONFIG_DRM_LT_LT9611 is not set # CONFIG_DRM_I2C_ADV7511 is not set # CONFIG_DRM_ARCPGU is not set -# CONFIG_DRM_HISI_HIBMC is not set # CONFIG_DRM_HISI_KIRIN is not set # CONFIG_DRM_MXSFB is not set # CONFIG_DRM_TINYDRM is not set @@ -3294,9 +2843,9 @@ CONFIG_FB_CMDLINE=y CONFIG_FB_NOTIFY=y # CONFIG_FB_DDC is not set # CONFIG_FB_BOOT_VESA_SUPPORT is not set -CONFIG_FB_CFB_FILLRECT=y -CONFIG_FB_CFB_COPYAREA=y -CONFIG_FB_CFB_IMAGEBLIT=y +# CONFIG_FB_CFB_FILLRECT is not set +# CONFIG_FB_CFB_COPYAREA is not set +# CONFIG_FB_CFB_IMAGEBLIT is not set # CONFIG_FB_CFB_REV_PIXELS_IN_BYTE is not set # CONFIG_FB_SYS_FILLRECT is not set # CONFIG_FB_SYS_COPYAREA is not set @@ -3307,52 +2856,26 @@ CONFIG_FB_CFB_IMAGEBLIT=y # CONFIG_FB_SVGALIB is not set # CONFIG_FB_MACMODES is not set # CONFIG_FB_BACKLIGHT is not set -CONFIG_FB_MODE_HELPERS=y +# CONFIG_FB_MODE_HELPERS is not set # CONFIG_FB_TILEBLITTING is not set # # Frame buffer hardware drivers # -# CONFIG_FB_CIRRUS is not set -# CONFIG_FB_PM2 is not set -CONFIG_FB_ARMCLCD=y -# CONFIG_FB_CYBER2000 is not set -# CONFIG_FB_ASILIANT is not set -# 
CONFIG_FB_IMSTT is not set +# CONFIG_FB_ARMCLCD is not set # CONFIG_FB_OPENCORES is not set # CONFIG_FB_S1D13XXX is not set -# CONFIG_FB_NVIDIA is not set -# CONFIG_FB_RIVA is not set -# CONFIG_FB_I740 is not set -# CONFIG_FB_MATROX is not set -# CONFIG_FB_RADEON is not set -# CONFIG_FB_ATY128 is not set -# CONFIG_FB_ATY is not set -# CONFIG_FB_S3 is not set -# CONFIG_FB_SAVAGE is not set -# CONFIG_FB_SIS is not set -# CONFIG_FB_NEOMAGIC is not set -# CONFIG_FB_KYRO is not set -# CONFIG_FB_3DFX is not set -# CONFIG_FB_VOODOO1 is not set -# CONFIG_FB_VT8623 is not set -# CONFIG_FB_TRIDENT is not set -# CONFIG_FB_ARK is not set -# CONFIG_FB_PM3 is not set -# CONFIG_FB_CARMINE is not set # CONFIG_FB_SMSCUFX is not set # CONFIG_FB_UDL is not set # CONFIG_FB_IBM_GXT4500 is not set # CONFIG_FB_VIRTUAL is not set # CONFIG_FB_METRONOME is not set -# CONFIG_FB_MB862XX is not set # CONFIG_FB_MSM is not set # CONFIG_FB_BROADSHEET is not set # CONFIG_FB_AUO_K190X is not set # CONFIG_FB_SIMPLE is not set # CONFIG_MSM_DBA is not set # CONFIG_FB_SSD1307 is not set -# CONFIG_FB_SM712 is not set CONFIG_BACKLIGHT_LCD_SUPPORT=y # CONFIG_LCD_CLASS_DEVICE is not set CONFIG_BACKLIGHT_CLASS_DEVICE=y @@ -3370,12 +2893,8 @@ CONFIG_BACKLIGHT_QCOM_SPMI_WLED=y # CONFIG_BACKLIGHT_BD6107 is not set # CONFIG_BACKLIGHT_ARCXCNN is not set # CONFIG_VGASTATE is not set -CONFIG_VIDEOMODE_HELPERS=y CONFIG_HDMI=y -CONFIG_LOGO=y -# CONFIG_LOGO_LINUX_MONO is not set -# CONFIG_LOGO_LINUX_VGA16 is not set -CONFIG_LOGO_LINUX_CLUT224=y +# CONFIG_LOGO is not set CONFIG_SOUND=y # CONFIG_SOUND_OSS_CORE is not set CONFIG_SND=y @@ -3401,19 +2920,13 @@ CONFIG_SND_VERBOSE_PROCFS=y # CONFIG_SND_SEQUENCER is not set # CONFIG_SND_OPL3_LIB_SEQ is not set # CONFIG_SND_OPL4_LIB_SEQ is not set -CONFIG_SND_DRIVERS=y -# CONFIG_SND_DUMMY is not set -# CONFIG_SND_ALOOP is not set -# CONFIG_SND_MTPAV is not set -# CONFIG_SND_SERIAL_U16550 is not set -# CONFIG_SND_MPU401 is not set -# CONFIG_SND_PCI is not set +# 
CONFIG_SND_DRIVERS is not set # # HD-Audio # CONFIG_SND_HDA_PREALLOC_SIZE=64 -CONFIG_SND_SPI=y +# CONFIG_SND_SPI is not set CONFIG_SND_USB=y CONFIG_SND_USB_AUDIO=y # CONFIG_SND_USB_UA101 is not set @@ -3670,7 +3183,6 @@ CONFIG_USB_SUPPORT=y CONFIG_USB_COMMON=y CONFIG_USB_ARCH_HAS_HCD=y CONFIG_USB=y -CONFIG_USB_PCI=y CONFIG_USB_ANNOUNCE_NEW_DEVICES=y # @@ -3690,7 +3202,6 @@ CONFIG_USB_DEFAULT_PERSIST=y # # CONFIG_USB_C67X00_HCD is not set CONFIG_USB_XHCI_HCD=y -CONFIG_USB_XHCI_PCI=y CONFIG_USB_XHCI_PLATFORM=y # CONFIG_USB_EHCI_HCD is not set # CONFIG_USB_OXU210HP_HCD is not set @@ -3699,7 +3210,6 @@ CONFIG_USB_XHCI_PLATFORM=y # CONFIG_USB_FOTG210_HCD is not set # CONFIG_USB_MAX3421_HCD is not set # CONFIG_USB_OHCI_HCD is not set -# CONFIG_USB_UHCI_HCD is not set # CONFIG_USB_SL811_HCD is not set # CONFIG_USB_R8A66597_HCD is not set # CONFIG_USB_HCD_TEST_MODE is not set @@ -3814,9 +3324,7 @@ CONFIG_MSM_HSUSB_PHY=y # CONFIG_USB_ULPI is not set CONFIG_DUAL_ROLE_USB_INTF=y CONFIG_USB_GADGET=y -# CONFIG_USB_GADGET_DEBUG is not set # CONFIG_USB_GADGET_DEBUG_FILES is not set -# CONFIG_USB_GADGET_DEBUG_FS is not set CONFIG_USB_GADGET_VBUS_DRAW=900 CONFIG_USB_GADGET_STORAGE_NUM_BUFFERS=2 @@ -3832,11 +3340,7 @@ CONFIG_USB_GADGET_STORAGE_NUM_BUFFERS=2 # CONFIG_USB_SNP_UDC_PLAT is not set # CONFIG_USB_M66592 is not set # CONFIG_USB_BDC_UDC is not set -# CONFIG_USB_AMD5536UDC is not set # CONFIG_USB_NET2272 is not set -# CONFIG_USB_NET2280 is not set -# CONFIG_USB_GOKU is not set -# CONFIG_USB_EG20T is not set # CONFIG_USB_GADGET_XILINX is not set # CONFIG_USB_CI13XXX_MSM is not set # CONFIG_USB_CI13XXX_MSM_HSIC is not set @@ -3977,7 +3481,6 @@ CONFIG_LEDS_TRIGGER_TIMER=y # CONFIG_LEDS_TRIGGER_CAMERA is not set # CONFIG_LEDS_TRIGGER_PANIC is not set # CONFIG_ACCESSIBILITY is not set -# CONFIG_INFINIBAND is not set CONFIG_EDAC_SUPPORT=y CONFIG_RTC_LIB=y CONFIG_RTC_CLASS=y @@ -4111,7 +3614,6 @@ CONFIG_QCOM_GPI_DMA=y # CONFIG_QCOM_GPI_DMA_DEBUG is not set # CONFIG_QCOM_PCI_EDMA is 
not set # CONFIG_DW_DMAC is not set -# CONFIG_DW_DMAC_PCI is not set # # DMA Clients @@ -4123,18 +3625,11 @@ CONFIG_QCOM_GPI_DMA=y # DMABUF options # CONFIG_SYNC_FILE=y -# CONFIG_SW_SYNC is not set # CONFIG_AUXDISPLAY is not set CONFIG_UIO=y -# CONFIG_UIO_CIF is not set # CONFIG_UIO_PDRV_GENIRQ is not set # CONFIG_UIO_DMEM_GENIRQ is not set -# CONFIG_UIO_AEC is not set -# CONFIG_UIO_SERCOS3 is not set -# CONFIG_UIO_PCI_GENERIC is not set -# CONFIG_UIO_NETX is not set # CONFIG_UIO_PRUSS is not set -# CONFIG_UIO_MF624 is not set CONFIG_UIO_MSM_SHAREDMEM=y # CONFIG_VFIO is not set # CONFIG_VIRT_DRIVERS is not set @@ -4142,7 +3637,6 @@ CONFIG_UIO_MSM_SHAREDMEM=y # # Virtio drivers # -# CONFIG_VIRTIO_PCI is not set # CONFIG_VIRTIO_MMIO is not set # @@ -4155,7 +3649,6 @@ CONFIG_STAGING=y # CONFIG_PRISM2_USB is not set # CONFIG_COMEDI is not set # CONFIG_R8712U is not set -# CONFIG_RTS5208 is not set # # IIO staging drivers @@ -4230,8 +3723,6 @@ CONFIG_STAGING=y # # Triggers - standalone # -# CONFIG_FB_SM750 is not set -# CONFIG_FB_XGI is not set # # Speakup console speech @@ -4242,7 +3733,6 @@ CONFIG_STAGING=y # Android # CONFIG_ASHMEM=y -# CONFIG_ANDROID_VSOC is not set CONFIG_ANDROID_LOW_MEMORY_KILLER=y CONFIG_ANDROID_LOW_MEMORY_KILLER_AUTODETECT_OOM_ADJ_VALUES=y CONFIG_ION=y @@ -4253,7 +3743,6 @@ CONFIG_ION=y # CONFIG_ION_FORCE_DMA_SYNC is not set # CONFIG_ION_DEFER_FREE_NO_SCHED_IDLE is not set # CONFIG_STAGING_BOARD is not set -# CONFIG_DGNC is not set # CONFIG_GS_FPGABOOT is not set # CONFIG_COMMON_CLK_XLNX_CLKWZRD is not set # CONFIG_FB_TFT is not set @@ -4298,10 +3787,8 @@ CONFIG_IPA_WDI_UNIFIED_API=y CONFIG_RMNET_IPA3=y # CONFIG_ECM_IPA is not set CONFIG_RNDIS_IPA=y -CONFIG_IPA3_MHI_PROXY=y -CONFIG_IPA3_MHI_PRIME_MANAGER=y -# CONFIG_IPA_UT is not set -# CONFIG_MSM_11AD is not set +# CONFIG_IPA3_MHI_PROXY is not set +# CONFIG_IPA3_MHI_PRIME_MANAGER is not set # CONFIG_SEEMP_CORE is not set # CONFIG_IPA3_REGDUMP is not set CONFIG_CLKDEV_LOOKUP=y @@ -4409,10 
+3896,9 @@ CONFIG_TIMER_OF=y CONFIG_TIMER_PROBE=y CONFIG_ARM_ARCH_TIMER=y CONFIG_ARM_ARCH_TIMER_EVTSTREAM=y -CONFIG_ARM_ARCH_TIMER_OOL_WORKAROUND=y -CONFIG_FSL_ERRATUM_A008585=y -CONFIG_HISILICON_ERRATUM_161010101=y -CONFIG_ARM64_ERRATUM_858921=y +# CONFIG_FSL_ERRATUM_A008585 is not set +# CONFIG_HISILICON_ERRATUM_161010101 is not set +# CONFIG_ARM64_ERRATUM_858921 is not set CONFIG_ARM_ARCH_TIMER_VCT_ACCESS=y # CONFIG_ARM_TIMER_SP804 is not set # CONFIG_ATMEL_PIT is not set @@ -4494,8 +3980,7 @@ CONFIG_RPMSG_QCOM_GLINK_SPI=y # # Qualcomm SoC drivers # -CONFIG_QCOM_MEM_OFFLINE=y -CONFIG_OVERRIDE_MEMORY_LIMIT=y +# CONFIG_QCOM_MEM_OFFLINE is not set # CONFIG_QCOM_CPUSS_DUMP is not set CONFIG_QCOM_RUN_QUEUE_STATS=y # CONFIG_QCOM_GSBI is not set @@ -4550,7 +4035,6 @@ CONFIG_QCOM_DCC_V2=y # CONFIG_MSM_GLADIATOR_ERP is not set # CONFIG_SDX_EXT_IPC is not set CONFIG_QCOM_SECURE_BUFFER=y -# CONFIG_MSM_REMOTEQDSS is not set CONFIG_ICNSS=y # CONFIG_ICNSS_DEBUG is not set CONFIG_ICNSS_QMI=y @@ -4569,7 +4053,6 @@ CONFIG_QSEE_IPC_IRQ_BRIDGE=y CONFIG_QCOM_GLINK=y CONFIG_QCOM_GLINK_PKT=y # CONFIG_MSM_JTAGV8 is not set -CONFIG_QCOM_QDSS_BRIDGE=y CONFIG_QTI_RPM_STATS_LOG=y CONFIG_MSM_CDSP_LOADER=y CONFIG_QCOM_SMCINVOKE=y @@ -4588,11 +4071,8 @@ CONFIG_MSM_SUSPEND_STATS_FIRST_BUCKET=1000000000 CONFIG_MEM_SHARE_QMI_SERVICE=y # CONFIG_MSM_HAB is not set # CONFIG_MSM_AGL is not set -CONFIG_RMNET_CTL=y -# CONFIG_RMNET_CTL_DEBUG is not set # CONFIG_QCOM_HGSL_TCSR_SIGNAL is not set CONFIG_MSM_PERFORMANCE=y -CONFIG_QMP_DEBUGFS_CLIENT=y CONFIG_QCOM_QHEE_ENABLE_MEM_PROTECTION=y CONFIG_QCOM_SMP2P_SLEEPSTATE=y CONFIG_QCOM_CDSP_RM=y @@ -4968,8 +4448,6 @@ CONFIG_QCOM_SPMI_ADC5=y # CONFIG_TMP007 is not set # CONFIG_TSYS01 is not set # CONFIG_TSYS02D is not set -# CONFIG_NTB is not set -# CONFIG_VME_BUS is not set CONFIG_PWM=y CONFIG_PWM_SYSFS=y # CONFIG_PWM_FSL_FTM is not set @@ -4982,9 +4460,7 @@ CONFIG_QCOM_KGSL_IOMMU=y CONFIG_IRQCHIP=y CONFIG_ARM_GIC=y CONFIG_ARM_GIC_MAX_NR=1 
-CONFIG_ARM_GIC_V2M=y CONFIG_ARM_GIC_V3=y -CONFIG_ARM_GIC_V3_ITS=y CONFIG_ARM_GIC_V3_ACL=y CONFIG_QCOM_SHOW_RESUME_IRQ=y CONFIG_PARTITION_PERCPU=y @@ -5066,14 +4542,7 @@ CONFIG_STM=y # CONFIG_FSI is not set # CONFIG_TEE is not set CONFIG_SENSORS_SSC=y -CONFIG_ESOC=y -CONFIG_ESOC_DEV=y -CONFIG_ESOC_CLIENT=y -# CONFIG_ESOC_DEBUG is not set -CONFIG_ESOC_MDM_4x=y -CONFIG_ESOC_MDM_DRV=y -CONFIG_ESOC_MDM_DBG_ENG=y -# CONFIG_MDM_DBG_REQ_ENG is not set +# CONFIG_ESOC is not set # # Qualcomm RmNet extensions @@ -5088,7 +4557,6 @@ CONFIG_ARM_PSCI_FW=y # CONFIG_ARM_PSCI_CHECKER is not set # CONFIG_ARM_SCPI_PROTOCOL is not set # CONFIG_FIRMWARE_MEMMAP is not set -# CONFIG_FW_CFG_SYSFS is not set CONFIG_QCOM_SCM_64=y CONFIG_HAVE_ARM_SMCCC=y # CONFIG_GOOGLE_FIRMWARE is not set @@ -5098,7 +4566,6 @@ CONFIG_HAVE_ARM_SMCCC=y # Tegra firmware driver # CONFIG_MSM_TZ_LOG=y -# CONFIG_ACPI is not set # # File systems @@ -5313,56 +4780,37 @@ CONFIG_CONSOLE_LOGLEVEL_DEFAULT=7 CONFIG_PRINTK_CPU_ID=y CONFIG_PRINTK_PID=y CONFIG_MESSAGE_LOGLEVEL_DEFAULT=4 -# CONFIG_BOOT_PRINTK_DELAY is not set # CONFIG_DYNAMIC_DEBUG is not set # CONFIG_DEBUG_MODULE_LOAD_INFO is not set -# CONFIG_DEBUG_CONSOLE_UNHASHED_POINTERS is not set # # Compile-time checks and compiler options # -CONFIG_DEBUG_INFO=y -# CONFIG_DEBUG_INFO_REDUCED is not set -# CONFIG_DEBUG_INFO_SPLIT is not set -# CONFIG_DEBUG_INFO_DWARF4 is not set -# CONFIG_GDB_SCRIPTS is not set CONFIG_ENABLE_WARN_DEPRECATED=y CONFIG_ENABLE_MUST_CHECK=y -CONFIG_FRAME_WARN=6144 -# CONFIG_STRIP_ASM_SYMS is not set -# CONFIG_READABLE_ASM is not set +CONFIG_FRAME_WARN=0 +CONFIG_STRIP_ASM_SYMS=y # CONFIG_UNUSED_SYMBOLS is not set -# CONFIG_PAGE_OWNER is not set -CONFIG_DEBUG_FS=y +# CONFIG_DEBUG_FS is not set # CONFIG_HEADERS_CHECK is not set +CONFIG_OPTIMIZE_INLINING=y # CONFIG_DEBUG_SECTION_MISMATCH is not set CONFIG_SECTION_MISMATCH_WARN_ONLY=y CONFIG_ARCH_WANT_FRAME_POINTERS=y CONFIG_FRAME_POINTER=y -# CONFIG_DEBUG_FORCE_WEAK_PER_CPU is not set 
-CONFIG_MAGIC_SYSRQ=y -CONFIG_MAGIC_SYSRQ_DEFAULT_ENABLE=0x1 -CONFIG_MAGIC_SYSRQ_SERIAL=y -CONFIG_DEBUG_KERNEL=y +# CONFIG_MAGIC_SYSRQ is not set +# CONFIG_DEBUG_KERNEL is not set # # Memory Debugging # -CONFIG_PAGE_EXTENSION=y -# CONFIG_DEBUG_PAGEALLOC is not set +# CONFIG_PAGE_EXTENSION is not set # CONFIG_PAGE_POISONING is not set -# CONFIG_DEBUG_PAGE_REF is not set # CONFIG_DEBUG_RODATA_TEST is not set -# CONFIG_DEBUG_OBJECTS is not set # CONFIG_SLUB_STATS is not set CONFIG_HAVE_DEBUG_KMEMLEAK=y -# CONFIG_DEBUG_KMEMLEAK is not set -# CONFIG_DEBUG_STACK_USAGE is not set -# CONFIG_DEBUG_VM is not set CONFIG_ARCH_HAS_DEBUG_VIRTUAL=y -# CONFIG_DEBUG_VIRTUAL is not set # CONFIG_DEBUG_MEMORY_INIT is not set -# CONFIG_DEBUG_PER_CPU_MAPS is not set CONFIG_HAVE_ARCH_KASAN=y CONFIG_HAVE_ARCH_KASAN_SW_TAGS=y CONFIG_CC_HAS_KASAN_GENERIC=y @@ -5372,119 +4820,50 @@ CONFIG_KASAN_STACK=0 CONFIG_ARCH_HAS_KCOV=y CONFIG_CC_HAS_SANCOV_TRACE_PC=y # CONFIG_KCOV is not set -# CONFIG_DEBUG_SHIRQ is not set # # Debug Lockups and Hangs # -# CONFIG_SOFTLOCKUP_DETECTOR is not set # CONFIG_PANIC_ON_RECURSIVE_FAULT is not set -# CONFIG_DETECT_HUNG_TASK is not set -# CONFIG_WQ_WATCHDOG is not set # CONFIG_PANIC_ON_OOPS is not set CONFIG_PANIC_ON_OOPS_VALUE=0 CONFIG_PANIC_TIMEOUT=-1 -CONFIG_SCHED_DEBUG=y +# CONFIG_SCHED_DEBUG is not set CONFIG_SCHED_INFO=y # CONFIG_PANIC_ON_SCHED_BUG is not set # CONFIG_PANIC_ON_RT_THROTTLING is not set -CONFIG_SCHEDSTATS=y -# CONFIG_SCHED_STACK_END_CHECK is not set # CONFIG_DEBUG_TIMEKEEPING is not set -# CONFIG_DEBUG_PREEMPT is not set # # Lock Debugging (spinlocks, mutexes, etc...) 
# -# CONFIG_DEBUG_RT_MUTEXES is not set -# CONFIG_DEBUG_SPINLOCK is not set -# CONFIG_DEBUG_MUTEXES is not set -# CONFIG_DEBUG_WW_MUTEX_SLOWPATH is not set -# CONFIG_DEBUG_LOCK_ALLOC is not set -# CONFIG_PROVE_LOCKING is not set -# CONFIG_LOCK_STAT is not set -# CONFIG_DEBUG_ATOMIC_SLEEP is not set -# CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set -# CONFIG_LOCK_TORTURE_TEST is not set # CONFIG_WW_MUTEX_SELFTEST is not set CONFIG_STACKTRACE=y # CONFIG_WARN_ALL_UNSEEDED_RANDOM is not set -# CONFIG_DEBUG_KOBJECT is not set CONFIG_HAVE_DEBUG_BUGVERBOSE=y CONFIG_DEBUG_BUGVERBOSE=y -CONFIG_DEBUG_LIST=y -# CONFIG_DEBUG_PI_LIST is not set -# CONFIG_DEBUG_SG is not set -# CONFIG_DEBUG_NOTIFIERS is not set -# CONFIG_DEBUG_CREDENTIALS is not set # # RCU Debugging # # CONFIG_PROVE_RCU is not set # CONFIG_TORTURE_TEST is not set -# CONFIG_RCU_PERF_TEST is not set -# CONFIG_RCU_TORTURE_TEST is not set CONFIG_RCU_CPU_STALL_TIMEOUT=21 CONFIG_RCU_PANIC_ON_STALL=0 -# CONFIG_RCU_TRACE is not set -# CONFIG_RCU_EQS_DEBUG is not set -# CONFIG_DEBUG_WQ_FORCE_RR_CPU is not set -# CONFIG_DEBUG_BLOCK_EXT_DEVT is not set -# CONFIG_CPU_HOTPLUG_STATE_CONTROL is not set -# CONFIG_NOTIFIER_ERROR_INJECTION is not set -# CONFIG_FAULT_INJECTION is not set -# CONFIG_LATENCYTOP is not set -CONFIG_NOP_TRACER=y CONFIG_HAVE_FUNCTION_TRACER=y CONFIG_HAVE_FUNCTION_GRAPH_TRACER=y CONFIG_HAVE_DYNAMIC_FTRACE=y CONFIG_HAVE_FTRACE_MCOUNT_RECORD=y CONFIG_HAVE_SYSCALL_TRACEPOINTS=y CONFIG_HAVE_C_RECORDMCOUNT=y -CONFIG_TRACE_CLOCK=y -CONFIG_RING_BUFFER=y -CONFIG_EVENT_TRACING=y -CONFIG_CONTEXT_SWITCH_TRACER=y -CONFIG_IPC_LOGGING=y -# CONFIG_QCOM_RTB is not set -CONFIG_TRACING=y -CONFIG_GENERIC_TRACER=y +# CONFIG_IPC_LOGGING is not set CONFIG_TRACING_SUPPORT=y -CONFIG_FTRACE=y -# CONFIG_FUNCTION_TRACER is not set -# CONFIG_PREEMPTIRQ_EVENTS is not set -# CONFIG_IRQSOFF_TRACER is not set -# CONFIG_PREEMPT_TRACER is not set -# CONFIG_SCHED_TRACER is not set -# CONFIG_HWLAT_TRACER is not set -# CONFIG_FTRACE_SYSCALLS 
is not set -# CONFIG_TRACER_SNAPSHOT is not set -CONFIG_BRANCH_PROFILE_NONE=y -# CONFIG_PROFILE_ANNOTATED_BRANCHES is not set -# CONFIG_STACK_TRACER is not set -# CONFIG_BLK_DEV_IO_TRACE is not set -CONFIG_UPROBE_EVENTS=y -CONFIG_BPF_EVENTS=y -CONFIG_PROBE_EVENTS=y -# CONFIG_FTRACE_STARTUP_TEST is not set -# CONFIG_TRACEPOINT_BENCHMARK is not set -# CONFIG_RING_BUFFER_BENCHMARK is not set -# CONFIG_RING_BUFFER_STARTUP_TEST is not set -# CONFIG_TRACE_EVAL_MAP_FILE is not set -CONFIG_TRACING_EVENTS_GPIO=y +# CONFIG_FTRACE is not set # CONFIG_DMA_API_DEBUG is not set # # Runtime Testing # -# CONFIG_LKDTM is not set -# CONFIG_TEST_LIST_SORT is not set -# CONFIG_TEST_SORT is not set -# CONFIG_BACKTRACE_SELF_TEST is not set -# CONFIG_RBTREE_TEST is not set -# CONFIG_REED_SOLOMON_TEST is not set -# CONFIG_INTERVAL_TREE_TEST is not set # CONFIG_ATOMIC64_SELFTEST is not set # CONFIG_TEST_HEXDUMP is not set # CONFIG_TEST_STRING_HELPERS is not set @@ -5504,13 +4883,11 @@ CONFIG_TRACING_EVENTS_GPIO=y # CONFIG_PANIC_ON_DATA_CORRUPTION is not set # CONFIG_SAMPLES is not set CONFIG_HAVE_ARCH_KGDB=y -# CONFIG_KGDB is not set CONFIG_ARCH_HAS_UBSAN_SANITIZE_ALL=y # CONFIG_ARCH_WANTS_UBSAN_NO_NULL is not set # CONFIG_UBSAN is not set CONFIG_ARCH_HAS_DEVMEM_IS_ALLOWED=y # CONFIG_ARM64_PTDUMP_CORE is not set -# CONFIG_ARM64_PTDUMP_DEBUGFS is not set # CONFIG_PID_IN_CONTEXTIDR is not set # CONFIG_ARM64_RANDOMIZE_TEXT_OFFSET is not set # CONFIG_DEBUG_WX is not set @@ -5554,21 +4931,13 @@ CONFIG_SECURITY_SELINUX_DEVELOP=y CONFIG_SECURITY_SELINUX_AVC_STATS=y CONFIG_SECURITY_SELINUX_CHECKREQPROT_VALUE=0 CONFIG_SECURITY_SELINUX_SIDTAB_HASH_BITS=9 -CONFIG_SECURITY_SMACK=y -# CONFIG_SECURITY_SMACK_BRINGUP is not set -# CONFIG_SECURITY_SMACK_NETFILTER is not set -# CONFIG_SECURITY_SMACK_APPEND_SIGNALS is not set +# CONFIG_SECURITY_SMACK is not set # CONFIG_SECURITY_TOMOYO is not set # CONFIG_SECURITY_APPARMOR is not set # CONFIG_SECURITY_LOADPIN is not set # CONFIG_SECURITY_YAMA is not set 
-CONFIG_INTEGRITY=y -# CONFIG_INTEGRITY_SIGNATURE is not set -CONFIG_INTEGRITY_AUDIT=y -# CONFIG_IMA is not set -# CONFIG_EVM is not set +# CONFIG_INTEGRITY is not set CONFIG_DEFAULT_SECURITY_SELINUX=y -# CONFIG_DEFAULT_SECURITY_SMACK is not set # CONFIG_DEFAULT_SECURITY_DAC is not set CONFIG_DEFAULT_SECURITY="selinux" @@ -5719,30 +5088,20 @@ CONFIG_CRYPTO_JITTERENTROPY=y # CONFIG_CRYPTO_USER_API_SKCIPHER is not set # CONFIG_CRYPTO_USER_API_RNG is not set # CONFIG_CRYPTO_USER_API_AEAD is not set -CONFIG_CRYPTO_HASH_INFO=y CONFIG_CRYPTO_HW=y # CONFIG_CRYPTO_DEV_FSL_CAAM_CRYPTO_API_DESC is not set # CONFIG_CRYPTO_DEV_CCP is not set -# CONFIG_CRYPTO_DEV_NITROX_CNN55XX is not set -# CONFIG_CRYPTO_DEV_CAVIUM_ZIP is not set # CONFIG_CRYPTO_DEV_QCE is not set CONFIG_CRYPTO_DEV_QCOM_MSM_QCE=y CONFIG_CRYPTO_DEV_QCRYPTO=y CONFIG_CRYPTO_DEV_QCEDEV=y # CONFIG_CRYPTO_DEV_OTA_CRYPTO is not set CONFIG_CRYPTO_DEV_QCOM_ICE=y -CONFIG_ASYMMETRIC_KEY_TYPE=y -CONFIG_ASYMMETRIC_PUBLIC_KEY_SUBTYPE=y -CONFIG_X509_CERTIFICATE_PARSER=y -CONFIG_PKCS7_MESSAGE_PARSER=y +# CONFIG_ASYMMETRIC_KEY_TYPE is not set # # Certificates for signature checking # -CONFIG_SYSTEM_TRUSTED_KEYRING=y -CONFIG_SYSTEM_TRUSTED_KEYS="" -# CONFIG_SYSTEM_EXTRA_CERTIFICATE is not set -# CONFIG_SECONDARY_TRUSTED_KEYRING is not set # CONFIG_SYSTEM_BLACKLIST_KEYRING is not set CONFIG_ARM64_CRYPTO=y CONFIG_CRYPTO_SHA256_ARM64=y @@ -5758,7 +5117,7 @@ CONFIG_CRYPTO_AES_ARM64_CE_BLK=y CONFIG_CRYPTO_AES_ARM64_NEON_BLK=y CONFIG_CRYPTO_CHACHA20_NEON=y # CONFIG_CRYPTO_AES_ARM64_BS is not set -CONFIG_BINARY_PRINTF=y +# CONFIG_BINARY_PRINTF is not set # # Library routines @@ -5772,6 +5131,7 @@ CONFIG_GENERIC_NET_UTILS=y CONFIG_GENERIC_PCI_IOMAP=y CONFIG_GENERIC_IO=y CONFIG_ARCH_USE_CMPXCHG_LOCKREF=y +CONFIG_ARCH_HAS_FAST_MULTIPLIER=y CONFIG_CRC_CCITT=y CONFIG_CRC16=y CONFIG_CRC_T10DIF=y @@ -5787,9 +5147,7 @@ CONFIG_CRC32_SLICEBY8=y CONFIG_LIBCRC32C=y CONFIG_CRC8=y CONFIG_XXHASH=y -CONFIG_AUDIT_GENERIC=y 
CONFIG_AUDIT_ARCH_COMPAT_GENERIC=y -CONFIG_AUDIT_COMPAT_GENERIC=y # CONFIG_RANDOM32_SELFTEST is not set CONFIG_ZLIB_INFLATE=y CONFIG_ZLIB_DEFLATE=y @@ -5812,7 +5170,6 @@ CONFIG_TEXTSEARCH_BM=y CONFIG_TEXTSEARCH_FSM=y CONFIG_ASSOCIATIVE_ARRAY=y CONFIG_HAS_IOMEM=y -CONFIG_HAS_IOPORT_MAP=y CONFIG_HAS_DMA=y # CONFIG_SGL_ALLOC is not set # CONFIG_DMA_NOOP_OPS is not set @@ -5828,7 +5185,6 @@ CONFIG_CLZ_TAB=y # CONFIG_IRQ_POLL is not set CONFIG_MPILIB=y CONFIG_LIBFDT=y -CONFIG_OID_REGISTRY=y # CONFIG_SG_SPLIT is not set CONFIG_SG_POOL=y CONFIG_ARCH_HAS_SG_CHAIN=y diff --git a/arch/arm64/include/asm/arch_gicv3.h b/arch/arm64/include/asm/arch_gicv3.h index a658464763a8..475a88f0029d 100644 --- a/arch/arm64/include/asm/arch_gicv3.h +++ b/arch/arm64/include/asm/arch_gicv3.h @@ -120,8 +120,8 @@ static inline void gic_write_bpr1(u32 val) write_sysreg_s(val, SYS_ICC_BPR1_EL1); } -#define gic_read_typer(c) readq_relaxed_no_log(c) -#define gic_write_irouter(v, c) writeq_relaxed_no_log(v, c) +#define gic_read_typer(c) readq_relaxed(c) +#define gic_write_irouter(v, c) writeq_relaxed(v, c) #define gic_read_lpir(c) readq_relaxed(c) #define gic_write_lpir(v, c) writeq_relaxed(v, c) diff --git a/arch/arm64/include/asm/atomic.h b/arch/arm64/include/asm/atomic.h index c0235e0ff849..36d58b25edff 100644 --- a/arch/arm64/include/asm/atomic.h +++ b/arch/arm64/include/asm/atomic.h @@ -21,13 +21,37 @@ #define __ASM_ATOMIC_H #include +#include #include #include +#include #include #ifdef __KERNEL__ +/* + * To avoid having to allocate registers that pass the counter address and + * address of the call site to the overflow handler, encode the register and + * call site offset in a dummy cbz instruction that we can decode later. 
+ */ +#define REFCOUNT_CHECK_TAIL \ +" .subsection 1\n" \ +"33: brk " __stringify(REFCOUNT_BRK_IMM) "\n" \ +" cbz %[counter], 22b\n" /* never reached */ \ +" .previous\n" + +#define REFCOUNT_POST_CHECK_NEG \ +"22: b.mi 33f\n" \ + REFCOUNT_CHECK_TAIL + +#define REFCOUNT_POST_CHECK_NEG_OR_ZERO \ +" b.eq 33f\n" \ + REFCOUNT_POST_CHECK_NEG + +#define REFCOUNT_PRE_CHECK_ZERO(reg) "ccmp " #reg ", wzr, #8, pl\n" +#define REFCOUNT_PRE_CHECK_NONE(reg) + #define __ARM64_IN_ATOMIC_IMPL #if defined(CONFIG_ARM64_LSE_ATOMICS) && defined(CONFIG_AS_LSE) diff --git a/arch/arm64/include/asm/atomic_ll_sc.h b/arch/arm64/include/asm/atomic_ll_sc.h index f5a2d09afb38..eedf79f425dd 100644 --- a/arch/arm64/include/asm/atomic_ll_sc.h +++ b/arch/arm64/include/asm/atomic_ll_sc.h @@ -327,4 +327,54 @@ __CMPXCHG_DBL(_mb, dmb ish, l, "memory") #undef __CMPXCHG_DBL +#define REFCOUNT_OP(op, asm_op, pre, post, l) \ +__LL_SC_INLINE int \ +__LL_SC_PREFIX(__refcount_##op(int i, atomic_t *r)) \ +{ \ + unsigned int tmp; \ + int result; \ + \ + asm volatile("// refcount_" #op "\n" \ +" prfm pstl1strm, %[cval]\n" \ +"1: ldxr %w1, %[cval]\n" \ +" " #asm_op " %w[val], %w1, %w[i]\n" \ + REFCOUNT_PRE_CHECK_ ## pre (%w1) \ +" st" #l "xr %w1, %w[val], %[cval]\n" \ +" cbnz %w1, 1b\n" \ + REFCOUNT_POST_CHECK_ ## post \ + : [val] "=&r"(result), "=&r"(tmp), [cval] "+Q"(r->counter) \ + : [counter] "r"(&r->counter), [i] "Ir" (i) \ + : "cc"); \ + \ + return result; \ +} \ +__LL_SC_EXPORT(__refcount_##op); + +REFCOUNT_OP(add_lt, adds, ZERO, NEG_OR_ZERO, ); +REFCOUNT_OP(sub_lt, subs, NONE, NEG, l); +REFCOUNT_OP(sub_le, subs, NONE, NEG_OR_ZERO, l); + +__LL_SC_INLINE int +__LL_SC_PREFIX(__refcount_add_not_zero(int i, atomic_t *r)) +{ + unsigned int tmp; + int result; + + asm volatile("// refcount_add_not_zero\n" +" prfm pstl1strm, %[cval]\n" +"1: ldxr %w[val], %[cval]\n" +" cbz %w[val], 2f\n" +" adds %w[val], %w[val], %w[i]\n" +" stxr %w1, %w[val], %[cval]\n" +" cbnz %w1, 1b\n" + REFCOUNT_POST_CHECK_NEG +"2:" + : [val] 
"=&r" (result), "=&r" (tmp), [cval] "+Q" (r->counter) + : [counter] "r"(&r->counter), [i] "Ir" (i) + : "cc"); + + return result; +} +__LL_SC_EXPORT(__refcount_add_not_zero); + #endif /* __ASM_ATOMIC_LL_SC_H */ diff --git a/arch/arm64/include/asm/atomic_lse.h b/arch/arm64/include/asm/atomic_lse.h index f9b0b09153e0..9c0d23856251 100644 --- a/arch/arm64/include/asm/atomic_lse.h +++ b/arch/arm64/include/asm/atomic_lse.h @@ -531,4 +531,85 @@ __CMPXCHG_DBL(_mb, al, "memory") #undef __LL_SC_CMPXCHG_DBL #undef __CMPXCHG_DBL +#define REFCOUNT_ADD_OP(op, pre, post) \ +static inline int __refcount_##op(int i, atomic_t *r) \ +{ \ + register int w0 asm ("w0") = i; \ + register atomic_t *x1 asm ("x1") = r; \ + \ + asm volatile(ARM64_LSE_ATOMIC_INSN( \ + /* LL/SC */ \ + __LL_SC_CALL(__refcount_##op) \ + " cmp %w0, wzr\n" \ + __nops(1), \ + /* LSE atomics */ \ + " ldadd %w[i], w30, %[cval]\n" \ + " adds %w[i], %w[i], w30\n" \ + REFCOUNT_PRE_CHECK_ ## pre (w30)) \ + REFCOUNT_POST_CHECK_ ## post \ + : [i] "+r" (w0), [cval] "+Q" (r->counter) \ + : [counter] "r"(&r->counter), "r" (x1) \ + : __LL_SC_CLOBBERS, "cc"); \ + \ + return w0; \ +} + +REFCOUNT_ADD_OP(add_lt, ZERO, NEG_OR_ZERO); + +#define REFCOUNT_SUB_OP(op, post) \ +static inline int __refcount_##op(int i, atomic_t *r) \ +{ \ + register int w0 asm ("w0") = i; \ + register atomic_t *x1 asm ("x1") = r; \ + \ + asm volatile(ARM64_LSE_ATOMIC_INSN( \ + /* LL/SC */ \ + __LL_SC_CALL(__refcount_##op) \ + " cmp %w0, wzr\n" \ + __nops(1), \ + /* LSE atomics */ \ + " neg %w[i], %w[i]\n" \ + " ldaddl %w[i], w30, %[cval]\n" \ + " adds %w[i], %w[i], w30\n") \ + REFCOUNT_POST_CHECK_ ## post \ + : [i] "+r" (w0), [cval] "+Q" (r->counter) \ + : [counter] "r" (&r->counter), "r" (x1) \ + : __LL_SC_CLOBBERS, "cc"); \ + \ + return w0; \ +} + +REFCOUNT_SUB_OP(sub_lt, NEG); +REFCOUNT_SUB_OP(sub_le, NEG_OR_ZERO); + +static inline int __refcount_add_not_zero(int i, atomic_t *r) +{ + register int result asm ("w0"); + register atomic_t *x1 asm ("x1") = 
r; + + asm volatile(ARM64_LSE_ATOMIC_INSN( + /* LL/SC */ + " mov %w0, %w[i]\n" + __LL_SC_CALL(__refcount_add_not_zero) + " cmp %w0, wzr\n" + __nops(6), + /* LSE atomics */ + " ldr %w0, %[cval]\n" + "1: cmp %w0, wzr\n" + " b.eq 2f\n" + " add w30, %w0, %w[i]\n" + " cas %w0, w30, %[cval]\n" + " sub w30, w30, %w[i]\n" + " cmp %w0, w30\n" + " b.ne 1b\n" + " adds %w0, w30, %w[i]\n" + "2:\n") + REFCOUNT_POST_CHECK_NEG + : "=&r" (result), [cval] "+Q" (r->counter) + : [counter] "r" (&r->counter), [i] "Ir" (i), "r" (x1) + : __LL_SC_CLOBBERS, "cc"); + + return result; +} + #endif /* __ASM_ATOMIC_LSE_H */ diff --git a/arch/arm64/include/asm/brk-imm.h b/arch/arm64/include/asm/brk-imm.h index 2945fe6cd863..a38160087772 100644 --- a/arch/arm64/include/asm/brk-imm.h +++ b/arch/arm64/include/asm/brk-imm.h @@ -19,6 +19,7 @@ * 0x9xx: tag-based KASAN trap (allowed values 0x900 - 0x9ff) */ #define FAULT_BRK_IMM 0x100 +#define REFCOUNT_BRK_IMM 0x101 #define KGDB_DYN_DBG_BRK_IMM 0x400 #define KGDB_COMPILED_DBG_BRK_IMM 0x401 #define BUG_BRK_IMM 0x800 diff --git a/arch/arm64/include/asm/cpufeature.h b/arch/arm64/include/asm/cpufeature.h index 368d99b65f7a..0507f53da93f 100644 --- a/arch/arm64/include/asm/cpufeature.h +++ b/arch/arm64/include/asm/cpufeature.h @@ -353,7 +353,7 @@ static inline bool cpu_have_feature(unsigned int num) } /* System capability check for constant caps */ -static inline bool __cpus_have_const_cap(int num) +static __always_inline bool __cpus_have_const_cap(int num) { if (num >= ARM64_NCAPS) return false; @@ -367,7 +367,7 @@ static inline bool cpus_have_cap(unsigned int num) return test_bit(num, cpu_hwcaps); } -static inline bool cpus_have_const_cap(int num) +static __always_inline bool cpus_have_const_cap(int num) { if (static_branch_likely(&arm64_const_caps_ready)) return __cpus_have_const_cap(num); diff --git a/arch/arm64/include/asm/debug-monitors.h b/arch/arm64/include/asm/debug-monitors.h index 790d02358f70..948daa972c6f 100644 --- 
a/arch/arm64/include/asm/debug-monitors.h +++ b/arch/arm64/include/asm/debug-monitors.h @@ -95,18 +95,24 @@ struct step_hook { int (*fn)(struct pt_regs *regs, unsigned int esr); }; -void register_step_hook(struct step_hook *hook); -void unregister_step_hook(struct step_hook *hook); +void register_user_step_hook(struct step_hook *hook); +void unregister_user_step_hook(struct step_hook *hook); + +void register_kernel_step_hook(struct step_hook *hook); +void unregister_kernel_step_hook(struct step_hook *hook); struct break_hook { struct list_head node; - u32 esr_val; - u32 esr_mask; int (*fn)(struct pt_regs *regs, unsigned int esr); + u16 imm; + u16 mask; /* These bits are ignored when comparing with imm */ }; -void register_break_hook(struct break_hook *hook); -void unregister_break_hook(struct break_hook *hook); +void register_user_break_hook(struct break_hook *hook); +void unregister_user_break_hook(struct break_hook *hook); + +void register_kernel_break_hook(struct break_hook *hook); +void unregister_kernel_break_hook(struct break_hook *hook); u8 debug_monitors_arch(void); diff --git a/arch/arm64/include/asm/io.h b/arch/arm64/include/asm/io.h index e174fe5e208a..49bb9a020a09 100644 --- a/arch/arm64/include/asm/io.h +++ b/arch/arm64/include/asm/io.h @@ -30,35 +30,38 @@ #include #include #include -#include #include /* * Generic IO read/write. These perform native-endian accesses. - * that some architectures will want to re-define __raw_{read,write}w. 
*/ -static inline void __raw_writeb_no_log(u8 val, volatile void __iomem *addr) +#define __raw_writeb __raw_writeb +static inline void __raw_writeb(u8 val, volatile void __iomem *addr) { asm volatile("strb %w0, [%1]" : : "rZ" (val), "r" (addr)); } -static inline void __raw_writew_no_log(u16 val, volatile void __iomem *addr) +#define __raw_writew __raw_writew +static inline void __raw_writew(u16 val, volatile void __iomem *addr) { asm volatile("strh %w0, [%1]" : : "rZ" (val), "r" (addr)); } -static inline void __raw_writel_no_log(u32 val, volatile void __iomem *addr) +#define __raw_writel __raw_writel +static inline void __raw_writel(u32 val, volatile void __iomem *addr) { asm volatile("str %w0, [%1]" : : "rZ" (val), "r" (addr)); } -static inline void __raw_writeq_no_log(u64 val, volatile void __iomem *addr) +#define __raw_writeq __raw_writeq +static inline void __raw_writeq(u64 val, volatile void __iomem *addr) { asm volatile("str %x0, [%1]" : : "rZ" (val), "r" (addr)); } -static inline u8 __raw_readb_no_log(const volatile void __iomem *addr) +#define __raw_readb __raw_readb +static inline u8 __raw_readb(const volatile void __iomem *addr) { u8 val; asm volatile(ALTERNATIVE("ldrb %w0, [%1]", @@ -68,7 +71,8 @@ static inline u8 __raw_readb_no_log(const volatile void __iomem *addr) return val; } -static inline u16 __raw_readw_no_log(const volatile void __iomem *addr) +#define __raw_readw __raw_readw +static inline u16 __raw_readw(const volatile void __iomem *addr) { u16 val; @@ -79,7 +83,8 @@ static inline u16 __raw_readw_no_log(const volatile void __iomem *addr) return val; } -static inline u32 __raw_readl_no_log(const volatile void __iomem *addr) +#define __raw_readl __raw_readl +static inline u32 __raw_readl(const volatile void __iomem *addr) { u32 val; asm volatile(ALTERNATIVE("ldr %w0, [%1]", @@ -89,7 +94,8 @@ static inline u32 __raw_readl_no_log(const volatile void __iomem *addr) return val; } -static inline u64 __raw_readq_no_log(const volatile void __iomem 
*addr) +#define __raw_readq __raw_readq +static inline u64 __raw_readq(const volatile void __iomem *addr) { u64 val; asm volatile(ALTERNATIVE("ldr %0, [%1]", @@ -99,46 +105,6 @@ static inline u64 __raw_readq_no_log(const volatile void __iomem *addr) return val; } -/* - * There may be cases when clients don't want to support or can't support the - * logging, The appropriate functions can be used but clinets should carefully - * consider why they can't support the logging - */ - -#define __raw_write_logged(v, a, _t) ({ \ - int _ret; \ - volatile void __iomem *_a = (a); \ - void *_addr = (void __force *)(_a); \ - _ret = uncached_logk(LOGK_WRITEL, _addr); \ - ETB_WAYPOINT; \ - __raw_write##_t##_no_log((v), _a); \ - if (_ret) \ - LOG_BARRIER; \ - }) - -#define __raw_writeb(v, a) __raw_write_logged((v), a, b) -#define __raw_writew(v, a) __raw_write_logged((v), a, w) -#define __raw_writel(v, a) __raw_write_logged((v), a, l) -#define __raw_writeq(v, a) __raw_write_logged((v), a, q) - -#define __raw_read_logged(a, _l, _t) ({ \ - _t __a; \ - const volatile void __iomem *_a = (a); \ - void *_addr = (void __force *)(_a); \ - int _ret; \ - _ret = uncached_logk(LOGK_READL, _addr); \ - ETB_WAYPOINT; \ - __a = __raw_read##_l##_no_log(_a); \ - if (_ret) \ - LOG_BARRIER; \ - __a; \ - }) - -#define __raw_readb(a) __raw_read_logged((a), b, u8) -#define __raw_readw(a) __raw_read_logged((a), w, u16) -#define __raw_readl(a) __raw_read_logged((a), l, u32) -#define __raw_readq(a) __raw_read_logged((a), q, u64) - /* IO barriers */ #define __iormb(v) \ ({ \ @@ -176,22 +142,6 @@ static inline u64 __raw_readq_no_log(const volatile void __iomem *addr) #define writel_relaxed(v,c) ((void)__raw_writel((__force u32)cpu_to_le32(v),(c))) #define writeq_relaxed(v,c) ((void)__raw_writeq((__force u64)cpu_to_le64(v),(c))) -#define readb_relaxed_no_log(c) ({ u8 __v = __raw_readb_no_log(c); __v; }) -#define readw_relaxed_no_log(c) \ - ({ u16 __v = le16_to_cpu((__force __le16)__raw_readw_no_log(c)); __v; }) 
-#define readl_relaxed_no_log(c) \ - ({ u32 __v = le32_to_cpu((__force __le32)__raw_readl_no_log(c)); __v; }) -#define readq_relaxed_no_log(c) \ - ({ u64 __v = le64_to_cpu((__force __le64)__raw_readq_no_log(c)); __v; }) - -#define writeb_relaxed_no_log(v, c) ((void)__raw_writeb_no_log((v), (c))) -#define writew_relaxed_no_log(v, c) \ - ((void)__raw_writew_no_log((__force u16)cpu_to_le32(v), (c))) -#define writel_relaxed_no_log(v, c) \ - ((void)__raw_writel_no_log((__force u32)cpu_to_le32(v), (c))) -#define writeq_relaxed_no_log(v, c) \ - ((void)__raw_writeq_no_log((__force u64)cpu_to_le32(v), (c))) - /* * I/O memory access primitives. Reads are ordered relative to any * following Normal memory access. Writes are ordered relative to any prior @@ -207,24 +157,6 @@ static inline u64 __raw_readq_no_log(const volatile void __iomem *addr) #define writel(v,c) ({ __iowmb(); writel_relaxed((v),(c)); }) #define writeq(v,c) ({ __iowmb(); writeq_relaxed((v),(c)); }) -#define readb_no_log(c) \ - ({ u8 __v = readb_relaxed_no_log(c); __iormb(__v); __v; }) -#define readw_no_log(c) \ - ({ u16 __v = readw_relaxed_no_log(c); __iormb(__v); __v; }) -#define readl_no_log(c) \ - ({ u32 __v = readl_relaxed_no_log(c); __iormb(__v); __v; }) -#define readq_no_log(c) \ - ({ u64 __v = readq_relaxed_no_log(c); __iormb(__v); __v; }) - -#define writeb_no_log(v, c) \ - ({ __iowmb(); writeb_relaxed_no_log((v), (c)); }) -#define writew_no_log(v, c) \ - ({ __iowmb(); writew_relaxed_no_log((v), (c)); }) -#define writel_no_log(v, c) \ - ({ __iowmb(); writel_relaxed_no_log((v), (c)); }) -#define writeq_no_log(v, c) \ - ({ __iowmb(); writeq_relaxed_no_log((v), (c)); }) - /* * I/O port access primitives. 
*/ diff --git a/arch/arm64/include/asm/jump_label.h b/arch/arm64/include/asm/jump_label.h index 7e2b3e360086..472023498d71 100644 --- a/arch/arm64/include/asm/jump_label.h +++ b/arch/arm64/include/asm/jump_label.h @@ -26,13 +26,16 @@ #define JUMP_LABEL_NOP_SIZE AARCH64_INSN_SIZE -static __always_inline bool arch_static_branch(struct static_key *key, bool branch) +static __always_inline bool arch_static_branch(struct static_key *key, + bool branch) { - asm_volatile_goto("1: nop\n\t" - ".pushsection __jump_table, \"aw\"\n\t" - ".align 3\n\t" - ".quad 1b, %l[l_yes], %c0\n\t" - ".popsection\n\t" + asm_volatile_goto( + "1: nop \n\t" + " .pushsection __jump_table, \"aw\" \n\t" + " .align 3 \n\t" + " .long 1b - ., %l[l_yes] - . \n\t" + " .quad %c0 - . \n\t" + " .popsection \n\t" : : "i"(&((char *)key)[branch]) : : l_yes); return false; @@ -40,13 +43,16 @@ l_yes: return true; } -static __always_inline bool arch_static_branch_jump(struct static_key *key, bool branch) +static __always_inline bool arch_static_branch_jump(struct static_key *key, + bool branch) { - asm_volatile_goto("1: b %l[l_yes]\n\t" - ".pushsection __jump_table, \"aw\"\n\t" - ".align 3\n\t" - ".quad 1b, %l[l_yes], %c0\n\t" - ".popsection\n\t" + asm_volatile_goto( + "1: b %l[l_yes] \n\t" + " .pushsection __jump_table, \"aw\" \n\t" + " .align 3 \n\t" + " .long 1b - ., %l[l_yes] - . \n\t" + " .quad %c0 - . 
\n\t" + " .popsection \n\t" : : "i"(&((char *)key)[branch]) : : l_yes); return false; @@ -54,13 +60,5 @@ l_yes: return true; } -typedef u64 jump_label_t; - -struct jump_entry { - jump_label_t code; - jump_label_t target; - jump_label_t key; -}; - #endif /* __ASSEMBLY__ */ #endif /* __ASM_JUMP_LABEL_H */ diff --git a/arch/arm64/include/asm/mmu_context.h b/arch/arm64/include/asm/mmu_context.h index 160059ab05c3..7214bd926014 100644 --- a/arch/arm64/include/asm/mmu_context.h +++ b/arch/arm64/include/asm/mmu_context.h @@ -34,21 +34,16 @@ #include #include #include -#include extern bool rodata_full; static inline void contextidr_thread_switch(struct task_struct *next) { - pid_t pid = task_pid_nr(next); - if (!IS_ENABLED(CONFIG_PID_IN_CONTEXTIDR)) return; - write_sysreg(pid, contextidr_el1); + write_sysreg(task_pid_nr(next), contextidr_el1); isb(); - - } /* diff --git a/arch/arm64/include/asm/neon-intrinsics.h b/arch/arm64/include/asm/neon-intrinsics.h new file mode 100644 index 000000000000..71abfc7612b2 --- /dev/null +++ b/arch/arm64/include/asm/neon-intrinsics.h @@ -0,0 +1,43 @@ +/* + * Copyright (C) 2018 Linaro, Ltd. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#ifndef __ASM_NEON_INTRINSICS_H +#define __ASM_NEON_INTRINSICS_H + +#include + +/* + * In the kernel, u64/s64 are [un]signed long long, not [un]signed long. + * So by redefining these macros to the former, we can force gcc-stdint.h + * to define uint64_t / int64_t in a compatible manner. + */ + +#ifdef __INT64_TYPE__ +#undef __INT64_TYPE__ +#define __INT64_TYPE__ long long +#endif + +#ifdef __UINT64_TYPE__ +#undef __UINT64_TYPE__ +#define __UINT64_TYPE__ unsigned long long +#endif + +/* + * genksyms chokes on the ARM NEON intrinsics system header, but we + * don't export anything it defines anyway, so just disregard when + * genksyms executes. 
+ */ +#ifndef __GENKSYMS__ +#include +#endif + +#ifdef CONFIG_CC_IS_CLANG +#pragma clang diagnostic ignored "-Wincompatible-pointer-types" +#endif + +#endif /* __ASM_NEON_INTRINSICS_H */ diff --git a/arch/arm64/include/asm/refcount.h b/arch/arm64/include/asm/refcount.h new file mode 100644 index 000000000000..3c99b29f4549 --- /dev/null +++ b/arch/arm64/include/asm/refcount.h @@ -0,0 +1,60 @@ +/* + * arm64-specific implementation of refcount_t. Based on x86 version and + * PAX_REFCOUNT from PaX/grsecurity. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#ifndef __ASM_REFCOUNT_H +#define __ASM_REFCOUNT_H + +#include + +#include + +static __always_inline void refcount_add(int i, refcount_t *r) +{ + __refcount_add_lt(i, &r->refs); +} + +static __always_inline void refcount_inc(refcount_t *r) +{ + __refcount_add_lt(1, &r->refs); +} + +static __always_inline void refcount_dec(refcount_t *r) +{ + __refcount_sub_le(1, &r->refs); +} + +static __always_inline __must_check bool refcount_sub_and_test(unsigned int i, + refcount_t *r) +{ + bool ret = __refcount_sub_lt(i, &r->refs) == 0; + + if (ret) { + smp_acquire__after_ctrl_dep(); + return true; + } + return false; +} + +static __always_inline __must_check bool refcount_dec_and_test(refcount_t *r) +{ + return refcount_sub_and_test(1, r); +} + +static __always_inline __must_check bool refcount_add_not_zero(unsigned int i, + refcount_t *r) +{ + return __refcount_add_not_zero(i, &r->refs) != 0; +} + +static __always_inline __must_check bool refcount_inc_not_zero(refcount_t *r) +{ + return __refcount_add_not_zero(1, &r->refs) != 0; +} + +#endif diff --git a/arch/arm64/include/asm/smp.h b/arch/arm64/include/asm/smp.h index f82b447bd34f..b58ea9338f4e 100644 --- a/arch/arm64/include/asm/smp.h +++ b/arch/arm64/include/asm/smp.h @@ -95,14 +95,7 @@ extern void secondary_entry(void); 
extern void arch_send_call_function_single_ipi(int cpu); extern void arch_send_call_function_ipi_mask(const struct cpumask *mask); -#ifdef CONFIG_ARM64_ACPI_PARKING_PROTOCOL extern void arch_send_wakeup_ipi_mask(const struct cpumask *mask); -#else -static inline void arch_send_wakeup_ipi_mask(const struct cpumask *mask) -{ - BUILD_BUG(); -} -#endif extern int __cpu_disable(void); diff --git a/arch/arm64/kernel/Makefile b/arch/arm64/kernel/Makefile index e5fa4001e4ff..ff2d7ab23d59 100644 --- a/arch/arm64/kernel/Makefile +++ b/arch/arm64/kernel/Makefile @@ -38,8 +38,7 @@ arm64-obj-$(CONFIG_FUNCTION_TRACER) += ftrace.o entry-ftrace.o arm64-obj-$(CONFIG_MODULES) += arm64ksyms.o module.o arm64-obj-$(CONFIG_ARM64_MODULE_PLTS) += module-plts.o arm64-obj-$(CONFIG_PERF_EVENTS) += perf_regs.o perf_callchain.o -arm64-obj-$(CONFIG_HW_PERF_EVENTS) += perf_event.o perf_trace_counters.o \ - perf_trace_user.o +arm64-obj-$(CONFIG_HW_PERF_EVENTS) += perf_event.o arm64-obj-$(CONFIG_HAVE_HW_BREAKPOINT) += hw_breakpoint.o arm64-obj-$(CONFIG_CPU_PM) += sleep.o suspend.o arm64-obj-$(CONFIG_CPU_IDLE) += cpuidle.o diff --git a/arch/arm64/kernel/debug-monitors.c b/arch/arm64/kernel/debug-monitors.c index edb63bf2ac1c..cd2700829a09 100644 --- a/arch/arm64/kernel/debug-monitors.c +++ b/arch/arm64/kernel/debug-monitors.c @@ -162,25 +162,46 @@ static void clear_regs_spsr_ss(struct pt_regs *regs) } NOKPROBE_SYMBOL(clear_regs_spsr_ss); -/* EL1 Single Step Handler hooks */ -static LIST_HEAD(step_hook); -static DEFINE_SPINLOCK(step_hook_lock); +static DEFINE_SPINLOCK(debug_hook_lock); +static LIST_HEAD(user_step_hook); +static LIST_HEAD(kernel_step_hook); -void register_step_hook(struct step_hook *hook) +static void register_debug_hook(struct list_head *node, struct list_head *list) { - spin_lock(&step_hook_lock); - list_add_rcu(&hook->node, &step_hook); - spin_unlock(&step_hook_lock); + spin_lock(&debug_hook_lock); + list_add_rcu(node, list); + spin_unlock(&debug_hook_lock); + } -void 
unregister_step_hook(struct step_hook *hook) +static void unregister_debug_hook(struct list_head *node) { - spin_lock(&step_hook_lock); - list_del_rcu(&hook->node); - spin_unlock(&step_hook_lock); + spin_lock(&debug_hook_lock); + list_del_rcu(node); + spin_unlock(&debug_hook_lock); synchronize_rcu(); } +void register_user_step_hook(struct step_hook *hook) +{ + register_debug_hook(&hook->node, &user_step_hook); +} + +void unregister_user_step_hook(struct step_hook *hook) +{ + unregister_debug_hook(&hook->node); +} + +void register_kernel_step_hook(struct step_hook *hook) +{ + register_debug_hook(&hook->node, &kernel_step_hook); +} + +void unregister_kernel_step_hook(struct step_hook *hook) +{ + unregister_debug_hook(&hook->node); +} + /* * Call registered single step handlers * There is no Syndrome info to check for determining the handler. @@ -190,11 +211,14 @@ void unregister_step_hook(struct step_hook *hook) static int call_step_hook(struct pt_regs *regs, unsigned int esr) { struct step_hook *hook; + struct list_head *list; int retval = DBG_HOOK_ERROR; + list = user_mode(regs) ? &user_step_hook : &kernel_step_hook; + rcu_read_lock(); - list_for_each_entry_rcu(hook, &step_hook, node) { + list_for_each_entry_rcu(hook, list, node) { retval = hook->fn(regs, esr); if (retval == DBG_HOOK_HANDLED) break; @@ -272,33 +296,44 @@ NOKPROBE_SYMBOL(single_step_handler); * hit within breakpoint handler, especically in kprobes. * Use reader/writer locks instead of plain spinlock. 
*/ -static LIST_HEAD(break_hook); -static DEFINE_SPINLOCK(break_hook_lock); +static LIST_HEAD(user_break_hook); +static LIST_HEAD(kernel_break_hook); -void register_break_hook(struct break_hook *hook) +void register_user_break_hook(struct break_hook *hook) { - spin_lock(&break_hook_lock); - list_add_rcu(&hook->node, &break_hook); - spin_unlock(&break_hook_lock); + register_debug_hook(&hook->node, &user_break_hook); } -void unregister_break_hook(struct break_hook *hook) +void unregister_user_break_hook(struct break_hook *hook) { - spin_lock(&break_hook_lock); - list_del_rcu(&hook->node); - spin_unlock(&break_hook_lock); - synchronize_rcu(); + unregister_debug_hook(&hook->node); +} + +void register_kernel_break_hook(struct break_hook *hook) +{ + register_debug_hook(&hook->node, &kernel_break_hook); +} + +void unregister_kernel_break_hook(struct break_hook *hook) +{ + unregister_debug_hook(&hook->node); } static int call_break_hook(struct pt_regs *regs, unsigned int esr) { struct break_hook *hook; + struct list_head *list; int (*fn)(struct pt_regs *regs, unsigned int esr) = NULL; + list = user_mode(regs) ? &user_break_hook : &kernel_break_hook; + rcu_read_lock(); - list_for_each_entry_rcu(hook, &break_hook, node) - if ((esr & hook->esr_mask) == hook->esr_val) + list_for_each_entry_rcu(hook, list, node) { + unsigned int comment = esr & BRK64_ESR_MASK; + + if ((comment & ~hook->mask) == hook->imm) fn = hook->fn; + } rcu_read_unlock(); return fn ? 
fn(regs, esr) : DBG_HOOK_ERROR; diff --git a/arch/arm64/kernel/image.h b/arch/arm64/kernel/image.h index 12af2ba8d558..65eae7b52b6b 100644 --- a/arch/arm64/kernel/image.h +++ b/arch/arm64/kernel/image.h @@ -73,11 +73,7 @@ #ifdef CONFIG_EFI -/* - * Use ABSOLUTE() to avoid ld.lld treating this as a relative symbol: - * https://github.com/ClangBuiltLinux/linux/issues/561 - */ -__efistub_stext_offset = ABSOLUTE(stext - _text); +__efistub_stext_offset = stext - _text; /* * The EFI stub has its own symbol namespace prefixed by __efistub_, to @@ -105,9 +101,6 @@ __efistub___memmove = __pi_memmove; __efistub___memset = __pi_memset; #endif -__efistub__text = _text; -__efistub__end = _end; -__efistub__edata = _edata; __efistub_screen_info = screen_info; #endif diff --git a/arch/arm64/kernel/io.c b/arch/arm64/kernel/io.c index b08c4ebc7f0d..354be2a872ae 100644 --- a/arch/arm64/kernel/io.c +++ b/arch/arm64/kernel/io.c @@ -27,21 +27,21 @@ void __memcpy_fromio(void *to, const volatile void __iomem *from, size_t count) { while (count && (!IS_ALIGNED((unsigned long)from, 8) || !IS_ALIGNED((unsigned long)to, 8))) { - *(u8 *)to = __raw_readb_no_log(from); + *(u8 *)to = __raw_readb(from); from++; to++; count--; } while (count >= 8) { - *(u64 *)to = __raw_readq_no_log(from); + *(u64 *)to = __raw_readq(from); from += 8; to += 8; count -= 8; } while (count) { - *(u8 *)to = __raw_readb_no_log(from); + *(u8 *)to = __raw_readb(from); from++; to++; count--; @@ -56,21 +56,21 @@ void __memcpy_toio(volatile void __iomem *to, const void *from, size_t count) { while (count && (!IS_ALIGNED((unsigned long)to, 8) || !IS_ALIGNED((unsigned long)from, 8))) { - __raw_writeb_no_log(*(volatile u8 *)from, to); + __raw_writeb(*(volatile u8 *)from, to); from++; to++; count--; } while (count >= 8) { - __raw_writeq_no_log(*(volatile u64 *)from, to); + __raw_writeq(*(volatile u64 *)from, to); from += 8; to += 8; count -= 8; } while (count) { - __raw_writeb_no_log(*(volatile u8 *)from, to); + 
__raw_writeb(*(volatile u8 *)from, to); from++; to++; count--; @@ -90,19 +90,19 @@ void __memset_io(volatile void __iomem *dst, int c, size_t count) qc |= qc << 32; while (count && !IS_ALIGNED((unsigned long)dst, 8)) { - __raw_writeb_no_log(c, dst); + __raw_writeb(c, dst); dst++; count--; } while (count >= 8) { - __raw_writeq_no_log(qc, dst); + __raw_writeq(qc, dst); dst += 8; count -= 8; } while (count) { - __raw_writeb_no_log(c, dst); + __raw_writeb(c, dst); dst++; count--; } diff --git a/arch/arm64/kernel/jump_label.c b/arch/arm64/kernel/jump_label.c index c2dd1ad3e648..903d17023d77 100644 --- a/arch/arm64/kernel/jump_label.c +++ b/arch/arm64/kernel/jump_label.c @@ -25,12 +25,12 @@ void arch_jump_label_transform(struct jump_entry *entry, enum jump_label_type type) { - void *addr = (void *)entry->code; + void *addr = (void *)jump_entry_code(entry); u32 insn; if (type == JUMP_LABEL_JMP) { - insn = aarch64_insn_gen_branch_imm(entry->code, - entry->target, + insn = aarch64_insn_gen_branch_imm(jump_entry_code(entry), + jump_entry_target(entry), AARCH64_INSN_BRANCH_NOLINK); } else { insn = aarch64_insn_gen_nop(); diff --git a/arch/arm64/kernel/kgdb.c b/arch/arm64/kernel/kgdb.c index 470afb3a04ca..dbf6c3b7ea7e 100644 --- a/arch/arm64/kernel/kgdb.c +++ b/arch/arm64/kernel/kgdb.c @@ -264,15 +264,13 @@ static int kgdb_step_brk_fn(struct pt_regs *regs, unsigned int esr) NOKPROBE_SYMBOL(kgdb_step_brk_fn); static struct break_hook kgdb_brkpt_hook = { - .esr_mask = 0xffffffff, - .esr_val = (u32)ESR_ELx_VAL_BRK64(KGDB_DYN_DBG_BRK_IMM), - .fn = kgdb_brk_fn + .fn = kgdb_brk_fn, + .imm = KGDB_DYN_DBG_BRK_IMM, }; static struct break_hook kgdb_compiled_brkpt_hook = { - .esr_mask = 0xffffffff, - .esr_val = (u32)ESR_ELx_VAL_BRK64(KGDB_COMPILED_DBG_BRK_IMM), - .fn = kgdb_compiled_brk_fn + .fn = kgdb_compiled_brk_fn, + .imm = KGDB_COMPILED_DBG_BRK_IMM, }; static struct step_hook kgdb_step_hook = { @@ -333,9 +331,9 @@ int kgdb_arch_init(void) if (ret != 0) return ret; - 
register_break_hook(&kgdb_brkpt_hook); - register_break_hook(&kgdb_compiled_brkpt_hook); - register_step_hook(&kgdb_step_hook); + register_kernel_break_hook(&kgdb_brkpt_hook); + register_kernel_break_hook(&kgdb_compiled_brkpt_hook); + register_kernel_step_hook(&kgdb_step_hook); return 0; } @@ -346,9 +344,9 @@ int kgdb_arch_init(void) */ void kgdb_arch_exit(void) { - unregister_break_hook(&kgdb_brkpt_hook); - unregister_break_hook(&kgdb_compiled_brkpt_hook); - unregister_step_hook(&kgdb_step_hook); + unregister_kernel_break_hook(&kgdb_brkpt_hook); + unregister_kernel_break_hook(&kgdb_compiled_brkpt_hook); + unregister_kernel_step_hook(&kgdb_step_hook); unregister_die_notifier(&kgdb_notifier); } diff --git a/arch/arm64/kernel/module.lds b/arch/arm64/kernel/module.lds index 99eb7c292494..3e157ae9462d 100644 --- a/arch/arm64/kernel/module.lds +++ b/arch/arm64/kernel/module.lds @@ -2,4 +2,12 @@ SECTIONS { .plt : { BYTE(0) } .init.plt : { BYTE(0) } .text.ftrace_trampoline : { BYTE(0) } + + /* Undo -fdata-sections and -ffunction-sections */ + .bss : { *(.bss .bss.[0-9a-zA-Z_]*) } + .data : { *(.data .data.[0-9a-zA-Z_]*) } + .rela.data : { *(.rela.data .rela.data.[0-9a-zA-Z_]*) } + .rela.text : { *(.rela.text .rela.text.[0-9a-zA-Z_]*) } + .rodata : { *(.rodata .rodata.[0-9a-zA-Z_]*) } + .text : { *(.text .text.[0-9a-zA-Z_]*) } } diff --git a/arch/arm64/kernel/perf_trace_counters.c b/arch/arm64/kernel/perf_trace_counters.c deleted file mode 100644 index 47983480326e..000000000000 --- a/arch/arm64/kernel/perf_trace_counters.c +++ /dev/null @@ -1,178 +0,0 @@ -/* Copyright (c) 2013-2014, 2017 The Linux Foundation. All rights reserved. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 and - * only version 2 as published by the Free Software Foundation. 
- * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - */ -#include -#include -#include -#include -#include -#define CREATE_TRACE_POINTS -#include "perf_trace_counters.h" - -static unsigned int tp_pid_state; - -DEFINE_PER_CPU(u32, cntenset_val); -DEFINE_PER_CPU(u32, previous_ccnt); -DEFINE_PER_CPU(u32[NUM_L1_CTRS], previous_l1_cnts); -DEFINE_PER_CPU(u32, old_pid); -DEFINE_PER_CPU(u32, hotplug_flag); - -#define USE_CPUHP_STATE CPUHP_AP_ONLINE - -static int tracectr_cpu_hotplug_coming_up(unsigned int cpu) -{ - per_cpu(hotplug_flag, cpu) = 1; - - return 0; -} - -static void setup_prev_cnts(u32 cpu, u32 cnten_val) -{ - int i; - - if (cnten_val & CC) - per_cpu(previous_ccnt, cpu) = - read_sysreg(pmccntr_el0); - - for (i = 0; i < NUM_L1_CTRS; i++) { - if (cnten_val & (1 << i)) { - /* Select */ - write_sysreg(i, pmselr_el0); - isb(); - /* Read value */ - per_cpu(previous_l1_cnts[i], cpu) = - read_sysreg(pmxevcntr_el0); - } - } -} - -void tracectr_notifier(void *ignore, bool preempt, - struct task_struct *prev, struct task_struct *next) -{ - u32 cnten_val; - int current_pid; - u32 cpu = task_cpu(next); - - if (tp_pid_state != 1) - return; - current_pid = next->pid; - if (per_cpu(old_pid, cpu) != -1) { - cnten_val = read_sysreg(pmcntenset_el0); - per_cpu(cntenset_val, cpu) = cnten_val; - /* Disable all the counters that were enabled */ - write_sysreg(cnten_val, pmcntenclr_el0); - - if (per_cpu(hotplug_flag, cpu) == 1) { - per_cpu(hotplug_flag, cpu) = 0; - setup_prev_cnts(cpu, cnten_val); - } else { - trace_sched_switch_with_ctrs(per_cpu(old_pid, cpu), - current_pid); - } - - /* Enable all the counters that were disabled */ - write_sysreg(cnten_val, pmcntenset_el0); - } - per_cpu(old_pid, cpu) = current_pid; -} - -static void enable_tp_pid(void) -{ - if (tp_pid_state == 0) { - 
tp_pid_state = 1; - register_trace_sched_switch(tracectr_notifier, NULL); - } -} - -static void disable_tp_pid(void) -{ - if (tp_pid_state == 1) { - tp_pid_state = 0; - unregister_trace_sched_switch(tracectr_notifier, NULL); - } -} - -static ssize_t read_enabled_perftp_file_bool(struct file *file, - char __user *user_buf, size_t count, loff_t *ppos) -{ - char buf[2]; - - buf[1] = '\n'; - if (tp_pid_state == 0) - buf[0] = '0'; - else - buf[0] = '1'; - return simple_read_from_buffer(user_buf, count, ppos, buf, 2); -} - -static ssize_t write_enabled_perftp_file_bool(struct file *file, - const char __user *user_buf, size_t count, loff_t *ppos) -{ - char buf[32]; - size_t buf_size; - - buf[0] = 0; - buf_size = min(count, (sizeof(buf)-1)); - if (copy_from_user(buf, user_buf, buf_size)) - return -EFAULT; - switch (buf[0]) { - case 'y': - case 'Y': - case '1': - enable_tp_pid(); - break; - case 'n': - case 'N': - case '0': - disable_tp_pid(); - break; - } - - return count; -} - -static const struct file_operations fops_perftp = { - .read = read_enabled_perftp_file_bool, - .write = write_enabled_perftp_file_bool, - .llseek = default_llseek, -}; - -int __init init_tracecounters(void) -{ - struct dentry *dir; - struct dentry *file; - unsigned int value = 1; - int cpu, rc; - - dir = debugfs_create_dir("perf_debug_tp", NULL); - if (!dir) - return -ENOMEM; - file = debugfs_create_file("enabled", 0660, dir, - &value, &fops_perftp); - if (!file) { - debugfs_remove(dir); - return -ENOMEM; - } - for_each_possible_cpu(cpu) - per_cpu(old_pid, cpu) = -1; - rc = cpuhp_setup_state_nocalls(USE_CPUHP_STATE, - "tracectr_cpu_hotplug", - tracectr_cpu_hotplug_coming_up, - NULL); - return 0; -} - -int __exit exit_tracecounters(void) -{ - cpuhp_remove_state_nocalls(USE_CPUHP_STATE); - return 0; -} -late_initcall(init_tracecounters); diff --git a/arch/arm64/kernel/perf_trace_counters.h b/arch/arm64/kernel/perf_trace_counters.h deleted file mode 100644 index 660f6ce03b44..000000000000 --- 
a/arch/arm64/kernel/perf_trace_counters.h +++ /dev/null @@ -1,110 +0,0 @@ -/* Copyright (c) 2013-2014,2017 The Linux Foundation. All rights reserved. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 and - * only version 2 as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - */ - -#undef TRACE_SYSTEM -#define TRACE_SYSTEM perf_trace_counters - -#if !defined(_PERF_TRACE_COUNTERS_H_) || defined(TRACE_HEADER_MULTI_READ) -#define _PERF_TRACE_COUNTERS_H_ - -/* Ctr index for PMCNTENSET/CLR */ -#define CC 0x80000000 -#define C0 0x1 -#define C1 0x2 -#define C2 0x4 -#define C3 0x8 -#define C4 0x10 -#define C5 0x20 -#define C_ALL (CC | C0 | C1 | C2 | C3 | C4 | C5) -#define NUM_L1_CTRS 6 - -#include -#include -#include - -DECLARE_PER_CPU(u32, cntenset_val); -DECLARE_PER_CPU(u32, previous_ccnt); -DECLARE_PER_CPU(u32[NUM_L1_CTRS], previous_l1_cnts); -TRACE_EVENT(sched_switch_with_ctrs, - - TP_PROTO(pid_t prev, pid_t next), - - TP_ARGS(prev, next), - - TP_STRUCT__entry( - __field(pid_t, old_pid) - __field(pid_t, new_pid) - __field(u32, cctr) - __field(u32, ctr0) - __field(u32, ctr1) - __field(u32, ctr2) - __field(u32, ctr3) - __field(u32, ctr4) - __field(u32, ctr5) - ), - - TP_fast_assign( - u32 cpu = smp_processor_id(); - u32 i; - u32 cnten_val; - u32 total_ccnt = 0; - u32 total_cnt = 0; - u32 delta_l1_cnts[NUM_L1_CTRS]; - - __entry->old_pid = prev; - __entry->new_pid = next; - - cnten_val = per_cpu(cntenset_val, cpu); - - if (cnten_val & CC) { - /* Read value */ - total_ccnt = read_sysreg(pmccntr_el0); - __entry->cctr = total_ccnt - - per_cpu(previous_ccnt, cpu); - per_cpu(previous_ccnt, cpu) = total_ccnt; - } - for (i = 0; i < NUM_L1_CTRS; 
i++) { - if (cnten_val & (1 << i)) { - /* Select */ - write_sysreg(i, pmselr_el0); - isb(); - /* Read value */ - total_cnt = read_sysreg(pmxevcntr_el0); - delta_l1_cnts[i] = total_cnt - - per_cpu(previous_l1_cnts[i], cpu); - per_cpu(previous_l1_cnts[i], cpu) = - total_cnt; - } else - delta_l1_cnts[i] = 0; - } - - __entry->ctr0 = delta_l1_cnts[0]; - __entry->ctr1 = delta_l1_cnts[1]; - __entry->ctr2 = delta_l1_cnts[2]; - __entry->ctr3 = delta_l1_cnts[3]; - __entry->ctr4 = delta_l1_cnts[4]; - __entry->ctr5 = delta_l1_cnts[5]; - ), - - TP_printk("prev_pid=%d, next_pid=%d, CCNTR: %u, CTR0: %u, CTR1: %u, CTR2: %u, CTR3: %u, CTR4: %u, CTR5: %u", - __entry->old_pid, __entry->new_pid, - __entry->cctr, - __entry->ctr0, __entry->ctr1, - __entry->ctr2, __entry->ctr3, - __entry->ctr4, __entry->ctr5) -); - -#endif -#undef TRACE_INCLUDE_PATH -#define TRACE_INCLUDE_PATH ../../arch/arm64/kernel -#define TRACE_INCLUDE_FILE perf_trace_counters -#include diff --git a/arch/arm64/kernel/perf_trace_user.c b/arch/arm64/kernel/perf_trace_user.c deleted file mode 100644 index 0e83f82933ec..000000000000 --- a/arch/arm64/kernel/perf_trace_user.c +++ /dev/null @@ -1,96 +0,0 @@ -/* Copyright (c) 2014,2017 The Linux Foundation. All rights reserved. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 and - * only version 2 as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. 
- */ -#include -#include -#include -#include -#include -#include -#include -#include - -#define CREATE_TRACE_POINTS -#include "perf_trace_user.h" - -#undef TRACE_SYSTEM -#define TRACE_SYSTEM perf_trace_counters - -#define TRACE_USER_MAX_BUF_SIZE 100 - -static ssize_t perf_trace_write(struct file *file, - const char __user *user_string_in, - size_t len, loff_t *ppos) -{ - u32 cnten_val; - int rc; - char buf[TRACE_USER_MAX_BUF_SIZE + 1]; - ssize_t length; - - if (len == 0) - return 0; - - length = len > TRACE_USER_MAX_BUF_SIZE ? TRACE_USER_MAX_BUF_SIZE : len; - - rc = copy_from_user(buf, user_string_in, length); - if (rc) { - pr_err("%s copy_from_user failed, rc=%d\n", __func__, rc); - return -EFAULT; - } - - /* Remove any trailing newline and make sure string is terminated */ - if (buf[length - 1] == '\n') - buf[length - 1] = '\0'; - else - buf[length] = '\0'; - - /* - * Disable preemption to ensure that all the performance counter - * accesses happen on the same cpu - */ - preempt_disable(); - /* stop counters, call the trace function, restart them */ - - cnten_val = read_sysreg(pmcntenset_el0); - /* Disable all the counters that were enabled */ - write_sysreg(cnten_val, pmcntenclr_el0); - - trace_perf_trace_user(buf, cnten_val); - - /* Enable all the counters that were disabled */ - write_sysreg(cnten_val, pmcntenset_el0); - preempt_enable(); - - return length; -} - -static const struct file_operations perf_trace_fops = { - .write = perf_trace_write -}; - -static int __init init_perf_trace(void) -{ - struct dentry *dir; - struct dentry *file; - unsigned int value = 1; - - dir = debugfs_create_dir("msm_perf", NULL); - if (!dir) - return -ENOMEM; - file = debugfs_create_file("trace_marker", 0220, dir, - &value, &perf_trace_fops); - if (!file) - return -ENOMEM; - - return 0; -} - -late_initcall(init_perf_trace); diff --git a/arch/arm64/kernel/perf_trace_user.h b/arch/arm64/kernel/perf_trace_user.h deleted file mode 100644 index 4ef10e09d226..000000000000 --- 
a/arch/arm64/kernel/perf_trace_user.h +++ /dev/null @@ -1,84 +0,0 @@ -/* Copyright (c) 2014,2017 The Linux Foundation. All rights reserved. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 and - * only version 2 as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - */ -#if !defined(_PERF_TRACE_USER_H_) || defined(TRACE_HEADER_MULTI_READ) -#define _PERF_TRACE_USER_H_ - -#undef TRACE_SYSTEM -#define TRACE_SYSTEM perf_trace_counters - -#include - -#define CNTENSET_CC 0x80000000 -#define NUM_L1_CTRS 6 - -TRACE_EVENT(perf_trace_user, - TP_PROTO(char *string, u32 cnten_val), - TP_ARGS(string, cnten_val), - - TP_STRUCT__entry( - __field(u32, cctr) - __field(u32, ctr0) - __field(u32, ctr1) - __field(u32, ctr2) - __field(u32, ctr3) - __field(u32, ctr4) - __field(u32, ctr5) - __string(user_string, string) - ), - - TP_fast_assign( - u32 cnt; - u32 l1_cnts[NUM_L1_CTRS]; - int i; - - if (cnten_val & CNTENSET_CC) { - /* Read value */ - cnt = read_sysreg(pmccntr_el0); - __entry->cctr = cnt; - } else - __entry->cctr = 0; - for (i = 0; i < NUM_L1_CTRS; i++) { - if (cnten_val & (1 << i)) { - /* Select */ - write_sysreg(i, pmselr_el0); - isb(); - /* Read value */ - cnt = read_sysreg(pmxevcntr_el0); - l1_cnts[i] = cnt; - } else { - l1_cnts[i] = 0; - } - } - - __entry->ctr0 = l1_cnts[0]; - __entry->ctr1 = l1_cnts[1]; - __entry->ctr2 = l1_cnts[2]; - __entry->ctr3 = l1_cnts[3]; - __entry->ctr4 = l1_cnts[4]; - __entry->ctr5 = l1_cnts[5]; - __assign_str(user_string, string); - ), - - TP_printk("CCNTR: %u, CTR0: %u, CTR1: %u, CTR2: %u, CTR3: %u, CTR4: %u, CTR5: %u, MSG=%s", - __entry->cctr, - __entry->ctr0, __entry->ctr1, - __entry->ctr2, __entry->ctr3, - 
__entry->ctr4, __entry->ctr5, - __get_str(user_string) - ) -); - -#endif -#undef TRACE_INCLUDE_PATH -#define TRACE_INCLUDE_PATH ../../arch/arm64/kernel -#define TRACE_INCLUDE_FILE perf_trace_user -#include diff --git a/arch/arm64/kernel/probes/kprobes.c b/arch/arm64/kernel/probes/kprobes.c index 6a6d661f38fb..e43ae71f7a30 100644 --- a/arch/arm64/kernel/probes/kprobes.c +++ b/arch/arm64/kernel/probes/kprobes.c @@ -653,6 +653,7 @@ void __kprobes __used *trampoline_probe_handler(struct pt_regs *regs) return (void *)orig_ret_address; } +#ifdef CONFIG_KRETPROBES void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri, struct pt_regs *regs) { @@ -666,6 +667,7 @@ int __kprobes arch_trampoline_kprobe(struct kprobe *p) { return 0; } +#endif int __init arch_init_kprobes(void) { diff --git a/arch/arm64/kernel/probes/uprobes.c b/arch/arm64/kernel/probes/uprobes.c index 636ca0119c0e..7d6ea88796a6 100644 --- a/arch/arm64/kernel/probes/uprobes.c +++ b/arch/arm64/kernel/probes/uprobes.c @@ -195,8 +195,7 @@ static int uprobe_single_step_handler(struct pt_regs *regs, /* uprobe breakpoint handler hook */ static struct break_hook uprobes_break_hook = { - .esr_mask = BRK64_ESR_MASK, - .esr_val = BRK64_ESR_UPROBES, + .imm = BRK64_ESR_UPROBES, .fn = uprobe_breakpoint_handler, }; @@ -207,8 +206,8 @@ static struct step_hook uprobes_step_hook = { static int __init arch_init_uprobes(void) { - register_break_hook(&uprobes_break_hook); - register_step_hook(&uprobes_step_hook); + register_user_break_hook(&uprobes_break_hook); + register_user_step_hook(&uprobes_step_hook); return 0; } diff --git a/arch/arm64/kernel/smp.c b/arch/arm64/kernel/smp.c index 34e65ee2e0b4..fb967baaba05 100644 --- a/arch/arm64/kernel/smp.c +++ b/arch/arm64/kernel/smp.c @@ -793,12 +793,10 @@ void arch_send_call_function_single_ipi(int cpu) smp_cross_call_common(cpumask_of(cpu), IPI_CALL_FUNC); } -#ifdef CONFIG_ARM64_ACPI_PARKING_PROTOCOL void arch_send_wakeup_ipi_mask(const struct cpumask *mask) { 
smp_cross_call_common(mask, IPI_WAKEUP); } -#endif #ifdef CONFIG_IRQ_WORK void arch_irq_work_raise(void) @@ -916,13 +914,8 @@ void handle_IPI(int ipinr, struct pt_regs *regs) break; #endif -#ifdef CONFIG_ARM64_ACPI_PARKING_PROTOCOL case IPI_WAKEUP: - WARN_ONCE(!acpi_parking_protocol_valid(cpu), - "CPU%u: Wake-up IPI outside the ACPI parking protocol\n", - cpu); break; -#endif default: pr_crit("CPU%u: Unknown IPI message 0x%x\n", cpu, ipinr); diff --git a/arch/arm64/kernel/traps.c b/arch/arm64/kernel/traps.c index 9f78751a5d63..244c2bb1f16b 100644 --- a/arch/arm64/kernel/traps.c +++ b/arch/arm64/kernel/traps.c @@ -858,9 +858,8 @@ static int bug_handler(struct pt_regs *regs, unsigned int esr) } static struct break_hook bug_break_hook = { - .esr_val = 0xf2000000 | BUG_BRK_IMM, - .esr_mask = 0xffffffff, .fn = bug_handler, + .imm = BUG_BRK_IMM, }; #ifdef CONFIG_KASAN_SW_TAGS @@ -929,11 +928,48 @@ int __init early_brk64(unsigned long addr, unsigned int esr, return bug_handler(regs, esr) != DBG_HOOK_HANDLED; } +static int refcount_overflow_handler(struct pt_regs *regs, unsigned int esr) +{ + u32 dummy_cbz = le32_to_cpup((__le32 *)(regs->pc + 4)); + bool zero = regs->pstate & PSR_Z_BIT; + u32 rt; + + /* + * Find the register that holds the counter address from the + * dummy 'cbz' instruction that follows the 'brk' instruction + * that sent us here. + */ + rt = aarch64_insn_decode_register(AARCH64_INSN_REGTYPE_RT, dummy_cbz); + + /* First unconditionally saturate the refcount. */ + *(int *)regs->regs[rt] = INT_MIN / 2; + + /* + * This function has been called because either a negative refcount + * value was seen by any of the refcount functions, or a zero + * refcount value was seen by refcount_{add,dec}(). + */ + + /* point pc to the branch instruction that detected the overflow */ + regs->pc += 4 + aarch64_get_branch_offset(dummy_cbz); + refcount_error_report(regs, zero ? 
"hit zero" : "overflow"); + + /* advance pc and proceed */ + regs->pc += 4; + return DBG_HOOK_HANDLED; +} + +static struct break_hook refcount_break_hook = { + .fn = refcount_overflow_handler, + .imm = REFCOUNT_BRK_IMM, +}; + /* This registration must happen early, before debug_traps_init(). */ void __init trap_init(void) { - register_break_hook(&bug_break_hook); #ifdef CONFIG_KASAN_SW_TAGS register_break_hook(&kasan_break_hook); #endif + register_kernel_break_hook(&bug_break_hook); + register_kernel_break_hook(&refcount_break_hook); } diff --git a/arch/arm64/kernel/vdso.c b/arch/arm64/kernel/vdso.c index 056c41d8d8ff..4aaf9ada5681 100644 --- a/arch/arm64/kernel/vdso.c +++ b/arch/arm64/kernel/vdso.c @@ -193,6 +193,8 @@ static int __init vdso_mappings_init(const char *name, if (vdso_pagelist == NULL) return -ENOMEM; + kmemleak_not_leak(vdso_pagelist); + /* Grab the vDSO data page. */ vdso_pagelist[0] = phys_to_page(__pa_symbol(vdso_data)); diff --git a/arch/arm64/kernel/vdso32/Makefile b/arch/arm64/kernel/vdso32/Makefile index f598184d084b..153521d862c5 100644 --- a/arch/arm64/kernel/vdso32/Makefile +++ b/arch/arm64/kernel/vdso32/Makefile @@ -5,7 +5,7 @@ # A mix between the arm64 and arm vDSO Makefiles. ifeq ($(cc-name),clang) - CC_ARM32 := $(CC) $(CLANG_TARGET_ARM32) -no-integrated-as + CC_ARM32 := $(CC) $(CLANG_TARGET_ARM32) -no-integrated-as $(CLANG_GCC32_TC) $(CLANG_PREFIX32) GCC_ARM32_TC := $(realpath $(dir $(shell which $(CROSS_COMPILE_ARM32)ld))/..) ifneq ($(GCC_ARM32_TC),) CC_ARM32 += --gcc-toolchain=$(GCC_ARM32_TC) diff --git a/arch/arm64/kernel/vmlinux.lds.S b/arch/arm64/kernel/vmlinux.lds.S index 6a632c6fdd7d..bd33b0b38dea 100644 --- a/arch/arm64/kernel/vmlinux.lds.S +++ b/arch/arm64/kernel/vmlinux.lds.S @@ -107,6 +107,8 @@ SECTIONS .head.text : { _text = .; + PROVIDE(__efistub__text = .); + HEAD_TEXT } .text : { /* Real text segment */ @@ -222,6 +224,7 @@ SECTIONS PECOFF_EDATA_PADDING __pecoff_data_rawsize = ABSOLUTE(. 
- __initdata_begin); _edata = .; + PROVIDE(__efistub__edata = .); BSS_SECTION(0, 0, 0) @@ -243,12 +246,15 @@ SECTIONS __pecoff_data_size = ABSOLUTE(. - __initdata_begin); _end = .; + PROVIDE(__efistub__end = .); STABS_DEBUG HEAD_SYMBOLS } +PROVIDE(__efistub_stext_offset = stext - _text); + /* * The HYP init code and ID map text can't be longer than a page each, * and should not cross a page boundary. diff --git a/arch/arm64/lib/atomic_ll_sc.c b/arch/arm64/lib/atomic_ll_sc.c index b0c538b0da28..8a335cd9f0e2 100644 --- a/arch/arm64/lib/atomic_ll_sc.c +++ b/arch/arm64/lib/atomic_ll_sc.c @@ -1,3 +1,15 @@ #include #define __ARM64_IN_ATOMIC_IMPL + +/* + * Disarm the refcount checks in the out-of-line LL/SC routines. These are + * redundant, given that the LSE callers already perform the same checks. + * We do have to make sure that we exit with a zero value if the pre-check + * detected a zero value. + */ +#undef REFCOUNT_POST_CHECK_NEG +#undef REFCOUNT_POST_CHECK_NEG_OR_ZERO +#define REFCOUNT_POST_CHECK_NEG +#define REFCOUNT_POST_CHECK_NEG_OR_ZERO "csel %w[val], wzr, %w[val], eq\n" + #include diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index a9b831061e84..50d18514b51b 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig @@ -296,9 +296,6 @@ config ZONE_DMA32 config AUDIT_ARCH def_bool y if X86_64 -config ARCH_SUPPORTS_OPTIMIZED_INLINING - def_bool y - config ARCH_SUPPORTS_DEBUG_PAGEALLOC def_bool y diff --git a/arch/x86/Kconfig.debug b/arch/x86/Kconfig.debug index bec0952c5595..cb77a9e9f9fe 100644 --- a/arch/x86/Kconfig.debug +++ b/arch/x86/Kconfig.debug @@ -284,20 +284,6 @@ config CPA_DEBUG ---help--- Do change_page_attr() self-tests every 30 seconds. -config OPTIMIZE_INLINING - bool "Allow gcc to uninline functions marked 'inline'" - ---help--- - This option determines if the kernel forces gcc to inline the functions - developers have marked 'inline'. 
Doing so takes away freedom from gcc to - do what it thinks is best, which is desirable for the gcc 3.x series of - compilers. The gcc 4.x series have a rewritten inlining algorithm and - enabling this option will generate a smaller kernel there. Hopefully - this algorithm is so good that allowing gcc 4.x and above to make the - decision will become the default in the future. Until then this option - is there to test gcc for this. - - If unsure, say N. - config DEBUG_ENTRY bool "Debug low-level entry code" depends on DEBUG_KERNEL diff --git a/arch/x86/kernel/unwind_orc.c b/arch/x86/kernel/unwind_orc.c index a9a55e76a43f..4d193d491c92 100644 --- a/arch/x86/kernel/unwind_orc.c +++ b/arch/x86/kernel/unwind_orc.c @@ -5,7 +5,6 @@ #include #include #include -#include #define orc_warn(fmt, ...) \ printk_deferred_once(KERN_WARNING pr_fmt("WARNING: " fmt), ##__VA_ARGS__) @@ -120,7 +119,7 @@ static struct orc_entry *orc_find(unsigned long ip) } /* vmlinux .init slow lookup: */ - if (ip >= (unsigned long)_sinittext && ip < (unsigned long)_einittext) + if (init_kernel_text(ip)) return __orc_find(__start_orc_unwind_ip, __start_orc_unwind, __stop_orc_unwind_ip - __start_orc_unwind_ip, ip); diff --git a/block/blk-core.c b/block/blk-core.c index 782ca4af9291..926917ebb8f3 100644 --- a/block/blk-core.c +++ b/block/blk-core.c @@ -1429,7 +1429,11 @@ retry: trace_block_sleeprq(q, bio, op); spin_unlock_irq(q->queue_lock); - io_schedule(); + /* + * FIXME: this should be io_schedule(). The timeout is there as a + * workaround for some io timeout problems. 
+ */ + io_schedule_timeout(5*HZ); /* * After sleeping, we become a "batching" process and will be able diff --git a/block/blk.h b/block/blk.h index b2c287c2c6a3..0156c0185cbc 100644 --- a/block/blk.h +++ b/block/blk.h @@ -265,9 +265,7 @@ extern int blk_update_nr_requests(struct request_queue *, unsigned int); */ static inline int blk_do_io_stat(struct request *rq) { - return rq->rq_disk && - (rq->rq_flags & RQF_IO_STAT) && - !blk_rq_is_passthrough(rq); + return false; } static inline void req_set_nomerge(struct request_queue *q, struct request *req) diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c index a392ff457e17..29e5345c44e5 100644 --- a/block/cfq-iosched.c +++ b/block/cfq-iosched.c @@ -23,12 +23,12 @@ * tunables */ /* max queue in one round of service */ -static const int cfq_quantum = 8; +static const int cfq_quantum = 16; static const u64 cfq_fifo_expire[2] = { NSEC_PER_SEC / 4, NSEC_PER_SEC / 8 }; /* maximum backwards seek, in KiB */ static const int cfq_back_max = 16 * 1024; /* penalty of a backwards seek */ -static const int cfq_back_penalty = 2; +static const int cfq_back_penalty = 1; static const u64 cfq_slice_sync = NSEC_PER_SEC / 10; static u64 cfq_slice_async = NSEC_PER_SEC / 25; static const int cfq_slice_async_rq = 2; diff --git a/block/scsi_ioctl.c b/block/scsi_ioctl.c index 7440de44dd85..72aede253dfd 100644 --- a/block/scsi_ioctl.c +++ b/block/scsi_ioctl.c @@ -692,38 +692,9 @@ int scsi_verify_blk_ioctl(struct block_device *bd, unsigned int cmd) if (bd && bd == bd->bd_contains) return 0; - /* Actually none of these is particularly useful on a partition, - * but they are safe. 
- */ - switch (cmd) { - case SCSI_IOCTL_GET_IDLUN: - case SCSI_IOCTL_GET_BUS_NUMBER: - case SCSI_IOCTL_GET_PCI: - case SCSI_IOCTL_PROBE_HOST: - case SG_GET_VERSION_NUM: - case SG_SET_TIMEOUT: - case SG_GET_TIMEOUT: - case SG_GET_RESERVED_SIZE: - case SG_SET_RESERVED_SIZE: - case SG_EMULATED_HOST: - return 0; - case CDROM_GET_CAPABILITY: - /* Keep this until we remove the printk below. udev sends it - * and we do not want to spam dmesg about it. CD-ROMs do - * not have partitions, so we get here only for disks. - */ - return -ENOIOCTLCMD; - default: - break; - } - if (capable(CAP_SYS_RAWIO)) return 0; - /* In particular, rule out all resets and host-specific ioctls. */ - printk_ratelimited(KERN_WARNING - "%s: sending ioctl %x to a partition!\n", current->comm, cmd); - return -ENOIOCTLCMD; } EXPORT_SYMBOL(scsi_verify_blk_ioctl); diff --git a/drivers/android/binder.c b/drivers/android/binder.c index 7bd038edc1f7..aed7f7bf60ce 100644 --- a/drivers/android/binder.c +++ b/drivers/android/binder.c @@ -123,8 +123,7 @@ enum { BINDER_DEBUG_PRIORITY_CAP = 1U << 13, BINDER_DEBUG_SPINLOCKS = 1U << 14, }; -static uint32_t binder_debug_mask = BINDER_DEBUG_USER_ERROR | - BINDER_DEBUG_FAILED_TRANSACTION | BINDER_DEBUG_DEAD_TRANSACTION; +static uint32_t binder_debug_mask = 0; module_param_named(debug_mask, binder_debug_mask, uint, 0644); char *binder_devices_param = CONFIG_ANDROID_BINDER_DEVICES; @@ -204,6 +203,14 @@ static inline void binder_stats_created(enum binder_stat_types type) struct binder_transaction_log binder_transaction_log; struct binder_transaction_log binder_transaction_log_failed; +static struct kmem_cache *binder_node_pool; +static struct kmem_cache *binder_proc_pool; +static struct kmem_cache *binder_ref_death_pool; +static struct kmem_cache *binder_ref_pool; +static struct kmem_cache *binder_thread_pool; +static struct kmem_cache *binder_transaction_pool; +static struct kmem_cache *binder_work_pool; + static struct binder_transaction_log_entry 
*binder_transaction_log_add( struct binder_transaction_log *log) { @@ -1351,9 +1358,9 @@ static struct binder_node *binder_init_node_ilocked( static struct binder_node *binder_new_node(struct binder_proc *proc, struct flat_binder_object *fp) { - struct binder_node *node; - struct binder_node *new_node = kzalloc(sizeof(*node), GFP_KERNEL); + struct binder_node *node, *new_node; + new_node = kmem_cache_zalloc(binder_node_pool, GFP_KERNEL); if (!new_node) return NULL; binder_inner_proc_lock(proc); @@ -1363,14 +1370,14 @@ static struct binder_node *binder_new_node(struct binder_proc *proc, /* * The node was already added by another thread */ - kfree(new_node); + kmem_cache_free(binder_node_pool, new_node); return node; } static void binder_free_node(struct binder_node *node) { - kfree(node); + kmem_cache_free(binder_node_pool, node); binder_stats_deleted(BINDER_STAT_NODE); } @@ -1857,8 +1864,9 @@ static void binder_free_ref(struct binder_ref *ref) { if (ref->node) binder_free_node(ref->node); - kfree(ref->death); - kfree(ref); + if (ref->death) + kmem_cache_free(binder_ref_death_pool, ref->death); + kmem_cache_free(binder_ref_pool, ref); } /** @@ -1951,7 +1959,7 @@ static int binder_inc_ref_for_node(struct binder_proc *proc, ref = binder_get_ref_for_node_olocked(proc, node, NULL); if (!ref) { binder_proc_unlock(proc); - new_ref = kzalloc(sizeof(*ref), GFP_KERNEL); + new_ref = kmem_cache_zalloc(binder_ref_pool, GFP_KERNEL); if (!new_ref) return -ENOMEM; binder_proc_lock(proc); @@ -1965,7 +1973,7 @@ static int binder_inc_ref_for_node(struct binder_proc *proc, * Another thread created the ref first so * free the one we allocated */ - kfree(new_ref); + kmem_cache_free(binder_ref_pool, new_ref); return ret; } @@ -2100,7 +2108,7 @@ static void binder_free_transaction(struct binder_transaction *t) * If the transaction has no target_proc, then * t->buffer->transaction has already been cleared. 
*/ - kfree(t); + kmem_cache_free(binder_transaction_pool, t); binder_stats_deleted(BINDER_STAT_TRANSACTION); } @@ -3130,7 +3138,7 @@ static void binder_transaction(struct binder_proc *proc, e->to_proc = target_proc->pid; /* TODO: reuse incoming transaction for reply */ - t = kzalloc(sizeof(*t), GFP_KERNEL); + t = kmem_cache_zalloc(binder_transaction_pool, GFP_KERNEL); if (t == NULL) { return_error = BR_FAILED_REPLY; return_error_param = -ENOMEM; @@ -3140,7 +3148,7 @@ static void binder_transaction(struct binder_proc *proc, binder_stats_created(BINDER_STAT_TRANSACTION); spin_lock_init(&t->lock); - tcomplete = kzalloc(sizeof(*tcomplete), GFP_KERNEL); + tcomplete = kmem_cache_zalloc(binder_work_pool, GFP_KERNEL); if (tcomplete == NULL) { return_error = BR_FAILED_REPLY; return_error_param = -ENOMEM; @@ -3563,10 +3571,10 @@ err_bad_extra_size: if (secctx) security_release_secctx(secctx, secctx_sz); err_get_secctx_failed: - kfree(tcomplete); + kmem_cache_free(binder_work_pool, tcomplete); binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE); err_alloc_tcomplete_failed: - kfree(t); + kmem_cache_free(binder_transaction_pool, t); binder_stats_deleted(BINDER_STAT_TRANSACTION); err_alloc_t_failed: err_bad_call_stack: @@ -3918,7 +3926,7 @@ static int binder_thread_write(struct binder_proc *proc, * Allocate memory for death notification * before taking lock */ - death = kzalloc(sizeof(*death), GFP_KERNEL); + death = kmem_cache_zalloc(binder_ref_death_pool, GFP_KERNEL); if (death == NULL) { WARN_ON(thread->return_error.cmd != BR_OK); @@ -3943,7 +3951,8 @@ static int binder_thread_write(struct binder_proc *proc, "BC_CLEAR_DEATH_NOTIFICATION", target); binder_proc_unlock(proc); - kfree(death); + if (death) + kmem_cache_free(binder_ref_death_pool, death); break; } @@ -3964,7 +3973,7 @@ static int binder_thread_write(struct binder_proc *proc, proc->pid, thread->pid); binder_node_unlock(ref->node); binder_proc_unlock(proc); - kfree(death); + kmem_cache_free(binder_ref_death_pool, 
death); break; } binder_stats_created(BINDER_STAT_DEATH); @@ -4264,7 +4273,7 @@ retry: case BINDER_WORK_TRANSACTION_COMPLETE: { binder_inner_proc_unlock(proc); cmd = BR_TRANSACTION_COMPLETE; - kfree(w); + kmem_cache_free(binder_work_pool, w); binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE); if (put_user(cmd, (uint32_t __user *)ptr)) return -EFAULT; @@ -4385,7 +4394,7 @@ retry: (u64)cookie); if (w->type == BINDER_WORK_CLEAR_DEATH_NOTIFICATION) { binder_inner_proc_unlock(proc); - kfree(death); + kmem_cache_free(binder_ref_death_pool, death); binder_stats_deleted(BINDER_STAT_DEATH); } else { binder_enqueue_work_ilocked( @@ -4555,7 +4564,7 @@ static void binder_release_work(struct binder_proc *proc, case BINDER_WORK_TRANSACTION_COMPLETE: { binder_debug(BINDER_DEBUG_DEAD_TRANSACTION, "undelivered TRANSACTION_COMPLETE\n"); - kfree(w); + kmem_cache_free(binder_work_pool, w); binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE); } break; case BINDER_WORK_DEAD_BINDER_AND_CLEAR: @@ -4566,7 +4575,7 @@ static void binder_release_work(struct binder_proc *proc, binder_debug(BINDER_DEBUG_DEAD_TRANSACTION, "undelivered death notification, %016llx\n", (u64)death->cookie); - kfree(death); + kmem_cache_free(binder_ref_death_pool, death); binder_stats_deleted(BINDER_STAT_DEATH); } break; default: @@ -4627,14 +4636,14 @@ static struct binder_thread *binder_get_thread(struct binder_proc *proc) thread = binder_get_thread_ilocked(proc, NULL); binder_inner_proc_unlock(proc); if (!thread) { - new_thread = kzalloc(sizeof(*thread), GFP_KERNEL); + new_thread = kmem_cache_zalloc(binder_thread_pool, GFP_KERNEL); if (new_thread == NULL) return NULL; binder_inner_proc_lock(proc); thread = binder_get_thread_ilocked(proc, new_thread); binder_inner_proc_unlock(proc); if (thread != new_thread) - kfree(new_thread); + kmem_cache_free(binder_thread_pool, new_thread); } return thread; } @@ -4646,7 +4655,7 @@ static void binder_free_proc(struct binder_proc *proc) 
binder_alloc_deferred_release(&proc->alloc); put_task_struct(proc->tsk); binder_stats_deleted(BINDER_STAT_PROC); - kfree(proc); + kmem_cache_free(binder_proc_pool, proc); } static void binder_free_thread(struct binder_thread *thread) @@ -4655,7 +4664,7 @@ static void binder_free_thread(struct binder_thread *thread) binder_stats_deleted(BINDER_STAT_THREAD); binder_proc_dec_tmpref(thread->proc); put_task_struct(thread->task); - kfree(thread); + kmem_cache_free(binder_thread_pool, thread); } static int binder_thread_release(struct binder_proc *proc, @@ -5166,7 +5175,7 @@ static int binder_open(struct inode *nodp, struct file *filp) binder_debug(BINDER_DEBUG_OPEN_CLOSE, "%s: %d:%d\n", __func__, current->group_leader->pid, current->pid); - proc = kzalloc(sizeof(*proc), GFP_KERNEL); + proc = kmem_cache_zalloc(binder_proc_pool, GFP_KERNEL); if (proc == NULL) return -ENOMEM; spin_lock_init(&proc->inner_lock); @@ -6082,6 +6091,73 @@ static int __init init_binder_device(const char *name) return ret; } +static int __init binder_create_pools(void) +{ + int ret; + + ret = binder_buffer_pool_create(); + if (ret) + return ret; + + binder_node_pool = KMEM_CACHE(binder_node, SLAB_HWCACHE_ALIGN); + if (!binder_node_pool) + goto err_node_pool; + + binder_proc_pool = KMEM_CACHE(binder_proc, SLAB_HWCACHE_ALIGN); + if (!binder_proc_pool) + goto err_proc_pool; + + binder_ref_death_pool = KMEM_CACHE(binder_ref_death, SLAB_HWCACHE_ALIGN); + if (!binder_ref_death_pool) + goto err_ref_death_pool; + + binder_ref_pool = KMEM_CACHE(binder_ref, SLAB_HWCACHE_ALIGN); + if (!binder_ref_pool) + goto err_ref_pool; + + binder_thread_pool = KMEM_CACHE(binder_thread, SLAB_HWCACHE_ALIGN); + if (!binder_thread_pool) + goto err_thread_pool; + + binder_transaction_pool = KMEM_CACHE(binder_transaction, SLAB_HWCACHE_ALIGN); + if (!binder_transaction_pool) + goto err_transaction_pool; + + binder_work_pool = KMEM_CACHE(binder_work, SLAB_HWCACHE_ALIGN); + if (!binder_work_pool) + goto err_work_pool; + + return 
0; + +err_work_pool: + kmem_cache_destroy(binder_transaction_pool); +err_transaction_pool: + kmem_cache_destroy(binder_thread_pool); +err_thread_pool: + kmem_cache_destroy(binder_ref_pool); +err_ref_pool: + kmem_cache_destroy(binder_ref_death_pool); +err_ref_death_pool: + kmem_cache_destroy(binder_proc_pool); +err_proc_pool: + kmem_cache_destroy(binder_node_pool); +err_node_pool: + binder_buffer_pool_destroy(); + return -ENOMEM; +} + +static void __init binder_destroy_pools(void) +{ + binder_buffer_pool_destroy(); + kmem_cache_destroy(binder_node_pool); + kmem_cache_destroy(binder_proc_pool); + kmem_cache_destroy(binder_ref_death_pool); + kmem_cache_destroy(binder_ref_pool); + kmem_cache_destroy(binder_thread_pool); + kmem_cache_destroy(binder_transaction_pool); + kmem_cache_destroy(binder_work_pool); +} + static int __init binder_init(void) { int ret; @@ -6090,10 +6166,14 @@ static int __init binder_init(void) struct hlist_node *tmp; char *device_names = NULL; - ret = binder_alloc_shrinker_init(); + ret = binder_create_pools(); if (ret) return ret; + ret = binder_alloc_shrinker_init(); + if (ret) + goto err_alloc_shrinker_failed; + atomic_set(&binder_transaction_log.cur, ~0U); atomic_set(&binder_transaction_log_failed.cur, ~0U); @@ -6168,6 +6248,9 @@ err_init_binder_device_failed: err_alloc_device_names_failed: debugfs_remove_recursive(binder_debugfs_dir_entry_root); +err_alloc_shrinker_failed: + binder_destroy_pools(); + return ret; } diff --git a/drivers/android/binder_alloc.c b/drivers/android/binder_alloc.c index 880affe45b07..eb3701141b5f 100644 --- a/drivers/android/binder_alloc.c +++ b/drivers/android/binder_alloc.c @@ -53,6 +53,22 @@ module_param_named(debug_mask, binder_alloc_debug_mask, pr_info(x); \ } while (0) +static struct kmem_cache *binder_buffer_pool; + +int binder_buffer_pool_create(void) +{ + binder_buffer_pool = KMEM_CACHE(binder_buffer, SLAB_HWCACHE_ALIGN); + if (!binder_buffer_pool) + return -ENOMEM; + + return 0; +} + +void 
binder_buffer_pool_destroy(void) +{ + kmem_cache_destroy(binder_buffer_pool); +} + static struct binder_buffer *binder_buffer_next(struct binder_buffer *buffer) { return list_entry(buffer->entry.next, struct binder_buffer, entry); @@ -464,7 +480,7 @@ static struct binder_buffer *binder_alloc_new_buf_locked( if (buffer_size != size) { struct binder_buffer *new_buffer; - new_buffer = kzalloc(sizeof(*buffer), GFP_KERNEL); + new_buffer = kmem_cache_zalloc(binder_buffer_pool, GFP_KERNEL); if (!new_buffer) { pr_err("%s: %d failed to alloc new buffer struct\n", __func__, alloc->pid); @@ -588,7 +604,7 @@ static void binder_delete_free_buffer(struct binder_alloc *alloc, buffer_start_page(buffer) + PAGE_SIZE); } list_del(&buffer->entry); - kfree(buffer); + kmem_cache_free(binder_buffer_pool, buffer); } static void binder_free_buf_locked(struct binder_alloc *alloc, @@ -702,7 +718,7 @@ int binder_alloc_mmap_handler(struct binder_alloc *alloc, } alloc->buffer_size = vma->vm_end - vma->vm_start; - buffer = kzalloc(sizeof(*buffer), GFP_KERNEL); + buffer = kmem_cache_zalloc(binder_buffer_pool, GFP_KERNEL); if (!buffer) { ret = -ENOMEM; failure_string = "alloc buffer struct"; @@ -760,7 +776,7 @@ void binder_alloc_deferred_release(struct binder_alloc *alloc) list_del(&buffer->entry); WARN_ON_ONCE(!list_empty(&alloc->buffers)); - kfree(buffer); + kmem_cache_free(binder_buffer_pool, buffer); } page_count = 0; diff --git a/drivers/android/binder_alloc.h b/drivers/android/binder_alloc.h index b60d161b7a7a..cc671734bb08 100644 --- a/drivers/android/binder_alloc.h +++ b/drivers/android/binder_alloc.h @@ -143,6 +143,8 @@ extern void binder_alloc_print_allocated(struct seq_file *m, struct binder_alloc *alloc); void binder_alloc_print_pages(struct seq_file *m, struct binder_alloc *alloc); +extern int binder_buffer_pool_create(void); +extern void binder_buffer_pool_destroy(void); /** * binder_alloc_get_free_async_space() - get free space available for async diff --git 
a/drivers/base/power/Makefile b/drivers/base/power/Makefile index 2eddf419335a..74e5d181e143 100644 --- a/drivers/base/power/Makefile +++ b/drivers/base/power/Makefile @@ -7,3 +7,4 @@ obj-$(CONFIG_PM_GENERIC_DOMAINS) += domain.o domain_governor.o obj-$(CONFIG_HAVE_CLK) += clock_ops.o ccflags-$(CONFIG_DEBUG_DRIVER) := -DDEBUG +CFLAGS_wakeup.o += -DCONFIG_DEBUG_FS diff --git a/drivers/base/power/wakeup.c b/drivers/base/power/wakeup.c index c4accba07655..23a048f2beee 100644 --- a/drivers/base/power/wakeup.c +++ b/drivers/base/power/wakeup.c @@ -961,7 +961,7 @@ bool pm_wakeup_pending(void) pm_get_active_wakeup_sources(suspend_abort, MAX_SUSPEND_ABORT_LEN); log_suspend_abort_reason(suspend_abort); - pr_info("PM: %s\n", suspend_abort); + pr_debug("PM: %s\n", suspend_abort); } return ret || atomic_read(&pm_abort_suspend) > 0; diff --git a/drivers/bus/mhi/core/mhi_internal.h b/drivers/bus/mhi/core/mhi_internal.h index 1d3261855a81..7b63da5fd225 100644 --- a/drivers/bus/mhi/core/mhi_internal.h +++ b/drivers/bus/mhi/core/mhi_internal.h @@ -859,7 +859,7 @@ static inline void mhi_timesync_log(struct mhi_controller *mhi_cntrl) if (mhi_tsync && mhi_cntrl->tsync_log) mhi_cntrl->tsync_log(mhi_cntrl, - readq_no_log(mhi_tsync->time_reg)); + readq(mhi_tsync->time_reg)); } /* memory allocation methods */ diff --git a/drivers/bus/mhi/core/mhi_main.c b/drivers/bus/mhi/core/mhi_main.c index 10d58cf33db1..e6de5bc9af13 100644 --- a/drivers/bus/mhi/core/mhi_main.c +++ b/drivers/bus/mhi/core/mhi_main.c @@ -2619,7 +2619,7 @@ int mhi_get_remote_time_sync(struct mhi_device *mhi_dev, local_irq_disable(); *t_host = mhi_cntrl->time_get(mhi_cntrl, mhi_cntrl->priv_data); - *t_dev = readq_relaxed_no_log(mhi_tsync->time_reg); + *t_dev = readq_relaxed(mhi_tsync->time_reg); local_irq_enable(); preempt_enable(); @@ -2720,7 +2720,7 @@ int mhi_get_remote_time(struct mhi_device *mhi_dev, mhi_tsync->local_time = mhi_cntrl->time_get(mhi_cntrl, mhi_cntrl->priv_data); - 
writel_relaxed_no_log(mhi_tsync->int_sequence, mhi_cntrl->tsync_db); + writel_relaxed(mhi_tsync->int_sequence, mhi_cntrl->tsync_db); /* write must go thru immediately */ wmb(); diff --git a/drivers/bus/mhi/devices/mhi_netdev.c b/drivers/bus/mhi/devices/mhi_netdev.c index 735de1daa0c6..28a619fef3c0 100644 --- a/drivers/bus/mhi/devices/mhi_netdev.c +++ b/drivers/bus/mhi/devices/mhi_netdev.c @@ -36,39 +36,21 @@ #define MSG_VERB(fmt, ...) do { \ if (mhi_netdev->msg_lvl <= MHI_MSG_LVL_VERBOSE) \ pr_err("[D][%s] " fmt, __func__, ##__VA_ARGS__);\ - if (mhi_netdev->ipc_log && (*mhi_netdev->ipc_log_lvl <= \ - MHI_MSG_LVL_VERBOSE)) \ - ipc_log_string(mhi_netdev->ipc_log, "[D][%s] " fmt, \ - __func__, ##__VA_ARGS__); \ } while (0) #else -#define MSG_VERB(fmt, ...) do { \ - if (mhi_netdev->ipc_log && (*mhi_netdev->ipc_log_lvl <= \ - MHI_MSG_LVL_VERBOSE)) \ - ipc_log_string(mhi_netdev->ipc_log, "[D][%s] " fmt, \ - __func__, ##__VA_ARGS__); \ -} while (0) - +#define MSG_VERB(fmt, ...) ((void)0) #endif #define MSG_LOG(fmt, ...) do { \ if (mhi_netdev->msg_lvl <= MHI_MSG_LVL_INFO) \ pr_err("[I][%s] " fmt, __func__, ##__VA_ARGS__);\ - if (mhi_netdev->ipc_log && (*mhi_netdev->ipc_log_lvl <= \ - MHI_MSG_LVL_INFO)) \ - ipc_log_string(mhi_netdev->ipc_log, "[I][%s] " fmt, \ - __func__, ##__VA_ARGS__); \ } while (0) #define MSG_ERR(fmt, ...) do { \ if (mhi_netdev->msg_lvl <= MHI_MSG_LVL_ERROR) \ pr_err("[E][%s] " fmt, __func__, ##__VA_ARGS__); \ - if (mhi_netdev->ipc_log && (*mhi_netdev->ipc_log_lvl <= \ - MHI_MSG_LVL_ERROR)) \ - ipc_log_string(mhi_netdev->ipc_log, "[E][%s] " fmt, \ - __func__, ##__VA_ARGS__); \ } while (0) #define MHI_ASSERT(cond, msg) do { \ diff --git a/drivers/bus/mhi/devices/mhi_uci.c b/drivers/bus/mhi/devices/mhi_uci.c index 5be11340bfaa..1774a3cc46d8 100644 --- a/drivers/bus/mhi/devices/mhi_uci.c +++ b/drivers/bus/mhi/devices/mhi_uci.c @@ -78,40 +78,22 @@ enum MHI_DEBUG_LEVEL msg_lvl = MHI_MSG_LVL_ERROR; #define MSG_VERB(fmt, ...) 
do { \ if (msg_lvl <= MHI_MSG_LVL_VERBOSE) \ pr_err("[D][%s] " fmt, __func__, ##__VA_ARGS__); \ - if (uci_dev->ipc_log && uci_dev->ipc_log_lvl && \ - (*uci_dev->ipc_log_lvl <= MHI_MSG_LVL_VERBOSE)) \ - ipc_log_string(uci_dev->ipc_log, \ - "[D][%s] " fmt, __func__, ##__VA_ARGS__); \ } while (0) #else #define MHI_UCI_IPC_LOG_PAGES (1) -#define MSG_VERB(fmt, ...) do { \ - if (uci_dev->ipc_log && uci_dev->ipc_log_lvl && \ - (*uci_dev->ipc_log_lvl <= MHI_MSG_LVL_VERBOSE)) \ - ipc_log_string(uci_dev->ipc_log, \ - "[D][%s] " fmt, __func__, ##__VA_ARGS__); \ - } while (0) - +#define MSG_VERB(fmt, ...) ((void)0) #endif #define MSG_LOG(fmt, ...) do { \ if (msg_lvl <= MHI_MSG_LVL_INFO) \ pr_err("[I][%s] " fmt, __func__, ##__VA_ARGS__); \ - if (uci_dev->ipc_log && uci_dev->ipc_log_lvl && \ - (*uci_dev->ipc_log_lvl <= MHI_MSG_LVL_INFO)) \ - ipc_log_string(uci_dev->ipc_log, "[I][%s] " fmt, \ - __func__, ##__VA_ARGS__); \ } while (0) #define MSG_ERR(fmt, ...) do { \ if (msg_lvl <= MHI_MSG_LVL_ERROR) \ pr_err("[E][%s] " fmt, __func__, ##__VA_ARGS__); \ - if (uci_dev->ipc_log && uci_dev->ipc_log_lvl && \ - (*uci_dev->ipc_log_lvl <= MHI_MSG_LVL_ERROR)) \ - ipc_log_string(uci_dev->ipc_log, "[E][%s] " fmt, \ - __func__, ##__VA_ARGS__); \ } while (0) #define MAX_UCI_DEVICES (64) diff --git a/drivers/char/diag/diag_debugfs.c b/drivers/char/diag/diag_debugfs.c index 6910d5c81001..0e7c657807dd 100644 --- a/drivers/char/diag/diag_debugfs.c +++ b/drivers/char/diag/diag_debugfs.c @@ -775,7 +775,9 @@ static ssize_t diag_dbgfs_write_debug(struct file *fp, const char __user *buf, if (value < 0) return -EINVAL; +#ifdef DIAG_DEBUG diag_debug_mask = (uint16_t)value; +#endif return count; } #endif diff --git a/drivers/char/diag/diag_ipc_logging.h b/drivers/char/diag/diag_ipc_logging.h index c7708e5f0327..9cb90c8e998e 100644 --- a/drivers/char/diag/diag_ipc_logging.h +++ b/drivers/char/diag/diag_ipc_logging.h @@ -30,16 +30,7 @@ #ifdef CONFIG_IPC_LOGGING extern uint16_t diag_debug_mask; extern void 
*diag_ipc_log; - -#define DIAG_LOG(log_lvl, msg, ...) \ - do { \ - if (diag_ipc_log && (log_lvl & diag_debug_mask)) { \ - ipc_log_string(diag_ipc_log, \ - "[%s] " msg, __func__, ##__VA_ARGS__); \ - } \ - } while (0) -#else -#define DIAG_LOG(log_lvl, msg, ...) #endif +#define DIAG_LOG(log_lvl, msg, ...) ((void)0) #endif diff --git a/drivers/char/diag/diagfwd_cntl.c b/drivers/char/diag/diagfwd_cntl.c index 4b1df988c527..740c62150cbc 100644 --- a/drivers/char/diag/diagfwd_cntl.c +++ b/drivers/char/diag/diagfwd_cntl.c @@ -1321,7 +1321,7 @@ int diag_send_peripheral_buffering_mode(struct diag_buffering_mode_t *params) } if (!driver->buffering_flag[params->peripheral]) { - pr_err("diag: In %s, buffering flag not set for %d\n", __func__, + pr_debug("diag: In %s, buffering flag not set for %d\n", __func__, params->peripheral); return -EINVAL; } diff --git a/drivers/char/msm_smd_pkt.c b/drivers/char/msm_smd_pkt.c index 65a37a4909b5..5cd4be431e83 100644 --- a/drivers/char/msm_smd_pkt.c +++ b/drivers/char/msm_smd_pkt.c @@ -86,18 +86,11 @@ enum { SMD_PKT_INFO = 1U << 0, }; -#define SMD_PKT_INFO(x, ...) \ -do { \ - if (smd_pkt_debug_mask & SMD_PKT_INFO) { \ - ipc_log_string(smd_pkt_ilctxt, \ - "[%s]: "x, __func__, ##__VA_ARGS__); \ - } \ -} while (0) +#define SMD_PKT_INFO(x, ...) ((void)0) #define SMD_PKT_ERR(x, ...) 
\ do { \ pr_err_ratelimited("[%s]: "x, __func__, ##__VA_ARGS__); \ - ipc_log_string(smd_pkt_ilctxt, "[%s]: "x, __func__, ##__VA_ARGS__); \ } while (0) #define SMD_PKT_IOCTL_QUEUE_RX_INTENT \ diff --git a/drivers/char/random.c b/drivers/char/random.c index 3b5c4a22acb1..e66bc563c2cd 100644 --- a/drivers/char/random.c +++ b/drivers/char/random.c @@ -470,7 +470,6 @@ struct entropy_store { unsigned short add_ptr; unsigned short input_rotate; int entropy_count; - int entropy_total; unsigned int initialized:1; unsigned int last_data_init:1; __u8 last_data[EXTRACT_SIZE]; @@ -643,7 +642,7 @@ static void process_random_ready_list(void) */ static void credit_entropy_bits(struct entropy_store *r, int nbits) { - int entropy_count, orig; + int entropy_count, orig, has_initialized = 0; const int pool_size = r->poolinfo->poolfracbits; int nfrac = nbits << ENTROPY_SHIFT; @@ -698,47 +697,53 @@ retry: entropy_count = 0; } else if (entropy_count > pool_size) entropy_count = pool_size; + if ((r == &blocking_pool) && !r->initialized && + (entropy_count >> ENTROPY_SHIFT) > 128) + has_initialized = 1; if (cmpxchg(&r->entropy_count, orig, entropy_count) != orig) goto retry; - r->entropy_total += nbits; - if (!r->initialized && r->entropy_total > 128) { + if (has_initialized) { r->initialized = 1; - r->entropy_total = 0; + wake_up_interruptible(&random_read_wait); + kill_fasync(&fasync, SIGIO, POLL_IN); } trace_credit_entropy_bits(r->name, nbits, - entropy_count >> ENTROPY_SHIFT, - r->entropy_total, _RET_IP_); + entropy_count >> ENTROPY_SHIFT, _RET_IP_); if (r == &input_pool) { int entropy_bits = entropy_count >> ENTROPY_SHIFT; + struct entropy_store *other = &blocking_pool; - if (crng_init < 2 && entropy_bits >= 128) { + if (crng_init < 2) { + if (entropy_bits < 128) + return; crng_reseed(&primary_crng, r); entropy_bits = r->entropy_count >> ENTROPY_SHIFT; } + /* initialize the blocking pool if necessary */ + if (entropy_bits >= random_read_wakeup_bits && + !other->initialized) { + 
schedule_work(&other->push_work); + return; + } + /* should we wake readers? */ if (entropy_bits >= random_read_wakeup_bits && wq_has_sleeper(&random_read_wait)) { wake_up_interruptible(&random_read_wait); kill_fasync(&fasync, SIGIO, POLL_IN); } - /* If the input pool is getting full, send some - * entropy to the blocking pool until it is 75% full. + /* If the input pool is getting full, and the blocking + * pool has room, send some entropy to the blocking + * pool. */ - if (entropy_bits > random_write_wakeup_bits && - r->initialized && - r->entropy_total >= 2*random_read_wakeup_bits) { - struct entropy_store *other = &blocking_pool; - - if (other->entropy_count <= - 3 * other->poolinfo->poolfracbits / 4) { - schedule_work(&other->push_work); - r->entropy_total = 0; - } - } + if (!work_pending(&other->push_work) && + (ENTROPY_BITS(r) > 6 * r->poolinfo->poolbytes) && + (ENTROPY_BITS(other) <= 6 * other->poolinfo->poolbytes)) + schedule_work(&other->push_work); } } @@ -1071,7 +1076,6 @@ static ssize_t extract_crng_user(void __user *buf, size_t nbytes) struct timer_rand_state { cycles_t last_time; long last_delta, last_delta2; - unsigned dont_count_entropy:1; }; #define INIT_TIMER_RAND_STATE { INITIAL_JIFFIES, }; @@ -1100,8 +1104,6 @@ void add_device_randomness(const void *buf, unsigned int size) } EXPORT_SYMBOL(add_device_randomness); -static struct timer_rand_state input_timer_state = INIT_TIMER_RAND_STATE; - /* * This function adds entropy to the entropy "pool" by using timing * delays. 
It uses the timer_rand_state structure to make an estimate @@ -1122,8 +1124,6 @@ static void add_timer_randomness(struct timer_rand_state *state, unsigned num) } sample; long delta, delta2, delta3; - preempt_disable(); - sample.jiffies = jiffies; sample.cycles = random_get_entropy(); sample.num = num; @@ -1135,51 +1135,38 @@ static void add_timer_randomness(struct timer_rand_state *state, unsigned num) * We take into account the first, second and third-order deltas * in order to make our estimate. */ + delta = sample.jiffies - state->last_time; + state->last_time = sample.jiffies; - if (!state->dont_count_entropy) { - delta = sample.jiffies - state->last_time; - state->last_time = sample.jiffies; + delta2 = delta - state->last_delta; + state->last_delta = delta; - delta2 = delta - state->last_delta; - state->last_delta = delta; + delta3 = delta2 - state->last_delta2; + state->last_delta2 = delta2; - delta3 = delta2 - state->last_delta2; - state->last_delta2 = delta2; + if (delta < 0) + delta = -delta; + if (delta2 < 0) + delta2 = -delta2; + if (delta3 < 0) + delta3 = -delta3; + if (delta > delta2) + delta = delta2; + if (delta > delta3) + delta = delta3; - if (delta < 0) - delta = -delta; - if (delta2 < 0) - delta2 = -delta2; - if (delta3 < 0) - delta3 = -delta3; - if (delta > delta2) - delta = delta2; - if (delta > delta3) - delta = delta3; - - /* - * delta is now minimum absolute delta. - * Round down by 1 bit on general principles, - * and limit entropy entimate to 12 bits. - */ - credit_entropy_bits(r, min_t(int, fls(delta>>1), 11)); - } - preempt_enable(); + /* + * delta is now minimum absolute delta. + * Round down by 1 bit on general principles, + * and limit entropy estimate to 12 bits. 
+ */ + credit_entropy_bits(r, min_t(int, fls(delta>>1), 11)); } void add_input_randomness(unsigned int type, unsigned int code, unsigned int value) { - static unsigned char last_value; - - /* ignore autorepeat and the like */ - if (value == last_value) - return; - - last_value = value; - add_timer_randomness(&input_timer_state, - (type << 4) ^ code ^ (code >> 4) ^ value); - trace_add_input_randomness(ENTROPY_BITS(&input_pool)); + return; } EXPORT_SYMBOL_GPL(add_input_randomness); @@ -1546,6 +1533,11 @@ static ssize_t extract_entropy_user(struct entropy_store *r, void __user *buf, int large_request = (nbytes > 256); trace_extract_entropy_user(r->name, nbytes, ENTROPY_BITS(r), _RET_IP_); + if (!r->initialized && r->pull) { + xfer_secondary_pool(r, ENTROPY_BITS(r->pull)/8); + if (!r->initialized) + return 0; + } xfer_secondary_pool(r, nbytes); nbytes = account(r, nbytes, 0, 0); @@ -1642,6 +1634,56 @@ void get_random_bytes(void *buf, int nbytes) } EXPORT_SYMBOL(get_random_bytes); + +/* + * Each time the timer fires, we expect that we got an unpredictable + * jump in the cycle counter. Even if the timer is running on another + * CPU, the timer activity will be touching the stack of the CPU that is + * generating entropy.. + * + * Note that we don't re-arm the timer in the timer itself - we are + * happy to be scheduled away, since that just makes the load more + * complex, but we do not want the timer to keep ticking unless the + * entropy loop is running. + * + * So the re-arming always happens in the entropy loop itself. + */ +static void entropy_timer(struct timer_list *t) +{ + credit_entropy_bits(&input_pool, 1); +} + +/* + * If we have an actual cycle counter, see if we can + * generate enough entropy with timing noise + */ +static void try_to_generate_entropy(void) +{ + struct { + unsigned long now; + struct timer_list timer; + } stack; + + stack.now = random_get_entropy(); + + /* Slow counter - or none. 
Don't even bother */ + if (stack.now == random_get_entropy()) + return; + + timer_setup_on_stack(&stack.timer, entropy_timer, 0); + while (!crng_ready()) { + if (!timer_pending(&stack.timer)) + mod_timer(&stack.timer, jiffies+1); + mix_pool_bytes(&input_pool, &stack.now, sizeof(stack.now)); + schedule(); + stack.now = random_get_entropy(); + } + + del_timer_sync(&stack.timer); + destroy_timer_on_stack(&stack.timer); + mix_pool_bytes(&input_pool, &stack.now, sizeof(stack.now)); +} + /* * Wait for the urandom pool to be seeded and thus guaranteed to supply * cryptographically secure random numbers. This applies to: the /dev/urandom @@ -1656,7 +1698,17 @@ int wait_for_random_bytes(void) { if (likely(crng_ready())) return 0; - return wait_event_interruptible(crng_init_wait, crng_ready()); + + do { + int ret; + ret = wait_event_interruptible_timeout(crng_init_wait, crng_ready(), HZ); + if (ret) + return ret > 0 ? 0 : ret; + + try_to_generate_entropy(); + } while (!crng_ready()); + + return 0; } EXPORT_SYMBOL(wait_for_random_bytes); @@ -1739,7 +1791,7 @@ void get_random_bytes_arch(void *buf, int nbytes) if (!arch_get_random_long(&v)) break; - + memcpy(p, &v, chunk); p += chunk; nbytes -= chunk; @@ -1750,7 +1802,6 @@ void get_random_bytes_arch(void *buf, int nbytes) } EXPORT_SYMBOL(get_random_bytes_arch); - /* * init_std_data - initialize pool with system data * @@ -1842,14 +1893,14 @@ _random_read(int nonblock, char __user *buf, size_t nbytes) return -EAGAIN; wait_event_interruptible(random_read_wait, - ENTROPY_BITS(&input_pool) >= - random_read_wakeup_bits); + blocking_pool.initialized && + (ENTROPY_BITS(&input_pool) >= random_read_wakeup_bits)); if (signal_pending(current)) return -ERESTARTSYS; } } -static ssize_t +static ssize_t __maybe_unused random_read(struct file *file, char __user *buf, size_t nbytes, loff_t *ppos) { return _random_read(file->f_flags & O_NONBLOCK, buf, nbytes); @@ -1998,10 +2049,11 @@ static int random_fasync(int fd, struct file *filp, int on) } 
const struct file_operations random_fops = { - .read = random_read, + .read = urandom_read, .write = random_write, .poll = random_poll, .unlocked_ioctl = random_ioctl, + .compat_ioctl = compat_ptr_ioctl, .fasync = random_fasync, .llseek = noop_llseek, }; @@ -2010,6 +2062,7 @@ const struct file_operations urandom_fops = { .read = urandom_read, .write = random_write, .unlocked_ioctl = random_ioctl, + .compat_ioctl = compat_ptr_ioctl, .fasync = random_fasync, .llseek = noop_llseek, }; diff --git a/drivers/clk/qcom/clk-cpu-osm.c b/drivers/clk/qcom/clk-cpu-osm.c index a397013479f1..73abd958feb3 100644 --- a/drivers/clk/qcom/clk-cpu-osm.c +++ b/drivers/clk/qcom/clk-cpu-osm.c @@ -106,14 +106,9 @@ static inline int clk_osm_read_reg(struct clk_osm *c, u32 offset) return readl_relaxed(c->vbase + offset); } -static inline int clk_osm_read_reg_no_log(struct clk_osm *c, u32 offset) -{ - return readl_relaxed_no_log(c->vbase + offset); -} - static inline int clk_osm_mb(struct clk_osm *c) { - return readl_relaxed_no_log(c->vbase + ENABLE_REG); + return readl_relaxed(c->vbase + ENABLE_REG); } static long clk_osm_list_rate(struct clk_hw *hw, unsigned int n, @@ -914,7 +909,7 @@ static u64 clk_osm_get_cpu_cycle_counter(int cpu) * core DCVS is disabled. */ core_num = parent->per_core_dcvs ? 
c->core_num : 0; - val = clk_osm_read_reg_no_log(parent, + val = clk_osm_read_reg(parent, OSM_CYCLE_COUNTER_STATUS_REG(core_num)); if (val < c->prev_cycle_counter) { diff --git a/drivers/clocksource/arm_arch_timer.c b/drivers/clocksource/arm_arch_timer.c index b7e0a1fffe04..14f5090651fb 100644 --- a/drivers/clocksource/arm_arch_timer.c +++ b/drivers/clocksource/arm_arch_timer.c @@ -101,20 +101,20 @@ void arch_timer_reg_write(int access, enum arch_timer_reg reg, u32 val, struct arch_timer *timer = to_arch_timer(clk); switch (reg) { case ARCH_TIMER_REG_CTRL: - writel_relaxed_no_log(val, timer->base + CNTP_CTL); + writel_relaxed(val, timer->base + CNTP_CTL); break; case ARCH_TIMER_REG_TVAL: - writel_relaxed_no_log(val, timer->base + CNTP_TVAL); + writel_relaxed(val, timer->base + CNTP_TVAL); break; } } else if (access == ARCH_TIMER_MEM_VIRT_ACCESS) { struct arch_timer *timer = to_arch_timer(clk); switch (reg) { case ARCH_TIMER_REG_CTRL: - writel_relaxed_no_log(val, timer->base + CNTV_CTL); + writel_relaxed(val, timer->base + CNTV_CTL); break; case ARCH_TIMER_REG_TVAL: - writel_relaxed_no_log(val, timer->base + CNTV_TVAL); + writel_relaxed(val, timer->base + CNTV_TVAL); break; } } else { @@ -132,20 +132,20 @@ u32 arch_timer_reg_read(int access, enum arch_timer_reg reg, struct arch_timer *timer = to_arch_timer(clk); switch (reg) { case ARCH_TIMER_REG_CTRL: - val = readl_relaxed_no_log(timer->base + CNTP_CTL); + val = readl_relaxed(timer->base + CNTP_CTL); break; case ARCH_TIMER_REG_TVAL: - val = readl_relaxed_no_log(timer->base + CNTP_TVAL); + val = readl_relaxed(timer->base + CNTP_TVAL); break; } } else if (access == ARCH_TIMER_MEM_VIRT_ACCESS) { struct arch_timer *timer = to_arch_timer(clk); switch (reg) { case ARCH_TIMER_REG_CTRL: - val = readl_relaxed_no_log(timer->base + CNTV_CTL); + val = readl_relaxed(timer->base + CNTV_CTL); break; case ARCH_TIMER_REG_TVAL: - val = readl_relaxed_no_log(timer->base + CNTV_TVAL); + val = readl_relaxed(timer->base + CNTV_TVAL); 
break; } } else { @@ -893,11 +893,11 @@ void arch_timer_mem_get_cval(u32 *lo, u32 *hi) if (!arch_counter_base) return; - ctrl = readl_relaxed_no_log(arch_counter_base + CNTV_CTL); + ctrl = readl_relaxed(arch_counter_base + CNTV_CTL); if (ctrl & ARCH_TIMER_CTRL_ENABLE) { - *lo = readl_relaxed_no_log(arch_counter_base + CNTCVAL_LO); - *hi = readl_relaxed_no_log(arch_counter_base + CNTCVAL_HI); + *lo = readl_relaxed(arch_counter_base + CNTCVAL_LO); + *hi = readl_relaxed(arch_counter_base + CNTCVAL_HI); } } @@ -916,9 +916,9 @@ static u64 arch_counter_get_cntvct_mem(void) u32 vct_lo, vct_hi, tmp_hi; do { - vct_hi = readl_relaxed_no_log(arch_counter_base + CNTVCT_HI); - vct_lo = readl_relaxed_no_log(arch_counter_base + CNTVCT_LO); - tmp_hi = readl_relaxed_no_log(arch_counter_base + CNTVCT_HI); + vct_hi = readl_relaxed(arch_counter_base + CNTVCT_HI); + vct_lo = readl_relaxed(arch_counter_base + CNTVCT_LO); + tmp_hi = readl_relaxed(arch_counter_base + CNTVCT_HI); } while (vct_hi != tmp_hi); return ((u64) vct_hi << 32) | vct_lo; @@ -1295,7 +1295,7 @@ arch_timer_mem_find_best_frame(struct arch_timer_mem *timer_mem) return NULL; } - cnttidr = readl_relaxed_no_log(cntctlbase + CNTTIDR); + cnttidr = readl_relaxed(cntctlbase + CNTTIDR); /* * Try to find a virtual capable frame. 
Otherwise fall back to a diff --git a/drivers/cpufreq/cpufreq_governor.c b/drivers/cpufreq/cpufreq_governor.c index 146237aab395..69fc5cf4782f 100644 --- a/drivers/cpufreq/cpufreq_governor.c +++ b/drivers/cpufreq/cpufreq_governor.c @@ -276,7 +276,7 @@ static void dbs_update_util_handler(struct update_util_data *data, u64 time, struct policy_dbs_info *policy_dbs = cdbs->policy_dbs; u64 delta_ns, lst; - if (!cpufreq_can_do_remote_dvfs(policy_dbs->policy)) + if (!cpufreq_this_cpu_can_update(policy_dbs->policy)) return; /* diff --git a/drivers/cpuidle/cpuidle.c b/drivers/cpuidle/cpuidle.c index 235d21cda429..fc2cc0084c05 100644 --- a/drivers/cpuidle/cpuidle.c +++ b/drivers/cpuidle/cpuidle.c @@ -37,6 +37,24 @@ static int enabled_devices; static int off __read_mostly; static int initialized __read_mostly; +#ifdef CONFIG_SMP +static atomic_t idled = ATOMIC_INIT(0); + +#if NR_CPUS > 32 +#error idled CPU mask not big enough for NR_CPUS +#endif + +void cpuidle_set_idle_cpu(unsigned int cpu) +{ + atomic_or(BIT(cpu), &idled); +} + +void cpuidle_clear_idle_cpu(unsigned int cpu) +{ + atomic_andnot(BIT(cpu), &idled); +} +#endif + int cpuidle_disabled(void) { return off; @@ -239,17 +257,17 @@ int cpuidle_enter_state(struct cpuidle_device *dev, struct cpuidle_driver *drv, if (!cpuidle_state_is_coupled(drv, index)) local_irq_enable(); - diff = ktime_us_delta(time_end, time_start); - if (diff > INT_MAX) - diff = INT_MAX; - - dev->last_residency = (int) diff; - if (entered_state >= 0) { - /* Update cpuidle counters */ - /* This can be moved to within driver enter routine + /* + * Update cpuidle counters + * This can be moved to within driver enter routine, * but that results in multiple copies of same code. 
*/ + diff = ktime_us_delta(time_end, time_start); + if (diff > INT_MAX) + diff = INT_MAX; + + dev->last_residency = (int)diff; dev->states_usage[entered_state].time += dev->last_residency; dev->states_usage[entered_state].usage++; } else { @@ -643,22 +661,12 @@ EXPORT_SYMBOL_GPL(cpuidle_register); static void wake_up_idle_cpus(void *v) { - int cpu; - struct cpumask cpus; + unsigned long cpus = atomic_read(&idled) & *cpumask_bits(to_cpumask(v)); - preempt_disable(); - if (v) { - cpumask_andnot(&cpus, v, cpu_isolated_mask); - cpumask_and(&cpus, &cpus, cpu_online_mask); - } else - cpumask_andnot(&cpus, cpu_online_mask, cpu_isolated_mask); - - for_each_cpu(cpu, &cpus) { - if (cpu == smp_processor_id()) - continue; - wake_up_if_idle(cpu); - } - preempt_enable(); + /* Use READ_ONCE to get the isolated mask outside cpu_add_remove_lock */ + cpus &= ~READ_ONCE(*cpumask_bits(cpu_isolated_mask)); + if (cpus) + arch_send_wakeup_ipi_mask(to_cpumask(&cpus)); } /* diff --git a/drivers/cpuidle/lpm-levels.c b/drivers/cpuidle/lpm-levels.c index 3561728ef7c6..f90065bb6213 100644 --- a/drivers/cpuidle/lpm-levels.c +++ b/drivers/cpuidle/lpm-levels.c @@ -41,7 +41,6 @@ #include #include #include -#include #include #include #include @@ -60,30 +59,6 @@ #define PSCI_AFFINITY_LEVEL(lvl) ((lvl & 0x3) << 24) #define BIAS_HYST (bias_hyst * NSEC_PER_MSEC) -enum { - MSM_LPM_LVL_DBG_SUSPEND_LIMITS = BIT(0), - MSM_LPM_LVL_DBG_IDLE_LIMITS = BIT(1), -}; - -enum debug_event { - CPU_ENTER, - CPU_EXIT, - CLUSTER_ENTER, - CLUSTER_EXIT, - CPU_HP_STARTING, - CPU_HP_DYING, -}; - -struct lpm_debug { - u64 time; - enum debug_event evt; - int cpu; - uint32_t arg1; - uint32_t arg2; - uint32_t arg3; - uint32_t arg4; -}; - static struct system_pm_ops *sys_pm_ops; @@ -122,9 +97,6 @@ static bool suspend_in_progress; static struct hrtimer lpm_hrtimer; static DEFINE_PER_CPU(struct hrtimer, histtimer); static DEFINE_PER_CPU(struct hrtimer, biastimer); -static struct lpm_debug *lpm_debug; -static phys_addr_t 
lpm_debug_phys; -static const int num_dbg_elements = 0x100; static void cluster_unprepare(struct lpm_cluster *cluster, const struct cpumask *cpu, int child_idx, bool from_idle, @@ -306,38 +278,10 @@ int lpm_get_latency(struct latency_level *level, uint32_t *latency) } EXPORT_SYMBOL(lpm_get_latency); -static void update_debug_pc_event(enum debug_event event, uint32_t arg1, - uint32_t arg2, uint32_t arg3, uint32_t arg4) -{ - struct lpm_debug *dbg; - int idx; - static DEFINE_SPINLOCK(debug_lock); - static int pc_event_index; - - if (!lpm_debug) - return; - - spin_lock(&debug_lock); - idx = pc_event_index++; - dbg = &lpm_debug[idx & (num_dbg_elements - 1)]; - - dbg->evt = event; - dbg->time = arch_counter_get_cntvct(); - dbg->cpu = raw_smp_processor_id(); - dbg->arg1 = arg1; - dbg->arg2 = arg2; - dbg->arg3 = arg3; - dbg->arg4 = arg4; - spin_unlock(&debug_lock); -} - static int lpm_dying_cpu(unsigned int cpu) { struct lpm_cluster *cluster = per_cpu(cpu_lpm, cpu)->parent; - update_debug_pc_event(CPU_HP_DYING, cpu, - cluster->num_children_in_sync.bits[0], - cluster->child_cpus.bits[0], false); cluster_prepare(cluster, get_cpu_mask(cpu), NR_LPM_LEVELS, false, 0); return 0; } @@ -346,9 +290,6 @@ static int lpm_starting_cpu(unsigned int cpu) { struct lpm_cluster *cluster = per_cpu(cpu_lpm, cpu)->parent; - update_debug_pc_event(CPU_HP_STARTING, cpu, - cluster->num_children_in_sync.bits[0], - cluster->child_cpus.bits[0], false); cluster_unprepare(cluster, get_cpu_mask(cpu), NR_LPM_LEVELS, false, 0, true); return 0; @@ -734,7 +675,7 @@ static int cpu_power_select(struct cpuidle_device *dev, min_residency = pwr_params->min_residency; max_residency = pwr_params->max_residency; - if (latency_us < lvl_latency_us) + if (latency_us <= lvl_latency_us) break; if (next_event_us) { @@ -1077,7 +1018,7 @@ static int cluster_select(struct lpm_cluster *cluster, bool from_idle, &level->num_cpu_votes)) continue; - if (from_idle && latency_us < pwr_params->exit_latency) + if (from_idle && 
latency_us <= pwr_params->exit_latency) break; if (sleep_us < (pwr_params->exit_latency + @@ -1137,9 +1078,6 @@ static int cluster_configure(struct lpm_cluster *cluster, int idx, return -EPERM; if (idx != cluster->default_level) { - update_debug_pc_event(CLUSTER_ENTER, idx, - cluster->num_children_in_sync.bits[0], - cluster->child_cpus.bits[0], from_idle); trace_cluster_enter(cluster->cluster_name, idx, cluster->num_children_in_sync.bits[0], cluster->child_cpus.bits[0], from_idle); @@ -1302,9 +1240,6 @@ static void cluster_unprepare(struct lpm_cluster *cluster, if (sys_pm_ops && sys_pm_ops->exit) sys_pm_ops->exit(success); - update_debug_pc_event(CLUSTER_EXIT, cluster->last_level, - cluster->num_children_in_sync.bits[0], - cluster->child_cpus.bits[0], from_idle); trace_cluster_exit(cluster->cluster_name, cluster->last_level, cluster->num_children_in_sync.bits[0], cluster->child_cpus.bits[0], from_idle); @@ -1418,15 +1353,11 @@ static bool psci_enter_sleep(struct lpm_cpu *cpu, int idx, bool from_idle) affinity_level = PSCI_AFFINITY_LEVEL(affinity_level); state_id += power_state + affinity_level + cpu->levels[idx].psci_id; - update_debug_pc_event(CPU_ENTER, state_id, - 0xdeaffeed, 0xdeaffeed, from_idle); stop_critical_timings(); success = !arm_cpuidle_suspend(state_id); start_critical_timings(); - update_debug_pc_event(CPU_EXIT, state_id, - success, 0xdeaffeed, from_idle); if (from_idle && cpu->levels[idx].use_bc_timer) tick_broadcast_exit(); @@ -1511,7 +1442,9 @@ static int lpm_cpuidle_enter(struct cpuidle_device *dev, if (need_resched()) goto exit; + cpuidle_set_idle_cpu(dev->cpu); success = psci_enter_sleep(cpu, idx, true); + cpuidle_clear_idle_cpu(dev->cpu); exit: end_time = ktime_to_ns(ktime_get()); @@ -1803,11 +1736,9 @@ static const struct platform_s2idle_ops lpm_s2idle_ops = { static int lpm_probe(struct platform_device *pdev) { int ret; - int size; unsigned int cpu; struct hrtimer *cpu_histtimer; struct kobject *module_kobj = NULL; - struct md_region 
md_entry; get_online_cpus(); lpm_root_node = lpm_of_parse_cluster(pdev); @@ -1839,10 +1770,6 @@ static int lpm_probe(struct platform_device *pdev) cluster_timer_init(lpm_root_node); - size = num_dbg_elements * sizeof(struct lpm_debug); - lpm_debug = dma_alloc_coherent(&pdev->dev, size, - &lpm_debug_phys, GFP_KERNEL); - register_cluster_lpm_stats(lpm_root_node, NULL); ret = cluster_cpuidle_register(lpm_root_node); @@ -1871,14 +1798,6 @@ static int lpm_probe(struct platform_device *pdev) goto failed; } - /* Add lpm_debug to Minidump*/ - strlcpy(md_entry.name, "KLPMDEBUG", sizeof(md_entry.name)); - md_entry.virt_addr = (uintptr_t)lpm_debug; - md_entry.phys_addr = lpm_debug_phys; - md_entry.size = size; - if (msm_minidump_add_region(&md_entry)) - pr_info("Failed to add lpm_debug in Minidump\n"); - return 0; failed: free_cluster_node(lpm_root_node); diff --git a/drivers/crypto/msm/ota_crypto.c b/drivers/crypto/msm/ota_crypto.c index e992324ce5ba..56b42d54f677 100644 --- a/drivers/crypto/msm/ota_crypto.c +++ b/drivers/crypto/msm/ota_crypto.c @@ -141,9 +141,11 @@ struct qcota_stat { u64 f9_op_fail; }; static struct qcota_stat _qcota_stat; +#ifdef CONFIG_DEBUG_FS static struct dentry *_debug_dent; static char _debug_read_buf[DEBUG_MAX_RW_BUF]; static int _debug_qcota; +#endif static struct ota_dev_control *qcota_control(void) { @@ -834,6 +836,7 @@ static struct platform_driver qcota_plat_driver = { }, }; +#ifdef CONFIG_DEBUG_FS static int _disp_stats(void) { struct qcota_stat *pstat; @@ -985,15 +988,15 @@ err: debugfs_remove_recursive(_debug_dent); return rc; } +#endif static int __init qcota_init(void) { - int rc; struct ota_dev_control *podev; - rc = _qcota_debug_init(); - if (rc) - return rc; +#ifdef CONFIG_DEBUG_FS + _qcota_debug_init(); +#endif podev = &qcota_dev; INIT_LIST_HEAD(&podev->ready_commands); @@ -1007,7 +1010,9 @@ static int __init qcota_init(void) } static void __exit qcota_exit(void) { +#ifdef CONFIG_DEBUG_FS debugfs_remove_recursive(_debug_dent); +#endif 
platform_driver_unregister(&qcota_plat_driver); } diff --git a/drivers/crypto/msm/qcedev.c b/drivers/crypto/msm/qcedev.c index 1e26d92d3443..c74844f7cbed 100644 --- a/drivers/crypto/msm/qcedev.c +++ b/drivers/crypto/msm/qcedev.c @@ -233,9 +233,11 @@ struct qcedev_stat { }; static struct qcedev_stat _qcedev_stat; +#ifdef CONFIG_DEBUG_FS static struct dentry *_debug_dent; static char _debug_read_buf[DEBUG_MAX_RW_BUF]; static int _debug_qcedev; +#endif static struct qcedev_control *qcedev_minor_to_control(unsigned int n) { @@ -2231,6 +2233,7 @@ static struct platform_driver qcedev_plat_driver = { }, }; +#ifdef CONFIG_DEBUG_FS static int _disp_stats(int id) { struct qcedev_stat *pstat; @@ -2320,16 +2323,22 @@ err: debugfs_remove_recursive(_debug_dent); return rc; } +#endif static int qcedev_init(void) { +#ifdef CONFIG_DEBUG_FS _qcedev_debug_init(); +#endif + return platform_driver_register(&qcedev_plat_driver); } static void qcedev_exit(void) { +#ifdef CONFIG_DEBUG_FS debugfs_remove_recursive(_debug_dent); +#endif platform_driver_unregister(&qcedev_plat_driver); } diff --git a/drivers/crypto/msm/qcrypto.c b/drivers/crypto/msm/qcrypto.c index 80836f04b527..676b2799a336 100644 --- a/drivers/crypto/msm/qcrypto.c +++ b/drivers/crypto/msm/qcrypto.c @@ -125,8 +125,10 @@ struct crypto_stat { u64 ahash_op_fail; }; static struct crypto_stat _qcrypto_stat; +#ifdef CONFIG_DEBUG_FS static struct dentry *_debug_dent; static char _debug_read_buf[DEBUG_MAX_RW_BUF]; +#endif static bool _qcrypto_init_assign; struct crypto_priv; struct qcrypto_req_control { @@ -1147,6 +1149,7 @@ static void _qcrypto_cra_aead_aes_exit(struct crypto_aead *tfm) ctx->ahash_aead_aes192_fb = NULL; } +#ifdef CONFIG_DEBUG_FS static int _disp_stats(int id) { struct crypto_stat *pstat; @@ -1320,6 +1323,7 @@ static int _disp_stats(int id) i, cp->cpu_req[i]); return len; } +#endif static void _qcrypto_remove_engine(struct crypto_engine *pengine) { @@ -5462,6 +5466,7 @@ static struct platform_driver __qcrypto = { }, 
}; +#ifdef CONFIG_DEBUG_FS static int _debug_qcrypto; static int _debug_stats_open(struct inode *inode, struct file *file) @@ -5550,12 +5555,16 @@ err: debugfs_remove_recursive(_debug_dent); return rc; } +#endif static int __init _qcrypto_init(void) { struct crypto_priv *pcp = &qcrypto_dev; +#ifdef CONFIG_DEBUG_FS _qcrypto_debug_init(); +#endif + INIT_LIST_HEAD(&pcp->alg_list); INIT_LIST_HEAD(&pcp->engine_list); init_llist_head(&pcp->ordered_resp_list); @@ -5580,7 +5589,9 @@ static int __init _qcrypto_init(void) static void __exit _qcrypto_exit(void) { pr_debug("%s Unregister QCRYPTO\n", __func__); +#ifdef CONFIG_DEBUG_FS debugfs_remove_recursive(_debug_dent); +#endif platform_driver_unregister(&__qcrypto); } diff --git a/drivers/dma-buf/dma-buf.c b/drivers/dma-buf/dma-buf.c index a9c82e9b8892..652e4108c07c 100644 --- a/drivers/dma-buf/dma-buf.c +++ b/drivers/dma-buf/dma-buf.c @@ -47,6 +47,18 @@ static atomic_long_t name_counter; +static struct kmem_cache *kmem_attach_pool; +static struct kmem_cache *kmem_dma_buf_pool; + +void __init init_dma_buf_kmem_pool(void) +{ + kmem_attach_pool = KMEM_CACHE(dma_buf_attachment, SLAB_HWCACHE_ALIGN | SLAB_PANIC); + kmem_dma_buf_pool = kmem_cache_create("dma_buf", + (sizeof(struct dma_buf) + sizeof(struct reservation_object)), + (sizeof(struct dma_buf) + sizeof(struct reservation_object)), + SLAB_HWCACHE_ALIGN | SLAB_PANIC, NULL); +} + static inline int is_dma_buf_file(struct file *); struct dma_buf_list { @@ -138,7 +150,10 @@ static int dma_buf_release(struct inode *inode, struct file *file) module_put(dmabuf->owner); kfree(dmabuf->buf_name); - kfree(dmabuf); + if (dmabuf->from_kmem) + kmem_cache_free(kmem_dma_buf_pool, dmabuf); + else + kfree(dmabuf); return 0; } @@ -564,6 +579,7 @@ struct dma_buf *dma_buf_export(const struct dma_buf_export_info *exp_info) char *bufname; int ret; long cnt; + bool from_kmem; if (!exp_info->resv) alloc_size += sizeof(struct reservation_object); @@ -592,7 +608,16 @@ struct dma_buf 
*dma_buf_export(const struct dma_buf_export_info *exp_info) goto err_module; } - dmabuf = kzalloc(alloc_size, GFP_KERNEL); + from_kmem = (alloc_size == + (sizeof(struct dma_buf) + sizeof(struct reservation_object))); + + if (from_kmem) { + dmabuf = kmem_cache_zalloc(kmem_dma_buf_pool, GFP_KERNEL); + dmabuf->from_kmem = true; + } else { + dmabuf = kzalloc(alloc_size, GFP_KERNEL); + } + if (!dmabuf) { ret = -ENOMEM; goto err_name; @@ -637,7 +662,10 @@ struct dma_buf *dma_buf_export(const struct dma_buf_export_info *exp_info) return dmabuf; err_dmabuf: - kfree(dmabuf); + if (from_kmem) + kmem_cache_free(kmem_dma_buf_pool, dmabuf); + else + kfree(dmabuf); err_name: kfree(bufname); err_module: @@ -744,8 +772,8 @@ struct dma_buf_attachment *dma_buf_attach(struct dma_buf *dmabuf, if (WARN_ON(!dmabuf || !dev)) return ERR_PTR(-EINVAL); - attach = kzalloc(sizeof(*attach), GFP_KERNEL); - if (!attach) + attach = kmem_cache_zalloc(kmem_attach_pool, GFP_KERNEL); + if (attach == NULL) return ERR_PTR(-ENOMEM); attach->dev = dev; @@ -764,7 +792,7 @@ struct dma_buf_attachment *dma_buf_attach(struct dma_buf *dmabuf, return attach; err_attach: - kfree(attach); + kmem_cache_free(kmem_attach_pool, attach); mutex_unlock(&dmabuf->lock); return ERR_PTR(ret); } @@ -789,7 +817,7 @@ void dma_buf_detach(struct dma_buf *dmabuf, struct dma_buf_attachment *attach) dmabuf->ops->detach(dmabuf, attach); mutex_unlock(&dmabuf->lock); - kfree(attach); + kmem_cache_free(kmem_attach_pool, attach); } EXPORT_SYMBOL_GPL(dma_buf_detach); diff --git a/drivers/dma-buf/sync_file.c b/drivers/dma-buf/sync_file.c index bf65e634590b..72f41d5f03eb 100644 --- a/drivers/dma-buf/sync_file.c +++ b/drivers/dma-buf/sync_file.c @@ -27,12 +27,18 @@ #include static const struct file_operations sync_file_fops; +static struct kmem_cache *kmem_sync_file_pool; + +void __init init_sync_kmem_pool(void) +{ + kmem_sync_file_pool = KMEM_CACHE(sync_file, SLAB_HWCACHE_ALIGN | SLAB_PANIC); +} static struct sync_file 
*sync_file_alloc(void) { struct sync_file *sync_file; - sync_file = kzalloc(sizeof(*sync_file), GFP_KERNEL); + sync_file = kmem_cache_zalloc(kmem_sync_file_pool, GFP_KERNEL); if (!sync_file) return NULL; @@ -48,7 +54,7 @@ static struct sync_file *sync_file_alloc(void) return sync_file; err: - kfree(sync_file); + kmem_cache_free(kmem_sync_file_pool, sync_file); return NULL; } @@ -307,7 +313,7 @@ static int sync_file_release(struct inode *inode, struct file *file) if (test_bit(POLL_ENABLED, &sync_file->flags)) dma_fence_remove_callback(sync_file->fence, &sync_file->cb); dma_fence_put(sync_file->fence); - kfree(sync_file); + kmem_cache_free(kmem_sync_file_pool, sync_file); return 0; } @@ -408,6 +414,7 @@ static long sync_file_ioctl_fence_info(struct sync_file *sync_file, { struct sync_file_info info; struct sync_fence_info *fence_info = NULL; + struct sync_fence_info fence_info_onstack[4] __aligned(8); struct dma_fence **fences; __u32 size; int num_fences, ret, i; @@ -437,9 +444,15 @@ static long sync_file_ioctl_fence_info(struct sync_file *sync_file, return -EINVAL; size = num_fences * sizeof(*fence_info); - fence_info = kzalloc(size, GFP_KERNEL); - if (!fence_info) - return -ENOMEM; + + if (likely(size <= sizeof(fence_info_onstack))) { + memset(fence_info_onstack, 0, sizeof(fence_info_onstack)); + fence_info = fence_info_onstack; + } else { + fence_info = kzalloc(size, GFP_KERNEL); + if (!fence_info) + return -ENOMEM; + } for (i = 0; i < num_fences; i++) { int status = sync_fill_fence_info(fences[i], &fence_info[i]); @@ -462,7 +475,8 @@ no_fences: ret = 0; out: - kfree(fence_info); + if (unlikely(fence_info != fence_info_onstack)) + kfree(fence_info); return ret; } diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c index faaaf10311ec..e4de053a39de 100644 --- a/drivers/dma/dmaengine.c +++ b/drivers/dma/dmaengine.c @@ -1087,6 +1087,9 @@ int dma_async_device_register(struct dma_device *device) dma_channel_rebalance(); mutex_unlock(&dma_list_mutex); + if 
(!chancnt) + kfree(idr_ref); + return 0; err_out: diff --git a/drivers/dma/qcom/gpi.c b/drivers/dma/qcom/gpi.c index a6cc0b9336c7..b7ed93155534 100644 --- a/drivers/dma/qcom/gpi.c +++ b/drivers/dma/qcom/gpi.c @@ -43,16 +43,10 @@ #define GPI_LOG(gpi_dev, fmt, ...) do { \ if (gpi_dev->klog_lvl != LOG_LVL_MASK_ALL) \ dev_dbg(gpi_dev->dev, "%s: " fmt, __func__, ##__VA_ARGS__); \ - if (gpi_dev->ilctxt && gpi_dev->ipc_log_lvl != LOG_LVL_MASK_ALL) \ - ipc_log_string(gpi_dev->ilctxt, \ - "%s: " fmt, __func__, ##__VA_ARGS__); \ } while (0) #define GPI_ERR(gpi_dev, fmt, ...) do { \ if (gpi_dev->klog_lvl >= LOG_LVL_ERROR) \ dev_err(gpi_dev->dev, "%s: " fmt, __func__, ##__VA_ARGS__); \ - if (gpi_dev->ilctxt && gpi_dev->ipc_log_lvl >= LOG_LVL_ERROR) \ - ipc_log_string(gpi_dev->ilctxt, \ - "%s: " fmt, __func__, ##__VA_ARGS__); \ } while (0) /* gpii specific logging macros */ @@ -60,28 +54,16 @@ if (gpii->klog_lvl >= LOG_LVL_INFO) \ pr_info("%s:%u:%s: " fmt, gpii->label, ch, \ __func__, ##__VA_ARGS__); \ - if (gpii->ilctxt && gpii->ipc_log_lvl >= LOG_LVL_INFO) \ - ipc_log_string(gpii->ilctxt, \ - "ch:%u %s: " fmt, ch, \ - __func__, ##__VA_ARGS__); \ } while (0) #define GPII_ERR(gpii, ch, fmt, ...) do { \ if (gpii->klog_lvl >= LOG_LVL_ERROR) \ pr_err("%s:%u:%s: " fmt, gpii->label, ch, \ __func__, ##__VA_ARGS__); \ - if (gpii->ilctxt && gpii->ipc_log_lvl >= LOG_LVL_ERROR) \ - ipc_log_string(gpii->ilctxt, \ - "ch:%u %s: " fmt, ch, \ - __func__, ##__VA_ARGS__); \ } while (0) #define GPII_CRITIC(gpii, ch, fmt, ...) 
do { \ if (gpii->klog_lvl >= LOG_LVL_CRITICAL) \ pr_err("%s:%u:%s: " fmt, gpii->label, ch, \ __func__, ##__VA_ARGS__); \ - if (gpii->ilctxt && gpii->ipc_log_lvl >= LOG_LVL_CRITICAL) \ - ipc_log_string(gpii->ilctxt, \ - "ch:%u %s: " fmt, ch, \ - __func__, ##__VA_ARGS__); \ } while (0) enum DEBUG_LOG_LVL { @@ -109,19 +91,11 @@ enum EV_PRIORITY { if (gpii->klog_lvl >= LOG_LVL_REG_ACCESS) \ pr_info("%s:%u:%s: " fmt, gpii->label, \ ch, __func__, ##__VA_ARGS__); \ - if (gpii->ilctxt && gpii->ipc_log_lvl >= LOG_LVL_REG_ACCESS) \ - ipc_log_string(gpii->ilctxt, \ - "ch:%u %s: " fmt, ch, \ - __func__, ##__VA_ARGS__); \ } while (0) #define GPII_VERB(gpii, ch, fmt, ...) do { \ if (gpii->klog_lvl >= LOG_LVL_VERBOSE) \ pr_info("%s:%u:%s: " fmt, gpii->label, \ ch, __func__, ##__VA_ARGS__); \ - if (gpii->ilctxt && gpii->ipc_log_lvl >= LOG_LVL_VERBOSE) \ - ipc_log_string(gpii->ilctxt, \ - "ch:%u %s: " fmt, ch, \ - __func__, ##__VA_ARGS__); \ } while (0) #else diff --git a/drivers/firmware/qcom/Kconfig b/drivers/firmware/qcom/Kconfig index 61c797430bd6..0cb4ac699142 100644 --- a/drivers/firmware/qcom/Kconfig +++ b/drivers/firmware/qcom/Kconfig @@ -1,6 +1,5 @@ config MSM_TZ_LOG tristate "MSM Trust Zone (TZ) Log Driver" - depends on DEBUG_FS help This option enables a driver with a debugfs interface for messages produced by the Secure code (Trust zone). 
These messages provide diff --git a/drivers/firmware/qcom/tz_log.c b/drivers/firmware/qcom/tz_log.c index f8d3dd338506..a1001156f217 100644 --- a/drivers/firmware/qcom/tz_log.c +++ b/drivers/firmware/qcom/tz_log.c @@ -1118,17 +1118,16 @@ static int tz_log_probe(struct platform_device *pdev) tzdbg.diag_buf = (struct tzdbg_t *)ptr; - if (tzdbgfs_init(pdev)) - goto err; + if (tzdbgfs_init(pdev)) { + kfree(tzdbg.diag_buf); + tzdbg.diag_buf = NULL; + } tzdbg_register_qsee_log_buf(pdev); tzdbg_get_tz_version(); return 0; -err: - kfree(tzdbg.diag_buf); - return -ENXIO; } static int tz_log_remove(struct platform_device *pdev) diff --git a/drivers/gpu/drm/drm_irq.c b/drivers/gpu/drm/drm_irq.c index 3b04c25100ae..56a9ef5398c9 100644 --- a/drivers/gpu/drm/drm_irq.c +++ b/drivers/gpu/drm/drm_irq.c @@ -101,7 +101,7 @@ int drm_irq_install(struct drm_device *dev, int irq) { int ret; - unsigned long sh_flags = 0; + unsigned long sh_flags = IRQF_PERF_CRITICAL; if (!drm_core_check_feature(dev, DRIVER_HAVE_IRQ)) return -EINVAL; diff --git a/drivers/gpu/drm/msm/Kconfig b/drivers/gpu/drm/msm/Kconfig index 5e1c40a8ac34..b8cffaee54fc 100644 --- a/drivers/gpu/drm/msm/Kconfig +++ b/drivers/gpu/drm/msm/Kconfig @@ -73,7 +73,6 @@ config DRM_MSM_DSI_STAGING config DSI_PARSER bool "Enable DSI panel configuration parser" - depends on DYNAMIC_DEBUG default y help Choose this option if you need text parser for a DSI panel @@ -198,3 +197,7 @@ config DRM_SDE_RSC avoids the display core power collapse. A client can also register for display core power collapse events on rsc. 
+config FENCE_DEBUG + bool "Print fence name to userspace" + depends on DRM_MSM + default n diff --git a/drivers/gpu/drm/msm/Makefile b/drivers/gpu/drm/msm/Makefile index 93ee568c3a6e..9fd2c4579d8e 100644 --- a/drivers/gpu/drm/msm/Makefile +++ b/drivers/gpu/drm/msm/Makefile @@ -221,3 +221,5 @@ msm_drm-$(CONFIG_DRM_MSM) += \ msm_debugfs.o obj-$(CONFIG_DRM_MSM) += msm_drm.o + +CFLAGS_sde_crtc.o += -Wframe-larger-than=8192 diff --git a/drivers/gpu/drm/msm/dp/dp_debug.c b/drivers/gpu/drm/msm/dp/dp_debug.c index ce90c3fdcb05..bef549361cf7 100644 --- a/drivers/gpu/drm/msm/dp/dp_debug.c +++ b/drivers/gpu/drm/msm/dp/dp_debug.c @@ -14,6 +14,9 @@ #define pr_fmt(fmt) "[drm-dp] %s: " fmt, __func__ +#if defined(CONFIG_ANDROID) && !defined(CONFIG_DEBUG_FS) +#define CONFIG_DEBUG_FS +#endif #include #include diff --git a/drivers/gpu/drm/msm/dp/dp_display.c b/drivers/gpu/drm/msm/dp/dp_display.c index fb7811d5bfee..8e79de1bb428 100644 --- a/drivers/gpu/drm/msm/dp/dp_display.c +++ b/drivers/gpu/drm/msm/dp/dp_display.c @@ -14,6 +14,9 @@ #define pr_fmt(fmt) "[drm-dp] %s: " fmt, __func__ +#if defined(CONFIG_ANDROID) && !defined(CONFIG_DEBUG_FS) +#define CONFIG_DEBUG_FS +#endif #include #include #include diff --git a/drivers/gpu/drm/msm/dp/dp_drm.c b/drivers/gpu/drm/msm/dp/dp_drm.c index 1e19fe3ce85b..755c1de01395 100644 --- a/drivers/gpu/drm/msm/dp/dp_drm.c +++ b/drivers/gpu/drm/msm/dp/dp_drm.c @@ -14,6 +14,9 @@ #define pr_fmt(fmt) "[drm-dp]: %s: " fmt, __func__ +#if defined(CONFIG_ANDROID) && !defined(CONFIG_DEBUG_FS) +#define CONFIG_DEBUG_FS +#endif #include #include #include diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl.c b/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl.c index fef1c04f7e5d..f13020b8e9df 100644 --- a/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl.c +++ b/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl.c @@ -210,7 +210,7 @@ static int dsi_ctrl_debugfs_init(struct dsi_ctrl *dsi_ctrl, dir = debugfs_create_dir(dsi_ctrl->name, parent); if (IS_ERR_OR_NULL(dir)) { rc = PTR_ERR(dir); - 
pr_err("[DSI_%d] debugfs create dir failed, rc=%d\n", + pr_debug("[DSI_%d] debugfs create dir failed, rc=%d\n", dsi_ctrl->cell_index, rc); goto error; } @@ -1923,7 +1923,7 @@ static struct platform_driver dsi_ctrl_driver = { }, }; -#if defined(CONFIG_DEBUG_FS) +#if 0 void dsi_ctrl_debug_dump(u32 *entries, u32 size) { @@ -2037,12 +2037,7 @@ int dsi_ctrl_drv_init(struct dsi_ctrl *dsi_ctrl, struct dentry *parent) goto error; } - rc = dsi_ctrl_debugfs_init(dsi_ctrl, parent); - if (rc) { - pr_err("[DSI_%d] failed to init debug fs, rc=%d\n", - dsi_ctrl->cell_index, rc); - goto error; - } + dsi_ctrl_debugfs_init(dsi_ctrl, parent); error: mutex_unlock(&dsi_ctrl->ctrl_lock); @@ -2577,7 +2572,7 @@ static int _dsi_ctrl_setup_isr(struct dsi_ctrl *dsi_ctrl) dsi_ctrl->irq_info.irq_num = irq_num; disable_irq_nosync(irq_num); - pr_info("[DSI_%d] IRQ %d registered\n", + pr_debug("[DSI_%d] IRQ %d registered\n", dsi_ctrl->cell_index, irq_num); } } diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_display.c b/drivers/gpu/drm/msm/dsi-staging/dsi_display.c index ef9e57ce80a6..c9430e71f753 100644 --- a/drivers/gpu/drm/msm/dsi-staging/dsi_display.c +++ b/drivers/gpu/drm/msm/dsi-staging/dsi_display.c @@ -390,36 +390,56 @@ end: static irqreturn_t dsi_display_panel_te_irq_handler(int irq, void *data) { struct dsi_display *display = (struct dsi_display *)data; + struct dsi_display_te_listener *tl; - /* - * This irq handler is used for sole purpose of identifying - * ESD attacks on panel and we can safely assume IRQ_HANDLED - * in case of display not being initialized yet - */ - if (!display) + if (unlikely(!display)) return IRQ_HANDLED; SDE_EVT32(SDE_EVTLOG_FUNC_CASE1); - complete_all(&display->esd_te_gate); + + spin_lock(&display->te_lock); + list_for_each_entry(tl, &display->te_listeners, head) + tl->handler(tl); + spin_unlock(&display->te_lock); + return IRQ_HANDLED; } -static void dsi_display_change_te_irq_status(struct dsi_display *display, - bool enable) +int 
dsi_display_add_te_listener(struct dsi_display *display, + struct dsi_display_te_listener *tl) { - if (!display) { - pr_err("Invalid params\n"); - return; - } + unsigned long flags; + bool needs_enable; - /* Handle unbalanced irq enable/disbale calls */ - if (enable && !display->is_te_irq_enabled) { + if (!display || !tl) + return -ENOENT; + + spin_lock_irqsave(&display->te_lock, flags); + needs_enable = list_empty(&display->te_listeners); + list_add(&tl->head, &display->te_listeners); + spin_unlock_irqrestore(&display->te_lock, flags); + + if (needs_enable) enable_irq(gpio_to_irq(display->disp_te_gpio)); - display->is_te_irq_enabled = true; - } else if (!enable && display->is_te_irq_enabled) { - disable_irq(gpio_to_irq(display->disp_te_gpio)); - display->is_te_irq_enabled = false; - } + + return 0; +} + +int dsi_display_remove_te_listener(struct dsi_display *display, + struct dsi_display_te_listener *tl) +{ + unsigned long flags; + + if (!display || !tl) + return -ENOENT; + + spin_lock_irqsave(&display->te_lock, flags); + list_del(&tl->head); + if (list_empty(&display->te_listeners)) + disable_irq_nosync(gpio_to_irq(display->disp_te_gpio)); + spin_unlock_irqrestore(&display->te_lock, flags); + + return 0; } static void dsi_display_register_te_irq(struct dsi_display *display) @@ -446,15 +466,16 @@ static void dsi_display_register_te_irq(struct dsi_display *display) goto error; } - init_completion(&display->esd_te_gate); te_irq = gpio_to_irq(display->disp_te_gpio); + spin_lock_init(&display->te_lock); + INIT_LIST_HEAD(&display->te_listeners); + /* Avoid deferred spurious irqs with disable_irq() */ irq_set_status_flags(te_irq, IRQ_DISABLE_UNLAZY); rc = devm_request_irq(dev, te_irq, dsi_display_panel_te_irq_handler, - IRQF_TRIGGER_FALLING | IRQF_ONESHOT, - "TE_GPIO", display); + IRQF_TRIGGER_RISING, "TE_GPIO", display); if (rc) { pr_err("TE request_irq failed for ESD rc:%d\n", rc); irq_clear_status_flags(te_irq, IRQ_DISABLE_UNLAZY); @@ -462,7 +483,6 @@ static void 
dsi_display_register_te_irq(struct dsi_display *display) } disable_irq(te_irq); - display->is_te_irq_enabled = false; return; @@ -756,21 +776,31 @@ static int dsi_display_status_bta_request(struct dsi_display *display) return rc; } +static void _handle_esd_te(struct dsi_display_te_listener *tl) +{ + struct completion *esd_te_gate = tl->data; + + complete(esd_te_gate); +} + static int dsi_display_status_check_te(struct dsi_display *display) { int rc = 1; int const esd_te_timeout = msecs_to_jiffies(3*20); + DECLARE_COMPLETION(esd_te_gate); + struct dsi_display_te_listener tl = { + .handler = _handle_esd_te, + .data = &esd_te_gate, + }; - dsi_display_change_te_irq_status(display, true); + dsi_display_add_te_listener(display, &tl); - reinit_completion(&display->esd_te_gate); - if (!wait_for_completion_timeout(&display->esd_te_gate, - esd_te_timeout)) { + if (!wait_for_completion_timeout(&esd_te_gate, esd_te_timeout)) { pr_err("TE check failed\n"); rc = -EINVAL; } - dsi_display_change_te_irq_status(display, false); + dsi_display_remove_te_listener(display, &tl); return rc; } @@ -1417,7 +1447,6 @@ static ssize_t debugfs_alter_esd_check_mode(struct file *file, if (!strcmp(buf, "te_signal_check\n")) { pr_info("ESD check is switched to TE mode by user\n"); esd_config->status_mode = ESD_MODE_PANEL_TE; - dsi_display_change_te_irq_status(display, true); } if (!strcmp(buf, "reg_read\n")) { @@ -1430,8 +1459,6 @@ static ssize_t debugfs_alter_esd_check_mode(struct file *file, goto error; } esd_config->status_mode = ESD_MODE_REG_READ; - if (dsi_display_is_te_based_esd(display)) - dsi_display_change_te_irq_status(display, false); } if (!strcmp(buf, "esd_sw_sim_success\n")) @@ -1552,7 +1579,7 @@ static int dsi_display_debugfs_init(struct dsi_display *display) dir = debugfs_create_dir(display->name, NULL); if (IS_ERR_OR_NULL(dir)) { rc = PTR_ERR(dir); - pr_err("[%s] debugfs create dir failed, rc = %d\n", + pr_debug("[%s] debugfs create dir failed, rc = %d\n", display->name, rc); goto 
error; } @@ -5283,11 +5310,7 @@ static int dsi_display_bind(struct device *dev, goto error; } - rc = dsi_display_debugfs_init(display); - if (rc) { - pr_err("[%s] debugfs init failed, rc=%d\n", display->name, rc); - goto error; - } + dsi_display_debugfs_init(display); atomic_set(&display->clkrate_change_pending, 0); display->cached_clk_rate = 0; diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_display.h b/drivers/gpu/drm/msm/dsi-staging/dsi_display.h index 73d8b5ac29dd..dc86e3f3dbdd 100644 --- a/drivers/gpu/drm/msm/dsi-staging/dsi_display.h +++ b/drivers/gpu/drm/msm/dsi-staging/dsi_display.h @@ -158,8 +158,8 @@ struct dsi_display_ext_bridge { * @sw_te_using_wd: Is software te enabled * @display_lock: Mutex for dsi_display interface. * @disp_te_gpio: GPIO for panel TE interrupt. - * @is_te_irq_enabled:bool to specify whether TE interrupt is enabled. - * @esd_te_gate: completion gate to signal TE interrupt. + * @te_listeners: List of listeners registered for TE callbacks. + * @te_lock: Lock protecting te_listeners list. * @ctrl_count: Number of DSI interfaces required by panel. * @ctrl: Controller information for DSI display. * @panel: Handle to DSI panel. @@ -209,8 +209,8 @@ struct dsi_display { bool sw_te_using_wd; struct mutex display_lock; int disp_te_gpio; - bool is_te_irq_enabled; - struct completion esd_te_gate; + struct list_head te_listeners; + spinlock_t te_lock; u32 ctrl_count; struct dsi_display_ctrl ctrl[MAX_DSI_CTRLS_PER_DISPLAY]; @@ -285,6 +285,48 @@ struct dsi_display { atomic_t fod_ui; }; +/** + * struct dsi_display_te_listener - data for TE listener + * @head: List node pointer. + * @handler: TE callback function, called in atomic context. 
+ * @data: Private data that is not modified by add/remove API + */ +struct dsi_display_te_listener { + struct list_head head; + void (*handler)(struct dsi_display_te_listener *); + void *data; +}; + +/** + * dsi_display_add_te_listener - adds a new listener for TE events + * @display: Handle to display + * @tl: TE listener struct + * + * Adds a new TE listener and enables TE irq if there are no other listeners. + * Upon TE interrupt, the handler passed in will be called back in atomic + * context. + * + * Note: caller is responsible for lifetime of @tl which should be available + * until dsi_display_remove_te_listener() is called. + * + * Returns: 0 on success, otherwise errno on failure + */ +int dsi_display_add_te_listener(struct dsi_display *display, + struct dsi_display_te_listener *tl); + +/** + * dsi_display_remove_te_listener - removes listener for TE events + * @display: Handle to display + * @tl: TE listener struct + * + * Removes TE listener and disables TE irq if there are no other listeners. 
+ * + * Returns: 0 on success, otherwise errno on failure + */ +int dsi_display_remove_te_listener(struct dsi_display *display, + struct dsi_display_te_listener *tl); + + int dsi_display_dev_probe(struct platform_device *pdev); int dsi_display_dev_remove(struct platform_device *pdev); diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c index 7656b52683e1..16bc332881ea 100644 --- a/drivers/gpu/drm/msm/msm_drv.c +++ b/drivers/gpu/drm/msm/msm_drv.c @@ -61,6 +61,8 @@ #define MSM_VERSION_MINOR 2 #define MSM_VERSION_PATCHLEVEL 0 +static struct kmem_cache *kmem_vblank_work_pool; + static void msm_fb_output_poll_changed(struct drm_device *dev) { struct msm_drm_private *priv = NULL; @@ -254,7 +256,7 @@ static void vblank_ctrl_worker(struct kthread_work *work) else kms->funcs->disable_vblank(kms, priv->crtcs[cur_work->crtc_id]); - kfree(cur_work); + kmem_cache_free(kmem_vblank_work_pool, cur_work); } static int vblank_ctrl_queue_work(struct msm_drm_private *priv, @@ -265,7 +267,7 @@ static int vblank_ctrl_queue_work(struct msm_drm_private *priv, if (!priv || crtc_id >= priv->num_crtcs) return -EINVAL; - cur_work = kzalloc(sizeof(*cur_work), GFP_ATOMIC); + cur_work = kmem_cache_zalloc(kmem_vblank_work_pool, GFP_ATOMIC); if (!cur_work) return -ENOMEM; @@ -784,10 +786,21 @@ static int msm_drm_init(struct device *dev, struct drm_driver *drv) priv->disp_thread[i].crtc_id = priv->crtcs[i]->base.id; kthread_init_worker(&priv->disp_thread[i].worker); priv->disp_thread[i].dev = ddev; - priv->disp_thread[i].thread = - kthread_run(kthread_worker_fn, - &priv->disp_thread[i].worker, - "crtc_commit:%d", priv->disp_thread[i].crtc_id); + /* Only pin actual display thread to big cluster */ + if (i == 0) { + priv->disp_thread[i].thread = + kthread_run_perf_critical(kthread_worker_fn, + &priv->disp_thread[i].worker, + "crtc_commit:%d", priv->disp_thread[i].crtc_id); + pr_info("%i to big cluster", priv->disp_thread[i].crtc_id); + } else { + priv->disp_thread[i].thread = + 
kthread_run(kthread_worker_fn, + &priv->disp_thread[i].worker, + "crtc_commit:%d", priv->disp_thread[i].crtc_id); + pr_info("%i to little cluster", priv->disp_thread[i].crtc_id); + } + ret = sched_setscheduler(priv->disp_thread[i].thread, SCHED_FIFO, ¶m); if (ret) @@ -803,10 +816,20 @@ static int msm_drm_init(struct device *dev, struct drm_driver *drv) priv->event_thread[i].crtc_id = priv->crtcs[i]->base.id; kthread_init_worker(&priv->event_thread[i].worker); priv->event_thread[i].dev = ddev; - priv->event_thread[i].thread = - kthread_run(kthread_worker_fn, - &priv->event_thread[i].worker, - "crtc_event:%d", priv->event_thread[i].crtc_id); + /* Only pin first event thread to big cluster */ + if (i == 0) { + priv->event_thread[i].thread = + kthread_run_perf_critical(kthread_worker_fn, + &priv->event_thread[i].worker, + "crtc_event:%d", priv->event_thread[i].crtc_id); + pr_info("%i to big cluster", priv->event_thread[i].crtc_id); + } else { + priv->event_thread[i].thread = + kthread_run(kthread_worker_fn, + &priv->event_thread[i].worker, + "crtc_event:%d", priv->event_thread[i].crtc_id); + pr_info("%i to little cluster", priv->event_thread[i].crtc_id); + } /** * event thread should also run at same priority as disp_thread * because it is handling frame_done events. A lower priority @@ -851,7 +874,7 @@ static int msm_drm_init(struct device *dev, struct drm_driver *drv) * other important events. 
*/ kthread_init_worker(&priv->pp_event_worker); - priv->pp_event_thread = kthread_run(kthread_worker_fn, + priv->pp_event_thread = kthread_run_perf_critical(kthread_worker_fn, &priv->pp_event_worker, "pp_event"); ret = sched_setscheduler(priv->pp_event_thread, @@ -2315,6 +2338,7 @@ static int __init msm_drm_register(void) return -EINVAL; DBG("init"); + kmem_vblank_work_pool = KMEM_CACHE(vblank_work, SLAB_HWCACHE_ALIGN | SLAB_PANIC); msm_smmu_driver_init(); msm_dsi_register(); msm_edp_register(); diff --git a/drivers/gpu/drm/msm/sde/sde_core_perf.c b/drivers/gpu/drm/msm/sde/sde_core_perf.c index 82e1a5a69046..f8f04ecbe7aa 100644 --- a/drivers/gpu/drm/msm/sde/sde_core_perf.c +++ b/drivers/gpu/drm/msm/sde/sde_core_perf.c @@ -1,4 +1,4 @@ -/* Copyright (c) 2016-2020, The Linux Foundation. All rights reserved. +/* Copyright (c) 2016-2019, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -551,8 +551,8 @@ void sde_core_perf_crtc_update(struct drm_crtc *crtc, /* display rsc override during solver mode */ if (kms->perf.bw_vote_mode == DISP_RSC_MODE && - get_sde_rsc_current_state(SDE_RSC_INDEX) == - SDE_RSC_CMD_STATE) { + get_sde_rsc_current_state(SDE_RSC_INDEX) != + SDE_RSC_CLK_STATE) { /* update new bandwidth in all cases */ if (params_changed && ((new->bw_ctl[i] != old->bw_ctl[i]) || @@ -602,10 +602,9 @@ void sde_core_perf_crtc_update(struct drm_crtc *crtc, } if (kms->perf.bw_vote_mode == DISP_RSC_MODE && - ((get_sde_rsc_version(SDE_RSC_INDEX) != SDE_RSC_REV_3) || - (get_sde_rsc_current_state(SDE_RSC_INDEX) != SDE_RSC_CLK_STATE + ((get_sde_rsc_current_state(SDE_RSC_INDEX) != SDE_RSC_CLK_STATE && params_changed) || - (get_sde_rsc_current_state(SDE_RSC_INDEX) == SDE_RSC_CLK_STATE + (get_sde_rsc_current_state(SDE_RSC_INDEX) == SDE_RSC_CLK_STATE && update_bus))) sde_rsc_client_trigger_vote(sde_cstate->rsc_client, update_bus ? 
true : false); diff --git a/drivers/gpu/drm/msm/sde/sde_crtc.c b/drivers/gpu/drm/msm/sde/sde_crtc.c index e98392f18dff..d5662aead0fa 100644 --- a/drivers/gpu/drm/msm/sde/sde_crtc.c +++ b/drivers/gpu/drm/msm/sde/sde_crtc.c @@ -716,6 +716,47 @@ static int _sde_debugfs_fps_status(struct inode *inode, struct file *file) } #endif +static ssize_t early_wakeup_store(struct device *device, + struct device_attribute *attr, const char *buf, size_t count) +{ + struct drm_crtc *crtc; + struct sde_crtc *sde_crtc; + struct msm_drm_private *priv; + u32 crtc_id; + bool trigger; + + if (!device || !buf || !count) { + SDE_ERROR("invalid input param(s)\n"); + return -EINVAL; + } + + if (kstrtobool(buf, &trigger) < 0) + return -EINVAL; + + if (!trigger) + return count; + + crtc = dev_get_drvdata(device); + if (!crtc || !crtc->dev || !crtc->dev->dev_private) { + SDE_ERROR("invalid crtc\n"); + return -EINVAL; + } + + sde_crtc = to_sde_crtc(crtc); + priv = crtc->dev->dev_private; + + crtc_id = drm_crtc_index(crtc); + if (crtc_id >= ARRAY_SIZE(priv->disp_thread)) { + SDE_ERROR("invalid crtc index[%d]\n", crtc_id); + return -EINVAL; + } + + kthread_queue_work(&priv->disp_thread[crtc_id].worker, + &sde_crtc->early_wakeup_work); + + return count; +} + static ssize_t set_fps_periodicity(struct device *device, struct device_attribute *attr, const char *buf, size_t count) { @@ -885,10 +926,12 @@ static DEVICE_ATTR_RO(vsync_event); static DEVICE_ATTR(measured_fps, 0444, measured_fps_show, NULL); static DEVICE_ATTR(fps_periodicity_ms, 0644, fps_periodicity_show, set_fps_periodicity); +static DEVICE_ATTR_WO(early_wakeup); static struct attribute *sde_crtc_dev_attrs[] = { &dev_attr_vsync_event.attr, &dev_attr_measured_fps.attr, &dev_attr_fps_periodicity_ms.attr, + &dev_attr_early_wakeup.attr, NULL }; @@ -2009,7 +2052,7 @@ static void _sde_crtc_blend_setup_mixer(struct drm_crtc *crtc, struct drm_plane_state *state; struct sde_crtc_state *cstate; struct sde_plane_state *pstate = NULL; - struct 
plane_state *pstates = NULL; + struct plane_state pstates[SDE_PSTATES_MAX]; struct sde_format *format; struct sde_hw_ctl *ctl; struct sde_hw_mixer *lm; @@ -2036,10 +2079,7 @@ static void _sde_crtc_blend_setup_mixer(struct drm_crtc *crtc, sde_crtc->sbuf_rot_id = 0x0; sde_crtc->sbuf_rot_id_delta = 0x0; - pstates = kcalloc(SDE_PSTATES_MAX, - sizeof(struct plane_state), GFP_KERNEL); - if (!pstates) - return; + memset(pstates, 0, SDE_PSTATES_MAX * sizeof(struct plane_state)); drm_atomic_crtc_for_each_plane(plane, crtc) { state = plane->state; @@ -2080,7 +2120,7 @@ static void _sde_crtc_blend_setup_mixer(struct drm_crtc *crtc, format = to_sde_format(msm_framebuffer_format(pstate->base.fb)); if (!format) { SDE_ERROR("invalid format\n"); - goto end; + return; } if (pstate->stage == SDE_STAGE_BASE && format->alpha_enable) @@ -2135,7 +2175,6 @@ static void _sde_crtc_blend_setup_mixer(struct drm_crtc *crtc, _sde_crtc_set_src_split_order(crtc, pstates, cnt); if (lm && lm->ops.setup_dim_layer) { - cstate = to_sde_crtc_state(crtc->state); for (i = 0; i < cstate->num_dim_layers; i++) _sde_crtc_setup_dim_layer_cfg(crtc, sde_crtc, mixer, &cstate->dim_layer[i]); @@ -2146,9 +2185,6 @@ static void _sde_crtc_blend_setup_mixer(struct drm_crtc *crtc, } _sde_crtc_program_lm_output_roi(crtc); - -end: - kfree(pstates); } static void _sde_crtc_swap_mixers_for_right_partial_update( @@ -2257,9 +2293,11 @@ static void _sde_crtc_blend_setup(struct drm_crtc *crtc, mixer[i].hw_ctl); /* clear dim_layer settings */ - lm = mixer[i].hw_lm; - if (lm->ops.clear_dim_layer) - lm->ops.clear_dim_layer(lm); + if (sde_crtc_state->num_dim_layers) { + lm = mixer[i].hw_lm; + if (lm->ops.clear_dim_layer) + lm->ops.clear_dim_layer(lm); + } } _sde_crtc_swap_mixers_for_right_partial_update(crtc); @@ -5342,7 +5380,7 @@ static int sde_crtc_atomic_check(struct drm_crtc *crtc, { struct drm_device *dev; struct sde_crtc *sde_crtc; - struct plane_state *pstates = NULL; + struct plane_state pstates[SDE_PSTATES_MAX] 
__aligned(8); struct sde_crtc_state *cstate; struct sde_kms *kms; @@ -5352,7 +5390,7 @@ static int sde_crtc_atomic_check(struct drm_crtc *crtc, int cnt = 0, rc = 0, mixer_width, i, z_pos, mixer_height; - struct sde_multirect_plane_states *multirect_plane = NULL; + struct sde_multirect_plane_states multirect_plane[SDE_MULTIRECT_PLANE_MAX] __aligned(8); int multirect_count = 0; const struct drm_plane_state *pipe_staged[SSPP_MAX]; int left_zpos_cnt = 0, right_zpos_cnt = 0; @@ -5383,17 +5421,8 @@ static int sde_crtc_atomic_check(struct drm_crtc *crtc, goto end; } - pstates = kcalloc(SDE_PSTATES_MAX, - sizeof(struct plane_state), GFP_KERNEL); - - multirect_plane = kcalloc(SDE_MULTIRECT_PLANE_MAX, - sizeof(struct sde_multirect_plane_states), - GFP_KERNEL); - - if (!pstates || !multirect_plane) { - rc = -ENOMEM; - goto end; - } + memset(pstates, 0, sizeof(pstates)); + memset(multirect_plane, 0, sizeof(multirect_plane)); mode = &state->adjusted_mode; SDE_DEBUG("%s: check", sde_crtc->name); @@ -5635,8 +5664,6 @@ static int sde_crtc_atomic_check(struct drm_crtc *crtc, } end: - kfree(pstates); - kfree(multirect_plane); _sde_crtc_rp_free_unused(&cstate->rp); return rc; } @@ -6924,6 +6951,40 @@ static void __sde_crtc_idle_notify_work(struct kthread_work *work) } } +/* + * __sde_crtc_early_wakeup_work - trigger early wakeup from user space + */ +static void __sde_crtc_early_wakeup_work(struct kthread_work *work) +{ + struct sde_crtc *sde_crtc = container_of(work, struct sde_crtc, + early_wakeup_work); + struct drm_crtc *crtc; + struct drm_device *dev; + struct msm_drm_private *priv; + struct sde_kms *sde_kms; + + if (!sde_crtc) { + SDE_ERROR("invalid sde crtc\n"); + return; + } + + if (!sde_crtc->enabled) { + SDE_INFO("sde crtc is not enabled\n"); + return; + } + + crtc = &sde_crtc->base; + dev = crtc->dev; + if (!dev) { + SDE_ERROR("invalid drm device\n"); + return; + } + + priv = dev->dev_private; + sde_kms = to_sde_kms(priv->kms); + sde_kms_trigger_early_wakeup(sde_kms, 
crtc); +} + /* initialize crtc */ struct drm_crtc *sde_crtc_init(struct drm_device *dev, struct drm_plane *plane) { @@ -7015,6 +7076,8 @@ struct drm_crtc *sde_crtc_init(struct drm_device *dev, struct drm_plane *plane) kthread_init_delayed_work(&sde_crtc->idle_notify_work, __sde_crtc_idle_notify_work); + kthread_init_work(&sde_crtc->early_wakeup_work, + __sde_crtc_early_wakeup_work); SDE_DEBUG("%s: successfully initialized crtc\n", sde_crtc->name); return crtc; @@ -7088,6 +7151,7 @@ static int _sde_crtc_event_enable(struct sde_kms *kms, if (!node) return -ENOMEM; INIT_LIST_HEAD(&node->list); + INIT_LIST_HEAD(&node->irq.list); node->func = custom_events[i].func; node->event = event; node->state = IRQ_NOINIT; @@ -7113,8 +7177,6 @@ static int _sde_crtc_event_enable(struct sde_kms *kms, return ret; } - INIT_LIST_HEAD(&node->irq.list); - mutex_lock(&crtc->crtc_lock); ret = node->func(crtc_drm, true, &node->irq); if (!ret) { diff --git a/drivers/gpu/drm/msm/sde/sde_crtc.h b/drivers/gpu/drm/msm/sde/sde_crtc.h index a26b0f425f86..6d0031b083b1 100644 --- a/drivers/gpu/drm/msm/sde/sde_crtc.h +++ b/drivers/gpu/drm/msm/sde/sde_crtc.h @@ -222,6 +222,7 @@ struct sde_crtc_fps_info { * @sbuf_rot_id_old: inline rotator id for previous commit * @sbuf_rot_id_delta: inline rotator id for current delta state * @idle_notify_work: delayed worker to notify idle timeout to user space + * @early_wakeup_work: work to trigger early wakeup * @power_event : registered power event handle * @cur_perf : current performance committed to clock/bandwidth driver * @rp_lock : serialization lock for resource pool @@ -292,6 +293,7 @@ struct sde_crtc { u32 sbuf_rot_id_old; u32 sbuf_rot_id_delta; struct kthread_delayed_work idle_notify_work; + struct kthread_work early_wakeup_work; struct sde_power_event *power_event; diff --git a/drivers/gpu/drm/msm/sde/sde_encoder.c b/drivers/gpu/drm/msm/sde/sde_encoder.c index 5af5d6e137f7..5cb9bcf5debc 100644 --- a/drivers/gpu/drm/msm/sde/sde_encoder.c +++ 
b/drivers/gpu/drm/msm/sde/sde_encoder.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2014-2020, The Linux Foundation. All rights reserved. + * Copyright (c) 2014-2019, The Linux Foundation. All rights reserved. * Copyright (C) 2013 Red Hat * Author: Rob Clark * @@ -1973,35 +1973,15 @@ static int _sde_encoder_update_rsc_client( qsync_mode = sde_connector_get_qsync_mode( sde_enc->cur_master->connector); - if (IS_SDE_MAJOR_SAME(sde_kms->core_rev, SDE_HW_VER_620)) { - if (sde_encoder_in_clone_mode(drm_enc) || - !disp_info->is_primary || (disp_info->is_primary && - qsync_mode)) - rsc_state = enable ? SDE_RSC_CLK_STATE : - SDE_RSC_IDLE_STATE; - else if (sde_encoder_check_curr_mode(drm_enc, - MSM_DISPLAY_CMD_MODE)) - rsc_state = enable ? SDE_RSC_CMD_STATE : - SDE_RSC_IDLE_STATE; - else if (sde_encoder_check_curr_mode(drm_enc, - MSM_DISPLAY_VIDEO_MODE)) - rsc_state = enable ? SDE_RSC_VID_STATE : - SDE_RSC_IDLE_STATE; - } else { - if (sde_encoder_in_clone_mode(drm_enc)) - rsc_state = enable ? SDE_RSC_CLK_STATE : - SDE_RSC_IDLE_STATE; - else - rsc_state = enable ? ((disp_info->is_primary && - (sde_encoder_check_curr_mode(drm_enc, - MSM_DISPLAY_CMD_MODE)) && !qsync_mode) ? - SDE_RSC_CMD_STATE : SDE_RSC_VID_STATE) : - SDE_RSC_IDLE_STATE; - } + if (sde_encoder_in_clone_mode(drm_enc) || !disp_info->is_primary || + (disp_info->is_primary && qsync_mode)) + rsc_state = enable ? SDE_RSC_CLK_STATE : SDE_RSC_IDLE_STATE; + else if (disp_info->capabilities & MSM_DISPLAY_CAP_CMD_MODE) + rsc_state = enable ? SDE_RSC_CMD_STATE : SDE_RSC_IDLE_STATE; + else if (disp_info->capabilities & MSM_DISPLAY_CAP_VID_MODE) + rsc_state = enable ? 
SDE_RSC_VID_STATE : SDE_RSC_IDLE_STATE; - if (IS_SDE_MAJOR_SAME(sde_kms->core_rev, SDE_HW_VER_620) && - (rsc_state == SDE_RSC_VID_STATE)) - rsc_state = SDE_RSC_CLK_STATE; + rsc_state = SDE_RSC_CLK_STATE; SDE_EVT32(rsc_state, qsync_mode); diff --git a/drivers/gpu/drm/msm/sde/sde_encoder_phys.h b/drivers/gpu/drm/msm/sde/sde_encoder_phys.h index 1fe0075d03d3..55b63480d731 100644 --- a/drivers/gpu/drm/msm/sde/sde_encoder_phys.h +++ b/drivers/gpu/drm/msm/sde/sde_encoder_phys.h @@ -405,6 +405,7 @@ struct sde_encoder_phys_cmd { atomic_t pending_vblank_cnt; wait_queue_head_t pending_vblank_wq; u32 ctl_start_threshold; + struct work_struct ctl_wait_work; }; /** diff --git a/drivers/gpu/drm/msm/sde/sde_encoder_phys_cmd.c b/drivers/gpu/drm/msm/sde/sde_encoder_phys_cmd.c index c6f5b15c70b4..4b8f7024990e 100644 --- a/drivers/gpu/drm/msm/sde/sde_encoder_phys_cmd.c +++ b/drivers/gpu/drm/msm/sde/sde_encoder_phys_cmd.c @@ -1471,6 +1471,15 @@ static int _sde_encoder_phys_cmd_wait_for_ctl_start( return ret; } +static void sde_encoder_phys_cmd_ctl_start_work(struct work_struct *work) +{ + struct sde_encoder_phys_cmd *cmd_enc = container_of(work, + typeof(*cmd_enc), + ctl_wait_work); + + _sde_encoder_phys_cmd_wait_for_ctl_start(&cmd_enc->base); +} + static int sde_encoder_phys_cmd_wait_for_tx_complete( struct sde_encoder_phys *phys_enc) { @@ -1505,9 +1514,9 @@ static int sde_encoder_phys_cmd_wait_for_commit_done( /* only required for master controller */ if (sde_encoder_phys_cmd_is_master(phys_enc)) - rc = _sde_encoder_phys_cmd_wait_for_ctl_start(phys_enc); + queue_work(system_unbound_wq, &cmd_enc->ctl_wait_work); - if (!rc && sde_encoder_phys_cmd_is_master(phys_enc) && + if (sde_encoder_phys_cmd_is_master(phys_enc) && cmd_enc->autorefresh.cfg.enable) rc = _sde_encoder_phys_cmd_wait_for_autorefresh_done(phys_enc); @@ -1592,6 +1601,9 @@ static void sde_encoder_phys_cmd_prepare_commit( if (!sde_encoder_phys_cmd_is_master(phys_enc)) return; + /* Wait for ctl_start interrupt for the 
previous commit if needed */ + flush_work(&cmd_enc->ctl_wait_work); + SDE_EVT32(DRMID(phys_enc->parent), phys_enc->intf_idx - INTF_0, cmd_enc->autorefresh.cfg.enable); @@ -1798,6 +1810,7 @@ struct sde_encoder_phys *sde_encoder_phys_cmd_init( init_waitqueue_head(&cmd_enc->pending_vblank_wq); atomic_set(&cmd_enc->autorefresh.kickoff_cnt, 0); init_waitqueue_head(&cmd_enc->autorefresh.kickoff_wq); + INIT_WORK(&cmd_enc->ctl_wait_work, sde_encoder_phys_cmd_ctl_start_work); SDE_DEBUG_CMDENC(cmd_enc, "created\n"); diff --git a/drivers/gpu/drm/msm/sde/sde_fence.c b/drivers/gpu/drm/msm/sde/sde_fence.c index 2346317aa337..e48d84134634 100644 --- a/drivers/gpu/drm/msm/sde/sde_fence.c +++ b/drivers/gpu/drm/msm/sde/sde_fence.c @@ -19,6 +19,8 @@ #define TIMELINE_VAL_LENGTH 128 +static struct kmem_cache *kmem_fence_pool; + void *sde_sync_get(uint64_t fd) { /* force signed compare, fdget accepts an int argument */ @@ -91,7 +93,9 @@ uint32_t sde_sync_get_name_prefix(void *fence) struct sde_fence { struct dma_fence base; struct sde_fence_context *ctx; +#ifdef CONFIG_FENCE_DEBUG char name[SDE_FENCE_NAME_SIZE]; +#endif struct list_head fence_list; int fd; }; @@ -116,16 +120,24 @@ static inline struct sde_fence *to_sde_fence(struct dma_fence *fence) static const char *sde_fence_get_driver_name(struct dma_fence *fence) { +#ifdef CONFIG_FENCE_DEBUG struct sde_fence *f = to_sde_fence(fence); return f->name; +#else + return "sde"; +#endif } static const char *sde_fence_get_timeline_name(struct dma_fence *fence) { +#ifdef CONFIG_FENCE_DEBUG struct sde_fence *f = to_sde_fence(fence); return f->ctx->name; +#else + return "timeline"; +#endif } static bool sde_fence_enable_signaling(struct dma_fence *fence) @@ -151,7 +163,7 @@ static void sde_fence_release(struct dma_fence *fence) if (fence) { f = to_sde_fence(fence); kref_put(&f->ctx->kref, sde_fence_destroy); - kfree(f); + kmem_cache_free(kmem_fence_pool, f); } } @@ -204,13 +216,15 @@ static int _sde_fence_create_fd(void *fence_ctx, uint32_t 
val) goto exit; } - sde_fence = kzalloc(sizeof(*sde_fence), GFP_KERNEL); + sde_fence = kmem_cache_zalloc(kmem_fence_pool, GFP_KERNEL); if (unlikely(!sde_fence)) return -ENOMEM; sde_fence->ctx = fence_ctx; +#ifdef CONFIG_FENCE_DEBUG snprintf(sde_fence->name, SDE_FENCE_NAME_SIZE, "sde_fence:%s:%u", sde_fence->ctx->name, val); +#endif dma_fence_init(&sde_fence->base, &sde_fence_ops, &ctx->lock, ctx->context, val); kref_get(&ctx->kref); @@ -218,8 +232,10 @@ static int _sde_fence_create_fd(void *fence_ctx, uint32_t val) /* create fd */ fd = get_unused_fd_flags(0); if (unlikely(fd < 0)) { +#ifdef CONFIG_FENCE_DEBUG SDE_ERROR("failed to get_unused_fd_flags(), %s\n", sde_fence->name); +#endif dma_fence_put(&sde_fence->base); goto exit; } @@ -229,7 +245,9 @@ static int _sde_fence_create_fd(void *fence_ctx, uint32_t val) if (unlikely(sync_file == NULL)) { put_unused_fd(fd); fd = -EINVAL; +#ifdef CONFIG_FENCE_DEBUG SDE_ERROR("couldn't create fence, %s\n", sde_fence->name); +#endif dma_fence_put(&sde_fence->base); goto exit; } @@ -261,7 +279,9 @@ struct sde_fence_context *sde_fence_init(const char *name, uint32_t drm_id) return ERR_PTR(-ENOMEM); } +#ifdef CONFIG_FENCE_DEBUG strlcpy(ctx->name, name, ARRAY_SIZE(ctx->name)); +#endif ctx->drm_id = drm_id; kref_init(&ctx->kref); ctx->context = dma_fence_context_alloc(1); @@ -500,3 +520,11 @@ void sde_debugfs_timeline_dump(struct sde_fence_context *ctx, } spin_unlock(&ctx->list_lock); } + +static int __init sde_kmem_pool_init(void) +{ + kmem_fence_pool = KMEM_CACHE(sde_fence, SLAB_HWCACHE_ALIGN | SLAB_PANIC); + return 0; +} + +module_init(sde_kmem_pool_init); diff --git a/drivers/gpu/drm/msm/sde/sde_hw_catalog.c b/drivers/gpu/drm/msm/sde/sde_hw_catalog.c index fe0c22d09cd0..155645567ce2 100644 --- a/drivers/gpu/drm/msm/sde/sde_hw_catalog.c +++ b/drivers/gpu/drm/msm/sde/sde_hw_catalog.c @@ -1226,9 +1226,9 @@ static void _sde_sspp_setup_cursor(struct sde_mdss_cfg *sde_cfg, struct sde_sspp_cfg *sspp, struct sde_sspp_sub_blks *sblk, 
struct sde_prop_value *prop_value, u32 *cursor_count) { - if (!IS_SDE_MAJOR_MINOR_SAME(sde_cfg->hwversion, SDE_HW_VER_300)) - SDE_ERROR("invalid sspp type %d, xin id %d\n", - sspp->type, sspp->xin_id); + SDE_ERROR("invalid sspp type %d, xin id %d\n", + sspp->type, sspp->xin_id); + set_bit(SDE_SSPP_CURSOR, &sspp->features); sblk->maxupscale = SSPP_UNITY_SCALE; sblk->maxdwnscale = SSPP_UNITY_SCALE; @@ -1529,6 +1529,7 @@ static int sde_sspp_parse_dt(struct device_node *np, sde_cfg->mdp[j].clk_ctrls[sspp->clk_ctrl].bit_off = PROP_BITVALUE_ACCESS(prop_value, SSPP_CLK_CTRL, i, 1); + sde_cfg->mdp[j].clk_ctrls[sspp->clk_ctrl].val = -1; } SDE_DEBUG( @@ -1861,10 +1862,7 @@ static int sde_intf_parse_dt(struct device_node *np, if (IS_SDE_CTL_REV_100(sde_cfg->ctl_rev)) set_bit(SDE_INTF_INPUT_CTRL, &intf->features); - if (IS_SDE_MAJOR_SAME((sde_cfg->hwversion), SDE_HW_VER_500) || - (IS_SDE_MAJOR_MINOR_SAME((sde_cfg->hwversion), - SDE_HW_VER_620))) - set_bit(SDE_INTF_TE, &intf->features); + set_bit(SDE_INTF_TE, &intf->features); } end: @@ -1931,12 +1929,7 @@ static int sde_wb_parse_dt(struct device_node *np, struct sde_mdss_cfg *sde_cfg) goto end; } - if (IS_SDE_MAJOR_MINOR_SAME((sde_cfg->hwversion), - SDE_HW_VER_170)) - wb->vbif_idx = VBIF_NRT; - else - wb->vbif_idx = VBIF_RT; - + wb->vbif_idx = VBIF_RT; wb->len = PROP_VALUE_ACCESS(prop_value, WB_LEN, 0); if (!prop_exists[WB_LEN]) wb->len = DEFAULT_SDE_HW_BLOCK_LEN; @@ -1977,6 +1970,7 @@ static int sde_wb_parse_dt(struct device_node *np, struct sde_mdss_cfg *sde_cfg) sde_cfg->mdp[j].clk_ctrls[wb->clk_ctrl].bit_off = PROP_BITVALUE_ACCESS(prop_value, WB_CLK_CTRL, i, 1); + sde_cfg->mdp[j].clk_ctrls[wb->clk_ctrl].val = -1; } wb->format_list = sde_cfg->wb_formats; @@ -2163,6 +2157,7 @@ static void _sde_inline_rot_parse_dt(struct device_node *np, sde_cfg->mdp[j].clk_ctrls[index].bit_off = PROP_BITVALUE_ACCESS(prop_value, INLINE_ROT_CLK_CTRL, i, 1); + sde_cfg->mdp[j].clk_ctrls[index].val = -1; } SDE_DEBUG("rot- xin:%d, num:%d, rd:%d, 
clk:%d:0x%x/%d\n", @@ -2906,8 +2901,6 @@ static int sde_pp_parse_dt(struct device_node *np, struct sde_mdss_cfg *sde_cfg) pp->id - PINGPONG_0); major_version = SDE_HW_MAJOR(sde_cfg->hwversion); - if (major_version < SDE_HW_MAJOR(SDE_HW_VER_500)) - set_bit(SDE_PINGPONG_TE, &pp->features); sblk->te2.base = PROP_VALUE_ACCESS(prop_value, TE2_OFF, i); if (sblk->te2.base) { @@ -3203,8 +3196,6 @@ static int sde_parse_dt(struct device_node *np, struct sde_mdss_cfg *cfg) PROP_VALUE_ACCESS(prop_value, SMART_PANEL_ALIGN_MODE, 0); major_version = SDE_HW_MAJOR(cfg->hwversion); - if (major_version < SDE_HW_MAJOR(SDE_HW_VER_500)) - set_bit(SDE_MDP_VSYNC_SEL, &cfg->mdp[0].features); if (prop_exists[SEC_SID_MASK]) { cfg->sec_sid_mask_count = prop_count[SEC_SID_MASK]; @@ -3744,22 +3735,8 @@ static int sde_hardware_format_caps(struct sde_mdss_cfg *sde_cfg, int rc = 0; uint32_t dma_list_size, vig_list_size, wb2_list_size; uint32_t virt_vig_list_size; - uint32_t cursor_list_size = 0; uint32_t index = 0; - if (IS_SDE_MAJOR_MINOR_SAME((hw_rev), SDE_HW_VER_300)) { - cursor_list_size = ARRAY_SIZE(cursor_formats); - sde_cfg->cursor_formats = kcalloc(cursor_list_size, - sizeof(struct sde_format_extended), GFP_KERNEL); - if (!sde_cfg->cursor_formats) { - rc = -ENOMEM; - goto end; - } - index = sde_copy_formats(sde_cfg->cursor_formats, - cursor_list_size, 0, cursor_formats, - ARRAY_SIZE(cursor_formats)); - } - dma_list_size = ARRAY_SIZE(plane_formats); vig_list_size = ARRAY_SIZE(plane_formats_yuv); virt_vig_list_size = ARRAY_SIZE(plane_formats); @@ -3770,11 +3747,7 @@ static int sde_hardware_format_caps(struct sde_mdss_cfg *sde_cfg, + ARRAY_SIZE(tp10_ubwc_formats) + ARRAY_SIZE(p010_formats); virt_vig_list_size += ARRAY_SIZE(rgb_10bit_formats); - if (IS_SDE_MAJOR_MINOR_SAME((hw_rev), SDE_HW_VER_400) || - (IS_SDE_MAJOR_MINOR_SAME((hw_rev), SDE_HW_VER_410)) || - (IS_SDE_MAJOR_SAME((hw_rev), SDE_HW_VER_500)) || - (IS_SDE_MAJOR_MINOR_SAME((hw_rev), SDE_HW_VER_620))) - vig_list_size += 
ARRAY_SIZE(p010_ubwc_formats); + vig_list_size += ARRAY_SIZE(p010_ubwc_formats); wb2_list_size += ARRAY_SIZE(rgb_10bit_formats) + ARRAY_SIZE(tp10_ubwc_formats); @@ -3808,17 +3781,7 @@ static int sde_hardware_format_caps(struct sde_mdss_cfg *sde_cfg, goto end; } - if (IS_SDE_MAJOR_MINOR_SAME((hw_rev), SDE_HW_VER_300) || - IS_SDE_MAJOR_MINOR_SAME((hw_rev), SDE_HW_VER_400) || - IS_SDE_MAJOR_MINOR_SAME((hw_rev), SDE_HW_VER_410) || - IS_SDE_MAJOR_SAME((hw_rev), SDE_HW_VER_500) || - IS_SDE_MAJOR_MINOR_SAME((hw_rev), SDE_HW_VER_620)) - sde_cfg->has_hdr = true; - - /* Disable HDR for SM6150 target only */ - if (IS_SDE_MAJOR_MINOR_SAME((hw_rev), SDE_HW_VER_530) - || IS_SDE_MAJOR_MINOR_SAME((hw_rev), SDE_HW_VER_540)) - sde_cfg->has_hdr = false; + sde_cfg->has_hdr = true; index = sde_copy_formats(sde_cfg->dma_formats, dma_list_size, 0, plane_formats, ARRAY_SIZE(plane_formats)); @@ -3833,13 +3796,9 @@ static int sde_hardware_format_caps(struct sde_mdss_cfg *sde_cfg, ARRAY_SIZE(rgb_10bit_formats)); index += sde_copy_formats(sde_cfg->vig_formats, vig_list_size, index, p010_formats, ARRAY_SIZE(p010_formats)); - if (IS_SDE_MAJOR_MINOR_SAME((hw_rev), SDE_HW_VER_400) || - (IS_SDE_MAJOR_MINOR_SAME((hw_rev), SDE_HW_VER_410)) || - (IS_SDE_MAJOR_SAME((hw_rev), SDE_HW_VER_500)) || - (IS_SDE_MAJOR_MINOR_SAME((hw_rev), SDE_HW_VER_620))) - index += sde_copy_formats(sde_cfg->vig_formats, - vig_list_size, index, p010_ubwc_formats, - ARRAY_SIZE(p010_ubwc_formats)); + index += sde_copy_formats(sde_cfg->vig_formats, + vig_list_size, index, p010_ubwc_formats, + ARRAY_SIZE(p010_ubwc_formats)); index += sde_copy_formats(sde_cfg->vig_formats, vig_list_size, index, tp10_ubwc_formats, ARRAY_SIZE(tp10_ubwc_formats)); diff --git a/drivers/gpu/drm/msm/sde/sde_hw_catalog.h b/drivers/gpu/drm/msm/sde/sde_hw_catalog.h index 95c267021de5..1143146034e6 100644 --- a/drivers/gpu/drm/msm/sde/sde_hw_catalog.h +++ b/drivers/gpu/drm/msm/sde/sde_hw_catalog.h @@ -36,38 +36,16 @@ #define SDE_HW_STEP(rev) ((rev) & 
0xFFFF) #define SDE_HW_MAJOR_MINOR(rev) ((rev) >> 16) -#define IS_SDE_MAJOR_SAME(rev1, rev2) \ - (SDE_HW_MAJOR((rev1)) == SDE_HW_MAJOR((rev2))) - -#define IS_SDE_MAJOR_MINOR_SAME(rev1, rev2) \ - (SDE_HW_MAJOR_MINOR((rev1)) == SDE_HW_MAJOR_MINOR((rev2))) - -#define SDE_HW_VER_170 SDE_HW_VER(1, 7, 0) /* 8996 v1.0 */ -#define SDE_HW_VER_171 SDE_HW_VER(1, 7, 1) /* 8996 v2.0 */ -#define SDE_HW_VER_172 SDE_HW_VER(1, 7, 2) /* 8996 v3.0 */ -#define SDE_HW_VER_300 SDE_HW_VER(3, 0, 0) /* 8998 v1.0 */ -#define SDE_HW_VER_301 SDE_HW_VER(3, 0, 1) /* 8998 v1.1 */ -#define SDE_HW_VER_400 SDE_HW_VER(4, 0, 0) /* sdm845 v1.0 */ -#define SDE_HW_VER_401 SDE_HW_VER(4, 0, 1) /* sdm845 v2.0 */ -#define SDE_HW_VER_410 SDE_HW_VER(4, 1, 0) /* sdm670 v1.0 */ -#define SDE_HW_VER_500 SDE_HW_VER(5, 0, 0) /* sm8150 v1.0 */ -#define SDE_HW_VER_501 SDE_HW_VER(5, 0, 1) /* sm8150 v2.0 */ -#define SDE_HW_VER_510 SDE_HW_VER(5, 1, 0) /* sdmshrike v1.0 */ -#define SDE_HW_VER_520 SDE_HW_VER(5, 2, 0) /* sdmmagpie v1.0 */ -#define SDE_HW_VER_530 SDE_HW_VER(5, 3, 0) /* sm6150 v1.0 */ -#define SDE_HW_VER_540 SDE_HW_VER(5, 4, 0) /* sdmtrinket v1.0 */ -#define SDE_HW_VER_620 SDE_HW_VER(6, 2, 0) /* atoll*/ - -#define IS_MSM8996_TARGET(rev) IS_SDE_MAJOR_MINOR_SAME((rev), SDE_HW_VER_170) -#define IS_MSM8998_TARGET(rev) IS_SDE_MAJOR_MINOR_SAME((rev), SDE_HW_VER_300) -#define IS_SDM845_TARGET(rev) IS_SDE_MAJOR_MINOR_SAME((rev), SDE_HW_VER_400) -#define IS_SDM670_TARGET(rev) IS_SDE_MAJOR_MINOR_SAME((rev), SDE_HW_VER_410) -#define IS_SM8150_TARGET(rev) IS_SDE_MAJOR_MINOR_SAME((rev), SDE_HW_VER_500) -#define IS_SDMSHRIKE_TARGET(rev) IS_SDE_MAJOR_MINOR_SAME((rev), SDE_HW_VER_510) -#define IS_SDMMAGPIE_TARGET(rev) IS_SDE_MAJOR_MINOR_SAME((rev), SDE_HW_VER_520) -#define IS_SM6150_TARGET(rev) IS_SDE_MAJOR_MINOR_SAME((rev), SDE_HW_VER_530) -#define IS_SDMTRINKET_TARGET(rev) IS_SDE_MAJOR_MINOR_SAME((rev), SDE_HW_VER_540) -#define IS_ATOLL_TARGET(rev) IS_SDE_MAJOR_MINOR_SAME((rev), SDE_HW_VER_620) +#define 
IS_MSM8996_TARGET(rev) false +#define IS_MSM8998_TARGET(rev) false +#define IS_SDM845_TARGET(rev) false +#define IS_SDM670_TARGET(rev) false +#define IS_SM8150_TARGET(rev) true +#define IS_SDMSHRIKE_TARGET(rev) false +#define IS_SDMMAGPIE_TARGET(rev) false +#define IS_SM6150_TARGET(rev) false +#define IS_SDMTRINKET_TARGET(rev) false +#define IS_ATOLL_TARGET(rev) false #define SDE_HW_BLK_NAME_LEN 16 @@ -106,12 +84,10 @@ enum { SDE_HW_UBWC_VER_20 = SDE_HW_UBWC_VER(0x200), SDE_HW_UBWC_VER_30 = SDE_HW_UBWC_VER(0x300), }; -#define IS_UBWC_10_SUPPORTED(rev) \ - IS_SDE_MAJOR_MINOR_SAME((rev), SDE_HW_UBWC_VER_10) -#define IS_UBWC_20_SUPPORTED(rev) \ - IS_SDE_MAJOR_MINOR_SAME((rev), SDE_HW_UBWC_VER_20) -#define IS_UBWC_30_SUPPORTED(rev) \ - IS_SDE_MAJOR_MINOR_SAME((rev), SDE_HW_UBWC_VER_30) + +#define IS_UBWC_10_SUPPORTED(rev) false +#define IS_UBWC_20_SUPPORTED(rev) false +#define IS_UBWC_30_SUPPORTED(rev) true /** * SDE INTERRUPTS - maintains the possible hw irq's allowed by HW @@ -652,10 +628,12 @@ enum sde_clk_ctrl_type { /* struct sde_clk_ctrl_reg : Clock control register * @reg_off: register offset * @bit_off: bit offset + * @val: current bit value */ struct sde_clk_ctrl_reg { u32 reg_off; u32 bit_off; + int val; }; /* struct sde_mdp_cfg : MDP TOP-BLK instance info diff --git a/drivers/gpu/drm/msm/sde/sde_hw_rot.c b/drivers/gpu/drm/msm/sde/sde_hw_rot.c index f282778607d8..24c6a3a15141 100644 --- a/drivers/gpu/drm/msm/sde/sde_hw_rot.c +++ b/drivers/gpu/drm/msm/sde/sde_hw_rot.c @@ -61,16 +61,6 @@ static struct sde_rot_cfg *_rot_offset(enum sde_rot rot, return ERR_PTR(-EINVAL); } -/** - * _sde_hw_rot_reg_dump - perform register dump - * @ptr: private pointer to rotator platform device - * return: None - */ -static void _sde_hw_rot_reg_dump(void *ptr) -{ - sde_rotator_inline_reg_dump((struct platform_device *) ptr); -} - /** * sde_hw_rot_start - start rotator before any commit * @hw: Pointer to rotator hardware driver @@ -88,10 +78,6 @@ static int sde_hw_rot_start(struct 
sde_hw_rot *hw) pdev = hw->caps->pdev; - rc = sde_dbg_reg_register_cb(hw->name, _sde_hw_rot_reg_dump, pdev); - if (rc) - SDE_ERROR("failed to register debug dump %d\n", rc); - hw->rot_ctx = sde_rotator_inline_open(pdev); if (IS_ERR_OR_NULL(hw->rot_ctx)) { rc = PTR_ERR(hw->rot_ctx); @@ -116,9 +102,6 @@ static void sde_hw_rot_stop(struct sde_hw_rot *hw) sde_rotator_inline_release(hw->rot_ctx); hw->rot_ctx = NULL; - - sde_dbg_reg_unregister_cb(hw->name, _sde_hw_rot_reg_dump, - hw->caps->pdev); } /** diff --git a/drivers/gpu/drm/msm/sde/sde_hw_top.c b/drivers/gpu/drm/msm/sde/sde_hw_top.c index c5a1fac1e362..d890183949e6 100644 --- a/drivers/gpu/drm/msm/sde/sde_hw_top.c +++ b/drivers/gpu/drm/msm/sde/sde_hw_top.c @@ -159,6 +159,7 @@ static void sde_hw_setup_pp_split(struct sde_hw_mdp *mdp, static bool sde_hw_setup_clk_force_ctrl(struct sde_hw_mdp *mdp, enum sde_clk_ctrl_type clk_ctrl, bool enable) { + struct sde_clk_ctrl_reg *ctrl_reg; struct sde_hw_blk_reg_map *c; u32 reg_off, bit_off; u32 reg_val, new_val; @@ -172,8 +173,12 @@ static bool sde_hw_setup_clk_force_ctrl(struct sde_hw_mdp *mdp, if (clk_ctrl <= SDE_CLK_CTRL_NONE || clk_ctrl >= SDE_CLK_CTRL_MAX) return false; - reg_off = mdp->caps->clk_ctrls[clk_ctrl].reg_off; - bit_off = mdp->caps->clk_ctrls[clk_ctrl].bit_off; + ctrl_reg = (struct sde_clk_ctrl_reg *)&mdp->caps->clk_ctrls[clk_ctrl]; + if (cmpxchg(&ctrl_reg->val, !enable, enable) == enable) + return enable; + + reg_off = ctrl_reg->reg_off; + bit_off = ctrl_reg->bit_off; reg_val = SDE_REG_READ(c, reg_off); @@ -558,11 +563,6 @@ struct sde_hw_mdp *sde_hw_mdptop_init(enum sde_mdp idx, goto blk_init_error; } - sde_dbg_reg_register_dump_range(SDE_DBG_NAME, cfg->name, - mdp->hw.blk_off, mdp->hw.blk_off + mdp->hw.length, - mdp->hw.xin_id); - sde_dbg_set_sde_top_offset(mdp->hw.blk_off); - return mdp; blk_init_error: diff --git a/drivers/gpu/drm/msm/sde/sde_hw_util.c b/drivers/gpu/drm/msm/sde/sde_hw_util.c index 013a44263d92..fe4d4ccfcd30 100644 --- 
a/drivers/gpu/drm/msm/sde/sde_hw_util.c +++ b/drivers/gpu/drm/msm/sde/sde_hw_util.c @@ -79,10 +79,6 @@ void sde_reg_write(struct sde_hw_blk_reg_map *c, u32 val, const char *name) { - /* don't need to mutex protect this */ - if (c->log_mask & sde_hw_util_log_mask) - SDE_DEBUG_DRIVER("[%s:0x%X] <= 0x%X\n", - name, c->blk_off + reg_off, val); writel_relaxed(val, c->base_off + c->blk_off + reg_off); } diff --git a/drivers/gpu/drm/msm/sde/sde_kms.c b/drivers/gpu/drm/msm/sde/sde_kms.c index 980fd6cb0345..15d9a5542867 100644 --- a/drivers/gpu/drm/msm/sde/sde_kms.c +++ b/drivers/gpu/drm/msm/sde/sde_kms.c @@ -81,6 +81,7 @@ static const char * const iommu_ports[] = { #define SDE_KMS_MODESET_LOCK_TIMEOUT_US 500 #define SDE_KMS_MODESET_LOCK_MAX_TRIALS 20 +#define SDE_KMS_PM_QOS_CPU_DMA_LATENCY 300 /** * sdecustom - enable certain driver customizations for sde clients * Enabling this modifies the standard DRM behavior slightly and assumes @@ -367,32 +368,9 @@ static void _sde_debugfs_destroy(struct sde_kms *sde_kms) static int sde_kms_enable_vblank(struct msm_kms *kms, struct drm_crtc *crtc) { int ret = 0; - struct sde_kms *sde_kms; - struct msm_drm_private *priv; - struct sde_crtc *sde_crtc; - struct drm_encoder *drm_enc; - - sde_kms = to_sde_kms(kms); - priv = sde_kms->dev->dev_private; - sde_crtc = to_sde_crtc(crtc); SDE_ATRACE_BEGIN("sde_kms_enable_vblank"); - - if (sde_crtc->vblank_requested == false) { - SDE_ATRACE_BEGIN("sde_encoder_trigger_early_wakeup"); - drm_for_each_encoder(drm_enc, crtc->dev) - sde_encoder_trigger_early_wakeup(drm_enc); - - if (sde_kms->first_kickoff) { - sde_power_scale_reg_bus(&priv->phandle, - sde_kms->core_client, - VOTE_INDEX_HIGH, false); - } - SDE_ATRACE_END("sde_encoder_trigger_early_wakeup"); - } - ret = sde_crtc_vblank(crtc, true); - SDE_ATRACE_END("sde_kms_enable_vblank"); return ret; @@ -3195,6 +3173,59 @@ static void _sde_kms_set_lutdma_vbif_remap(struct sde_kms *sde_kms) sde_vbif_set_qos_remap(sde_kms, &qos_params); } +static void 
sde_kms_update_pm_qos_irq_request(struct sde_kms *sde_kms) +{ + struct pm_qos_request *req; + + req = &sde_kms->pm_qos_irq_req; + req->type = PM_QOS_REQ_AFFINE_CORES; + req->cpus_affine = sde_kms->irq_cpu_mask; + + if (pm_qos_request_active(req)) + pm_qos_update_request(req, SDE_KMS_PM_QOS_CPU_DMA_LATENCY); + else if (!cpumask_empty(&req->cpus_affine)) { + /** If request is not active yet and mask is not empty + * then it needs to be added initially + */ + pm_qos_add_request(req, PM_QOS_CPU_DMA_LATENCY, + SDE_KMS_PM_QOS_CPU_DMA_LATENCY); + } +} + +static void sde_kms_set_default_pm_qos_irq_request(struct sde_kms *sde_kms) +{ + if (pm_qos_request_active(&sde_kms->pm_qos_irq_req)) + pm_qos_update_request(&sde_kms->pm_qos_irq_req, + PM_QOS_DEFAULT_VALUE); +} + +static void sde_kms_irq_affinity_notify( + struct irq_affinity_notify *affinity_notify, + const cpumask_t *mask) +{ + struct msm_drm_private *priv; + struct sde_kms *sde_kms = container_of(affinity_notify, + struct sde_kms, affinity_notify); + + if (!sde_kms || !sde_kms->dev || !sde_kms->dev->dev_private) + return; + + priv = sde_kms->dev->dev_private; + + mutex_lock(&priv->phandle.phandle_lock); + + // save irq cpu mask + sde_kms->irq_cpu_mask = *mask; + + // request vote with updated irq cpu mask + if (sde_kms->irq_enabled) + sde_kms_update_pm_qos_irq_request(sde_kms); + + mutex_unlock(&priv->phandle.phandle_lock); +} + +static void sde_kms_irq_affinity_release(struct kref *ref) {} + static void sde_kms_handle_power_event(u32 event_type, void *usr) { struct sde_kms *sde_kms = usr; @@ -3213,7 +3244,9 @@ static void sde_kms_handle_power_event(u32 event_type, void *usr) sde_kms_init_shared_hw(sde_kms); _sde_kms_set_lutdma_vbif_remap(sde_kms); sde_kms->first_kickoff = true; + sde_kms_update_pm_qos_irq_request(sde_kms); } else if (event_type == SDE_POWER_EVENT_PRE_DISABLE) { + sde_kms_set_default_pm_qos_irq_request(sde_kms); sde_irq_update(msm_kms, false); sde_kms->first_kickoff = false; } @@ -3362,7 +3395,7 @@ 
static int sde_kms_hw_init(struct msm_kms *kms) struct msm_drm_private *priv; struct sde_rm *rm = NULL; struct platform_device *platformdev; - int i, rc = -EINVAL; + int i, irq_num, rc = -EINVAL; if (!kms) { SDE_ERROR("invalid kms\n"); @@ -3670,6 +3703,13 @@ static int sde_kms_hw_init(struct msm_kms *kms) } return 0; + sde_kms->affinity_notify.notify = sde_kms_irq_affinity_notify; + sde_kms->affinity_notify.release = sde_kms_irq_affinity_release; + + irq_num = platform_get_irq(to_platform_device(sde_kms->dev->dev), 0); + SDE_DEBUG("Registering for notification of irq_num: %d\n", irq_num); + irq_set_affinity_notifier(irq_num, &sde_kms->affinity_notify); + genpd_err: drm_obj_init_err: sde_core_perf_destroy(&sde_kms->perf); @@ -3741,3 +3781,29 @@ int sde_kms_handle_recovery(struct drm_encoder *encoder) SDE_EVT32(DRMID(encoder), MSM_ENC_ACTIVE_REGION); return sde_encoder_wait_for_event(encoder, MSM_ENC_ACTIVE_REGION); } + +void sde_kms_trigger_early_wakeup(struct sde_kms *sde_kms, + struct drm_crtc *crtc) +{ + struct msm_drm_private *priv; + struct drm_encoder *drm_enc; + + if (!sde_kms || !crtc) { + SDE_ERROR("invalid argument sde_kms %pK crtc %pK\n", + sde_kms, crtc); + return; + } + + priv = sde_kms->dev->dev_private; + + SDE_ATRACE_BEGIN("sde_kms_trigger_early_wakeup"); + drm_for_each_encoder_mask(drm_enc, crtc->dev, crtc->state->encoder_mask) + sde_encoder_trigger_early_wakeup(drm_enc); + + if (sde_kms->first_kickoff) { + sde_power_scale_reg_bus(&priv->phandle, + sde_kms->core_client, + VOTE_INDEX_HIGH, false); + } + SDE_ATRACE_END("sde_kms_trigger_early_wakeup"); +} diff --git a/drivers/gpu/drm/msm/sde/sde_kms.h b/drivers/gpu/drm/msm/sde/sde_kms.h index 1001bc1a48be..685056ff5c20 100644 --- a/drivers/gpu/drm/msm/sde/sde_kms.h +++ b/drivers/gpu/drm/msm/sde/sde_kms.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2015-2019, The Linux Foundation. All rights reserved. + * Copyright (c) 2015-2020, The Linux Foundation. All rights reserved. 
* Copyright (C) 2013 Red Hat * Author: Rob Clark * @@ -48,10 +48,7 @@ */ #define SDE_DEBUG(fmt, ...) \ do { \ - if (unlikely(drm_debug & DRM_UT_KMS)) \ - DRM_DEBUG(fmt, ##__VA_ARGS__); \ - else \ - pr_debug(fmt, ##__VA_ARGS__); \ + no_printk(fmt, ##__VA_ARGS__); \ } while (0) /** @@ -72,10 +69,7 @@ */ #define SDE_DEBUG_DRIVER(fmt, ...) \ do { \ - if (unlikely(drm_debug & DRM_UT_DRIVER)) \ - DRM_ERROR(fmt, ##__VA_ARGS__); \ - else \ - pr_debug(fmt, ##__VA_ARGS__); \ + no_printk(fmt, ##__VA_ARGS__); \ } while (0) #define SDE_ERROR(fmt, ...) pr_err("[sde error]" fmt, ##__VA_ARGS__) @@ -275,6 +269,10 @@ struct sde_kms { bool first_kickoff; bool qdss_enabled; + + cpumask_t irq_cpu_mask; + struct pm_qos_request pm_qos_irq_req; + struct irq_affinity_notify affinity_notify; }; struct vsync_info { @@ -680,5 +678,12 @@ int sde_kms_handle_recovery(struct drm_encoder *encoder); */ void sde_kms_release_splash_resource(struct sde_kms *sde_kms, struct drm_crtc *crtc); +/** + * sde_kms_trigger_early_wakeup - trigger early wake up + * @sde_kms: pointer to sde_kms structure + * @crtc: pointer to drm_crtc structure + */ +void sde_kms_trigger_early_wakeup(struct sde_kms *sde_kms, + struct drm_crtc *crtc); #endif /* __sde_kms_H__ */ diff --git a/drivers/gpu/drm/msm/sde_dbg.h b/drivers/gpu/drm/msm/sde_dbg.h index 40ba362c5b1b..04bb9dd925e7 100644 --- a/drivers/gpu/drm/msm/sde_dbg.h +++ b/drivers/gpu/drm/msm/sde_dbg.h @@ -175,7 +175,7 @@ extern struct sde_dbg_evtlog *sde_dbg_base_evtlog; #define SDE_DBG_CTRL(...) sde_dbg_ctrl(__func__, ##__VA_ARGS__, \ SDE_DBG_DUMP_DATA_LIMITER) -#if defined(CONFIG_DEBUG_FS) +#if 0 /** * sde_evtlog_init - allocate a new event log object diff --git a/drivers/gpu/drm/msm/sde_rsc.c b/drivers/gpu/drm/msm/sde_rsc.c index 859f6a336b90..bd3d432f074f 100644 --- a/drivers/gpu/drm/msm/sde_rsc.c +++ b/drivers/gpu/drm/msm/sde_rsc.c @@ -1,4 +1,4 @@ -/* Copyright (c) 2016-2020, The Linux Foundation. All rights reserved. 
+/* Copyright (c) 2016-2019, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -303,24 +303,6 @@ enum sde_rsc_state get_sde_rsc_current_state(int rsc_index) } EXPORT_SYMBOL(get_sde_rsc_current_state); -u32 get_sde_rsc_version(int rsc_index) -{ - struct sde_rsc_priv *rsc; - - if (rsc_index >= MAX_RSC_COUNT) { - pr_err("invalid rsc index:%d\n", rsc_index); - return 0; - } else if (!rsc_prv_list[rsc_index]) { - pr_err("rsc idx:%d not probed yet or not available\n", - rsc_index); - return 0; - } - - rsc = rsc_prv_list[rsc_index]; - return rsc->version; -} -EXPORT_SYMBOL(get_sde_rsc_version); - static int sde_rsc_clk_enable(struct sde_power_handle *phandle, struct sde_power_client *pclient, bool enable) { @@ -496,7 +478,7 @@ static u32 sde_rsc_timer_calculate(struct sde_rsc_priv *rsc, return ret; } -static int sde_rsc_switch_to_cmd_v3(struct sde_rsc_priv *rsc, +static int sde_rsc_switch_to_cmd(struct sde_rsc_priv *rsc, struct sde_rsc_cmd_config *config, struct sde_rsc_client *caller_client, int *wait_vblank_crtc_id) @@ -568,78 +550,7 @@ end: return rc; } -static int sde_rsc_switch_to_cmd_v2(struct sde_rsc_priv *rsc, - struct sde_rsc_cmd_config *config, - struct sde_rsc_client *caller_client, - int *wait_vblank_crtc_id) -{ - struct sde_rsc_client *client; - int rc = STATE_UPDATE_NOT_ALLOWED; - - if (!rsc->primary_client) { - pr_err("primary client not available for cmd state switch\n"); - rc = -EINVAL; - goto end; - } else if (caller_client != rsc->primary_client) { - pr_err("primary client state:%d not cmd state request\n", - rsc->primary_client->current_state); - rc = -EINVAL; - goto end; - } - - /* update timers - might not be available at next switch */ - if (config) - sde_rsc_timer_calculate(rsc, config, SDE_RSC_CMD_STATE); - - /** - * rsc clients can still send config at any time. 
If a config is - * received during cmd_state then vsync_wait will execute with the logic - * below. If a config is received when rsc is in AMC mode; A mode - * switch will do the vsync wait. updated checks still support all cases - * for dynamic mode switch and inline rotation. - */ - if (rsc->current_state == SDE_RSC_CMD_STATE) { - rc = 0; - if (config) - goto vsync_wait; - else - goto end; - } - - /* any one client in video state blocks the cmd state switch */ - list_for_each_entry(client, &rsc->client_list, list) - if (client->current_state == SDE_RSC_VID_STATE) - goto end; - - if (rsc->hw_ops.state_update) { - rc = rsc->hw_ops.state_update(rsc, SDE_RSC_CMD_STATE); - if (!rc) - rpmh_mode_solver_set(rsc->disp_rsc, true); - } - -vsync_wait: - /* indicate wait for vsync for vid to cmd state switch & cfg update */ - if (!rc && (rsc->current_state == SDE_RSC_VID_STATE || - rsc->current_state == SDE_RSC_CMD_STATE)) { - /* clear VSYNC timestamp for indication when update completes */ - if (rsc->hw_ops.hw_vsync) - rsc->hw_ops.hw_vsync(rsc, VSYNC_ENABLE, NULL, 0, 0); - if (!wait_vblank_crtc_id) { - pr_err("invalid crtc id wait pointer, client %d\n", - caller_client->id); - SDE_EVT32(caller_client->id, rsc->current_state, - caller_client->crtc_id, - wait_vblank_crtc_id, SDE_EVTLOG_ERROR); - msleep(PRIMARY_VBLANK_WORST_CASE_MS); - } else { - *wait_vblank_crtc_id = rsc->primary_client->crtc_id; - } - } -end: - return rc; -} - -static int sde_rsc_switch_to_clk_v3(struct sde_rsc_priv *rsc, +static int sde_rsc_switch_to_clk(struct sde_rsc_priv *rsc, int *wait_vblank_crtc_id) { struct sde_rsc_client *client; @@ -707,62 +618,7 @@ end: return rc; } -static int sde_rsc_switch_to_clk_v2(struct sde_rsc_priv *rsc, - int *wait_vblank_crtc_id) -{ - struct sde_rsc_client *client; - int rc = STATE_UPDATE_NOT_ALLOWED; - - list_for_each_entry(client, &rsc->client_list, list) - if ((client->current_state == SDE_RSC_VID_STATE) || - (client->current_state == SDE_RSC_CMD_STATE)) - goto end; - 
- if (rsc->hw_ops.state_update) { - rc = rsc->hw_ops.state_update(rsc, SDE_RSC_CLK_STATE); - if (!rc) - rpmh_mode_solver_set(rsc->disp_rsc, false); - } - - /* indicate wait for vsync for cmd to clk state switch */ - if (!rc && rsc->primary_client && - (rsc->current_state == SDE_RSC_CMD_STATE)) { - /* clear VSYNC timestamp for indication when update completes */ - if (rsc->hw_ops.hw_vsync) - rsc->hw_ops.hw_vsync(rsc, VSYNC_ENABLE, NULL, 0, 0); - if (!wait_vblank_crtc_id) { - pr_err("invalid crtc id wait pointer provided\n"); - msleep(PRIMARY_VBLANK_WORST_CASE_MS); - } else { - *wait_vblank_crtc_id = rsc->primary_client->crtc_id; - - /* increase refcount, so we wait for the next vsync */ - atomic_inc(&rsc->rsc_vsync_wait); - SDE_EVT32(atomic_read(&rsc->rsc_vsync_wait)); - } - } else if (atomic_read(&rsc->rsc_vsync_wait)) { - SDE_EVT32(rsc->primary_client, rsc->current_state, - atomic_read(&rsc->rsc_vsync_wait)); - - /* Wait for the vsync, if the refcount is set */ - rc = wait_event_timeout(rsc->rsc_vsync_waitq, - atomic_read(&rsc->rsc_vsync_wait) == 0, - msecs_to_jiffies(PRIMARY_VBLANK_WORST_CASE_MS*2)); - if (!rc) { - pr_err("Timeout waiting for vsync\n"); - rc = -ETIMEDOUT; - SDE_EVT32(atomic_read(&rsc->rsc_vsync_wait), rc, - SDE_EVTLOG_ERROR); - } else { - SDE_EVT32(atomic_read(&rsc->rsc_vsync_wait), rc); - rc = 0; - } - } -end: - return rc; -} - -static int sde_rsc_switch_to_vid_v3(struct sde_rsc_priv *rsc, +static int sde_rsc_switch_to_vid(struct sde_rsc_priv *rsc, struct sde_rsc_cmd_config *config, struct sde_rsc_client *caller_client, int *wait_vblank_crtc_id) @@ -833,68 +689,7 @@ end: return rc; } -static int sde_rsc_switch_to_vid_v2(struct sde_rsc_priv *rsc, - struct sde_rsc_cmd_config *config, - struct sde_rsc_client *caller_client, - int *wait_vblank_crtc_id) -{ - int rc = 0; - - /* update timers - might not be available at next switch */ - if (config && (caller_client == rsc->primary_client)) - sde_rsc_timer_calculate(rsc, config, SDE_RSC_VID_STATE); - - 
/* early exit without vsync wait for vid state */ - if (rsc->current_state == SDE_RSC_VID_STATE) - goto end; - - /* video state switch should be done immediately */ - if (rsc->hw_ops.state_update) { - rc = rsc->hw_ops.state_update(rsc, SDE_RSC_VID_STATE); - if (!rc) - rpmh_mode_solver_set(rsc->disp_rsc, false); - } - - /* indicate wait for vsync for cmd to vid state switch */ - if (!rc && rsc->primary_client && - (rsc->current_state == SDE_RSC_CMD_STATE)) { - /* clear VSYNC timestamp for indication when update completes */ - if (rsc->hw_ops.hw_vsync) - rsc->hw_ops.hw_vsync(rsc, VSYNC_ENABLE, NULL, 0, 0); - if (!wait_vblank_crtc_id) { - pr_err("invalid crtc id wait pointer provided\n"); - msleep(PRIMARY_VBLANK_WORST_CASE_MS); - } else { - *wait_vblank_crtc_id = rsc->primary_client->crtc_id; - - /* increase refcount, so we wait for the next vsync */ - atomic_inc(&rsc->rsc_vsync_wait); - SDE_EVT32(atomic_read(&rsc->rsc_vsync_wait)); - } - } else if (atomic_read(&rsc->rsc_vsync_wait)) { - SDE_EVT32(rsc->primary_client, rsc->current_state, - atomic_read(&rsc->rsc_vsync_wait)); - - /* Wait for the vsync, if the refcount is set */ - rc = wait_event_timeout(rsc->rsc_vsync_waitq, - atomic_read(&rsc->rsc_vsync_wait) == 0, - msecs_to_jiffies(PRIMARY_VBLANK_WORST_CASE_MS*2)); - if (!rc) { - pr_err("Timeout waiting for vsync\n"); - rc = -ETIMEDOUT; - SDE_EVT32(atomic_read(&rsc->rsc_vsync_wait), rc, - SDE_EVTLOG_ERROR); - } else { - SDE_EVT32(atomic_read(&rsc->rsc_vsync_wait), rc); - rc = 0; - } - } - -end: - return rc; -} - -static int sde_rsc_switch_to_idle_v3(struct sde_rsc_priv *rsc, +static int sde_rsc_switch_to_idle(struct sde_rsc_priv *rsc, struct sde_rsc_cmd_config *config, struct sde_rsc_client *caller_client, int *wait_vblank_crtc_id) @@ -914,7 +709,7 @@ static int sde_rsc_switch_to_idle_v3(struct sde_rsc_priv *rsc, client->client_type == SDE_RSC_EXTERNAL_DISP_CLIENT) multi_display_active = true; else if (client->current_state == SDE_RSC_CLK_STATE && - 
client->client_type == SDE_RSC_CLK_CLIENT) + client->client_type == SDE_RSC_CLK_CLIENT) clk_client_active = true; else if (client->current_state == SDE_RSC_VID_STATE) vid_display_active = true; @@ -927,20 +722,18 @@ static int sde_rsc_switch_to_idle_v3(struct sde_rsc_priv *rsc, pr_debug("multi_display:%d clk_client:%d vid_display:%d cmd_display:%d\n", multi_display_active, clk_client_active, vid_display_active, cmd_display_active); - if (vid_display_active && !multi_display_active && - rsc->state_ops.switch_to_vid) { - rc = rsc->state_ops.switch_to_vid(rsc, NULL, - rsc->primary_client, wait_vblank_crtc_id); + if (vid_display_active && !multi_display_active) { + rc = sde_rsc_switch_to_vid(rsc, NULL, rsc->primary_client, + wait_vblank_crtc_id); if (!rc) rc = VID_MODE_SWITCH_SUCCESS; - } else if (cmd_display_active && !multi_display_active && - rsc->state_ops.switch_to_cmd) { - rc = rsc->state_ops.switch_to_cmd(rsc, NULL, - rsc->primary_client, wait_vblank_crtc_id); + } else if (cmd_display_active && !multi_display_active) { + rc = sde_rsc_switch_to_cmd(rsc, NULL, rsc->primary_client, + wait_vblank_crtc_id); if (!rc) rc = CMD_MODE_SWITCH_SUCCESS; - } else if (clk_client_active && rsc->state_ops.switch_to_clk) { - rc = rsc->state_ops.switch_to_clk(rsc, wait_vblank_crtc_id); + } else if (clk_client_active) { + rc = sde_rsc_switch_to_clk(rsc, wait_vblank_crtc_id); if (!rc) rc = CLK_MODE_SWITCH_SUCCESS; } else if (rsc->hw_ops.state_update) { @@ -952,55 +745,6 @@ static int sde_rsc_switch_to_idle_v3(struct sde_rsc_priv *rsc, return rc; } -static int sde_rsc_switch_to_idle_v2(struct sde_rsc_priv *rsc, - struct sde_rsc_cmd_config *config, - struct sde_rsc_client *caller_client, - int *wait_vblank_crtc_id) -{ - struct sde_rsc_client *client; - int rc = STATE_UPDATE_NOT_ALLOWED; - bool clk_client_active = false; - bool vid_display_active = false, cmd_display_active = false; - - /* - * following code needs to run the loop through each - * client because they might be in 
different order - * sorting is not possible; only preference is available - */ - list_for_each_entry(client, &rsc->client_list, list) { - if (client->current_state == SDE_RSC_CLK_STATE && - client->client_type == SDE_RSC_CLK_CLIENT) - clk_client_active = true; - else if (client->current_state == SDE_RSC_VID_STATE) - vid_display_active = true; - else if (client->current_state == SDE_RSC_CMD_STATE) - cmd_display_active = true; - pr_debug("client state:%d type:%d\n", - client->current_state, client->client_type); - } - - pr_debug("clk_client:%d vid_display:%d cmd_display:%d\n", - clk_client_active, vid_display_active, - cmd_display_active); - if (vid_display_active) { - return rc; - } else if (cmd_display_active && rsc->state_ops.switch_to_cmd) { - rc = rsc->state_ops.switch_to_cmd(rsc, NULL, - rsc->primary_client, wait_vblank_crtc_id); - if (!rc) - rc = CMD_MODE_SWITCH_SUCCESS; - } else if (clk_client_active && rsc->state_ops.switch_to_clk) { - rc = rsc->state_ops.switch_to_clk(rsc, wait_vblank_crtc_id); - if (!rc) - rc = CLK_MODE_SWITCH_SUCCESS; - } else if (rsc->hw_ops.state_update) { - rc = rsc->hw_ops.state_update(rsc, SDE_RSC_IDLE_STATE); - if (!rc) - rpmh_mode_solver_set(rsc->disp_rsc, true); - } - - return rc; -} /** * sde_rsc_client_get_vsync_refcount() - returns the status of the vsync * refcount, to signal if the client needs to reset the refcounting logic @@ -1197,39 +941,33 @@ int sde_rsc_client_state_update(struct sde_rsc_client *caller_client, switch (state) { case SDE_RSC_IDLE_STATE: - if (rsc->state_ops.switch_to_idle) { - rc = rsc->state_ops.switch_to_idle(rsc, NULL, - rsc->primary_client, wait_vblank_crtc_id); + rc = sde_rsc_switch_to_idle(rsc, NULL, rsc->primary_client, + wait_vblank_crtc_id); - if (rc == CMD_MODE_SWITCH_SUCCESS) { - state = SDE_RSC_CMD_STATE; - rc = 0; - } else if (rc == VID_MODE_SWITCH_SUCCESS) { - state = SDE_RSC_VID_STATE; - rc = 0; - } else if (rc == CLK_MODE_SWITCH_SUCCESS) { - state = SDE_RSC_CLK_STATE; - rc = 0; - } + if 
(rc == CMD_MODE_SWITCH_SUCCESS) { + state = SDE_RSC_CMD_STATE; + rc = 0; + } else if (rc == VID_MODE_SWITCH_SUCCESS) { + state = SDE_RSC_VID_STATE; + rc = 0; + } else if (rc == CLK_MODE_SWITCH_SUCCESS) { + state = SDE_RSC_CLK_STATE; + rc = 0; } break; case SDE_RSC_CMD_STATE: - if (rsc->state_ops.switch_to_cmd) - rc = rsc->state_ops.switch_to_cmd(rsc, config, - caller_client, wait_vblank_crtc_id); + rc = sde_rsc_switch_to_cmd(rsc, config, caller_client, + wait_vblank_crtc_id); break; case SDE_RSC_VID_STATE: - if (rsc->state_ops.switch_to_vid) - rc = rsc->state_ops.switch_to_vid(rsc, config, - caller_client, wait_vblank_crtc_id); + rc = sde_rsc_switch_to_vid(rsc, config, caller_client, + wait_vblank_crtc_id); break; case SDE_RSC_CLK_STATE: - if (rsc->state_ops.switch_to_clk) - rc = rsc->state_ops.switch_to_clk(rsc, - wait_vblank_crtc_id); + rc = sde_rsc_switch_to_clk(rsc, wait_vblank_crtc_id); break; default: @@ -1375,7 +1113,7 @@ clk_enable_fail: } EXPORT_SYMBOL(sde_rsc_client_trigger_vote); -#if defined(CONFIG_DEBUG_FS) +#if 0 void sde_rsc_debug_dump(u32 mux_sel) { struct sde_rsc_priv *rsc; @@ -1800,10 +1538,6 @@ static int sde_rsc_probe(struct platform_device *pdev) + RSC_MODE_INSTRUCTION_TIME; rsc->backoff_time_ns = RSC_MODE_INSTRUCTION_TIME; rsc->mode_threshold_time_ns = rsc->time_slot_0_ns; - rsc->state_ops.switch_to_idle = sde_rsc_switch_to_idle_v3; - rsc->state_ops.switch_to_clk = sde_rsc_switch_to_clk_v3; - rsc->state_ops.switch_to_cmd = sde_rsc_switch_to_cmd_v3; - rsc->state_ops.switch_to_vid = sde_rsc_switch_to_vid_v3; } else { rsc->time_slot_0_ns = (rsc->single_tcs_execution_time * 2) + RSC_MODE_INSTRUCTION_TIME; @@ -1811,10 +1545,6 @@ static int sde_rsc_probe(struct platform_device *pdev) + RSC_MODE_INSTRUCTION_TIME; rsc->mode_threshold_time_ns = rsc->backoff_time_ns + RSC_MODE_THRESHOLD_OVERHEAD; - rsc->state_ops.switch_to_idle = sde_rsc_switch_to_idle_v2; - rsc->state_ops.switch_to_clk = sde_rsc_switch_to_clk_v2; - rsc->state_ops.switch_to_cmd = 
sde_rsc_switch_to_cmd_v2; - rsc->state_ops.switch_to_vid = sde_rsc_switch_to_vid_v2; } ret = sde_power_resource_init(pdev, &rsc->phandle); diff --git a/drivers/gpu/drm/msm/sde_rsc_hw.c b/drivers/gpu/drm/msm/sde_rsc_hw.c index b37f28620583..e7d506420fa4 100644 --- a/drivers/gpu/drm/msm/sde_rsc_hw.c +++ b/drivers/gpu/drm/msm/sde_rsc_hw.c @@ -1,4 +1,4 @@ -/* Copyright (c) 2016-2020, The Linux Foundation. All rights reserved. +/* Copyright (c) 2016-2019, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -413,21 +413,19 @@ int sde_rsc_mode2_exit(struct sde_rsc_priv *rsc, enum sde_rsc_state state) dss_reg_w(&rsc->drv_io, SDE_RSC_SOLVER_SOLVER_MODES_ENABLED_DRV0, 0x3, rsc->debug_mode); - if (rsc->version >= SDE_RSC_REV_3) { - reg = dss_reg_r(&rsc->wrapper_io, + reg = dss_reg_r(&rsc->wrapper_io, SDE_RSCC_WRAPPER_OVERRIDE_CTRL, rsc->debug_mode); - reg &= ~(BIT(0) | BIT(8)); - dss_reg_w(&rsc->wrapper_io, SDE_RSCC_WRAPPER_OVERRIDE_CTRL, + reg &= ~(BIT(0) | BIT(8)); + dss_reg_w(&rsc->wrapper_io, SDE_RSCC_WRAPPER_OVERRIDE_CTRL, reg, rsc->debug_mode); - wmb(); /* make sure to disable rsc solver state */ + wmb(); /* make sure to disable rsc solver state */ - reg = dss_reg_r(&rsc->wrapper_io, + reg = dss_reg_r(&rsc->wrapper_io, SDE_RSCC_WRAPPER_OVERRIDE_CTRL, rsc->debug_mode); - reg |= (BIT(0) | BIT(8)); - dss_reg_w(&rsc->wrapper_io, SDE_RSCC_WRAPPER_OVERRIDE_CTRL, + reg |= (BIT(0) | BIT(8)); + dss_reg_w(&rsc->wrapper_io, SDE_RSCC_WRAPPER_OVERRIDE_CTRL, reg, rsc->debug_mode); - wmb(); /* make sure to enable rsc solver state */ - } + wmb(); /* make sure to enable rsc solver state */ rsc_event_trigger(rsc, SDE_RSC_EVENT_POST_CORE_RESTORE); diff --git a/drivers/gpu/drm/msm/sde_rsc_priv.h b/drivers/gpu/drm/msm/sde_rsc_priv.h index 1e43eae8724d..e89bb7aae00d 100644 --- a/drivers/gpu/drm/msm/sde_rsc_priv.h +++ b/drivers/gpu/drm/msm/sde_rsc_priv.h @@ 
-1,4 +1,4 @@ -/* Copyright (c) 2016-2020, The Linux Foundation. All rights reserved. +/* Copyright (c) 2016-2019, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -32,6 +32,10 @@ #define MAX_COUNT_SIZE_SUPPORTED 128 +#define SDE_RSC_REV_1 0x1 +#define SDE_RSC_REV_2 0x2 +#define SDE_RSC_REV_3 0x3 + struct sde_rsc_priv; /** @@ -126,23 +130,6 @@ struct sde_rsc_timer_config { u32 bwi_threshold_time_ns; }; -struct sde_rsc_state_switch_ops { - int (*switch_to_idle)(struct sde_rsc_priv *rsc, - struct sde_rsc_cmd_config *config, - struct sde_rsc_client *caller_client, - int *wait_vblank_crtc_id); - int (*switch_to_vid)(struct sde_rsc_priv *rsc, - struct sde_rsc_cmd_config *config, - struct sde_rsc_client *caller_client, - int *wait_vblank_crtc_id); - int (*switch_to_cmd)(struct sde_rsc_priv *rsc, - struct sde_rsc_cmd_config *config, - struct sde_rsc_client *caller_client, - int *wait_vblank_crtc_id); - int (*switch_to_clk)(struct sde_rsc_priv *rsc, - int *wait_vblank_crtc_id); -}; - /** * struct sde_rsc_bw_config: bandwidth configuration * @@ -222,7 +209,6 @@ struct sde_rsc_priv { struct sde_rsc_cmd_config cmd_config; u32 current_state; u32 vsync_source; - struct sde_rsc_state_switch_ops state_ops; u32 debug_mode; struct dentry *debugfs_root; diff --git a/drivers/gpu/msm/Makefile b/drivers/gpu/msm/Makefile index b1f075ce037d..dab939205127 100644 --- a/drivers/gpu/msm/Makefile +++ b/drivers/gpu/msm/Makefile @@ -2,14 +2,12 @@ ccflags-y := -Iinclude/linux msm_kgsl_core-y = \ kgsl.o \ - kgsl_trace.o \ kgsl_drawobj.o \ kgsl_ioctl.o \ kgsl_sharedmem.o \ kgsl_pwrctrl.o \ kgsl_pwrscale.o \ kgsl_mmu.o \ - kgsl_snapshot.o \ kgsl_events.o \ kgsl_pool.o \ kgsl_gmu_core.o \ @@ -27,19 +25,7 @@ msm_adreno-y += \ adreno_ringbuffer.o \ adreno_drawctxt.o \ adreno_dispatch.o \ - adreno_snapshot.o \ - adreno_coresight.o \ - adreno_trace.o \ - adreno_a3xx.o 
\ - adreno_a4xx.o \ - adreno_a5xx.o \ adreno_a6xx.o \ - adreno_a3xx_snapshot.o \ - adreno_a4xx_snapshot.o \ - adreno_a5xx_snapshot.o \ - adreno_a6xx_snapshot.o \ - adreno_a4xx_preempt.o \ - adreno_a5xx_preempt.o \ adreno_a6xx_preempt.o \ adreno_a6xx_gmu.o \ adreno_a6xx_rgmu.o \ @@ -49,7 +35,6 @@ msm_adreno-y += \ adreno_perfcounter.o msm_adreno-$(CONFIG_QCOM_KGSL_IOMMU) += adreno_iommu.o -msm_adreno-$(CONFIG_DEBUG_FS) += adreno_debugfs.o adreno_profile.o msm_adreno-$(CONFIG_COMPAT) += adreno_compat.o msm_kgsl_core-objs = $(msm_kgsl_core-y) diff --git a/drivers/gpu/msm/adreno-gpulist.h b/drivers/gpu/msm/adreno-gpulist.h index 9385734b3e66..8284051b0613 100644 --- a/drivers/gpu/msm/adreno-gpulist.h +++ b/drivers/gpu/msm/adreno-gpulist.h @@ -14,392 +14,6 @@ #define ANY_ID (~0) static const struct adreno_gpu_core adreno_gpulist[] = { - { - .gpurev = ADRENO_REV_A306, - .core = 3, - .major = 0, - .minor = 6, - .patchid = 0x00, - .features = ADRENO_SOFT_FAULT_DETECT, - .pm4fw_name = "a300_pm4.fw", - .pfpfw_name = "a300_pfp.fw", - .gpudev = &adreno_a3xx_gpudev, - .gmem_size = SZ_128K, - .busy_mask = 0x7FFFFFFE, - }, - { - .gpurev = ADRENO_REV_A306A, - .core = 3, - .major = 0, - .minor = 6, - .patchid = 0x20, - .features = ADRENO_SOFT_FAULT_DETECT, - .pm4fw_name = "a300_pm4.fw", - .pfpfw_name = "a300_pfp.fw", - .gpudev = &adreno_a3xx_gpudev, - .gmem_size = SZ_128K, - .busy_mask = 0x7FFFFFFE, - }, - { - .gpurev = ADRENO_REV_A304, - .core = 3, - .major = 0, - .minor = 4, - .patchid = 0x00, - .features = ADRENO_SOFT_FAULT_DETECT, - .pm4fw_name = "a300_pm4.fw", - .pfpfw_name = "a300_pfp.fw", - .gpudev = &adreno_a3xx_gpudev, - .gmem_size = (SZ_64K + SZ_32K), - .busy_mask = 0x7FFFFFFE, - }, - { - .gpurev = ADRENO_REV_A405, - .core = 4, - .major = 0, - .minor = 5, - .patchid = ANY_ID, - .features = ADRENO_SOFT_FAULT_DETECT, - .pm4fw_name = "a420_pm4.fw", - .pfpfw_name = "a420_pfp.fw", - .gpudev = &adreno_a4xx_gpudev, - .gmem_size = SZ_256K, - .busy_mask = 0x7FFFFFFE, - }, - { - 
.gpurev = ADRENO_REV_A420, - .core = 4, - .major = 2, - .minor = 0, - .patchid = ANY_ID, - .features = ADRENO_USES_OCMEM | ADRENO_WARM_START | - ADRENO_USE_BOOTSTRAP | ADRENO_SOFT_FAULT_DETECT, - .pm4fw_name = "a420_pm4.fw", - .pfpfw_name = "a420_pfp.fw", - .gpudev = &adreno_a4xx_gpudev, - .gmem_size = (SZ_1M + SZ_512K), - .pm4_jt_idx = 0x901, - .pm4_jt_addr = 0x300, - .pfp_jt_idx = 0x401, - .pfp_jt_addr = 0x400, - .pm4_bstrp_size = 0x06, - .pfp_bstrp_size = 0x28, - .pfp_bstrp_ver = 0x4ff083, - .busy_mask = 0x7FFFFFFE, - }, - { - .gpurev = ADRENO_REV_A430, - .core = 4, - .major = 3, - .minor = 0, - .patchid = ANY_ID, - .features = ADRENO_USES_OCMEM | ADRENO_WARM_START | - ADRENO_USE_BOOTSTRAP | ADRENO_SPTP_PC | ADRENO_PPD | - ADRENO_CONTENT_PROTECTION | ADRENO_PREEMPTION | - ADRENO_SOFT_FAULT_DETECT, - .pm4fw_name = "a420_pm4.fw", - .pfpfw_name = "a420_pfp.fw", - .gpudev = &adreno_a4xx_gpudev, - .gmem_size = (SZ_1M + SZ_512K), - .pm4_jt_idx = 0x901, - .pm4_jt_addr = 0x300, - .pfp_jt_idx = 0x401, - .pfp_jt_addr = 0x400, - .pm4_bstrp_size = 0x06, - .pfp_bstrp_size = 0x28, - .pfp_bstrp_ver = 0x4ff083, - .shader_offset = 0x20000, - .shader_size = 0x10000, - .num_protected_regs = 0x18, - .busy_mask = 0x7FFFFFFE, - }, - { - .gpurev = ADRENO_REV_A418, - .core = 4, - .major = 1, - .minor = 8, - .patchid = ANY_ID, - .features = ADRENO_USES_OCMEM | ADRENO_WARM_START | - ADRENO_USE_BOOTSTRAP | ADRENO_SPTP_PC | - ADRENO_SOFT_FAULT_DETECT, - .pm4fw_name = "a420_pm4.fw", - .pfpfw_name = "a420_pfp.fw", - .gpudev = &adreno_a4xx_gpudev, - .gmem_size = (SZ_512K), - .pm4_jt_idx = 0x901, - .pm4_jt_addr = 0x300, - .pfp_jt_idx = 0x401, - .pfp_jt_addr = 0x400, - .pm4_bstrp_size = 0x06, - .pfp_bstrp_size = 0x28, - .pfp_bstrp_ver = 0x4ff083, - .shader_offset = 0x20000, /* SP and TP addresses */ - .shader_size = 0x10000, - .num_protected_regs = 0x18, - .busy_mask = 0x7FFFFFFE, - }, - { - .gpurev = ADRENO_REV_A530, - .core = 5, - .major = 3, - .minor = 0, - .patchid = 0, - .pm4fw_name = 
"a530v1_pm4.fw", - .pfpfw_name = "a530v1_pfp.fw", - .gpudev = &adreno_a5xx_gpudev, - .gmem_size = SZ_1M, - .num_protected_regs = 0x20, - .busy_mask = 0xFFFFFFFE, - }, - { - .gpurev = ADRENO_REV_A530, - .core = 5, - .major = 3, - .minor = 0, - .patchid = 1, - .features = ADRENO_GPMU | ADRENO_SPTP_PC | ADRENO_LM | - ADRENO_PREEMPTION | ADRENO_64BIT | - ADRENO_CONTENT_PROTECTION, - .pm4fw_name = "a530_pm4.fw", - .pfpfw_name = "a530_pfp.fw", - .zap_name = "a530_zap", - .gpudev = &adreno_a5xx_gpudev, - .gmem_size = SZ_1M, - .num_protected_regs = 0x20, - .gpmufw_name = "a530_gpmu.fw2", - .gpmu_major = 1, - .gpmu_minor = 0, - .busy_mask = 0xFFFFFFFE, - .lm_major = 3, - .lm_minor = 0, - .gpmu_tsens = 0x00060007, - .max_power = 5448, - .regfw_name = "a530v2_seq.fw2", - }, - { - .gpurev = ADRENO_REV_A530, - .core = 5, - .major = 3, - .minor = 0, - .patchid = ANY_ID, - .features = ADRENO_GPMU | ADRENO_SPTP_PC | ADRENO_LM | - ADRENO_PREEMPTION | ADRENO_64BIT | - ADRENO_CONTENT_PROTECTION, - .pm4fw_name = "a530_pm4.fw", - .pfpfw_name = "a530_pfp.fw", - .zap_name = "a530_zap", - .gpudev = &adreno_a5xx_gpudev, - .gmem_size = SZ_1M, - .num_protected_regs = 0x20, - .gpmufw_name = "a530v3_gpmu.fw2", - .gpmu_major = 1, - .gpmu_minor = 0, - .busy_mask = 0xFFFFFFFE, - .lm_major = 1, - .lm_minor = 0, - .gpmu_tsens = 0x00060007, - .max_power = 5448, - .regfw_name = "a530v3_seq.fw2", - }, - { - .gpurev = ADRENO_REV_A505, - .core = 5, - .major = 0, - .minor = 5, - .patchid = ANY_ID, - .features = ADRENO_PREEMPTION | ADRENO_64BIT, - .pm4fw_name = "a530_pm4.fw", - .pfpfw_name = "a530_pfp.fw", - .gpudev = &adreno_a5xx_gpudev, - .gmem_size = (SZ_128K + SZ_8K), - .num_protected_regs = 0x20, - .busy_mask = 0xFFFFFFFE, - }, - { - .gpurev = ADRENO_REV_A506, - .core = 5, - .major = 0, - .minor = 6, - .patchid = ANY_ID, - .features = ADRENO_PREEMPTION | ADRENO_64BIT | - ADRENO_CONTENT_PROTECTION | ADRENO_CPZ_RETENTION, - .pm4fw_name = "a530_pm4.fw", - .pfpfw_name = "a530_pfp.fw", - .zap_name = 
"a506_zap", - .gpudev = &adreno_a5xx_gpudev, - .gmem_size = (SZ_128K + SZ_8K), - .num_protected_regs = 0x20, - .busy_mask = 0xFFFFFFFE, - }, - { - .gpurev = ADRENO_REV_A510, - .core = 5, - .major = 1, - .minor = 0, - .patchid = ANY_ID, - .pm4fw_name = "a530_pm4.fw", - .pfpfw_name = "a530_pfp.fw", - .gpudev = &adreno_a5xx_gpudev, - .gmem_size = SZ_256K, - .num_protected_regs = 0x20, - .busy_mask = 0xFFFFFFFE, - }, - { - .gpurev = ADRENO_REV_A540, - .core = 5, - .major = 4, - .minor = 0, - .patchid = 0, - .features = ADRENO_PREEMPTION | ADRENO_64BIT | - ADRENO_CONTENT_PROTECTION | - ADRENO_GPMU | ADRENO_SPTP_PC, - .pm4fw_name = "a530_pm4.fw", - .pfpfw_name = "a530_pfp.fw", - .zap_name = "a540_zap", - .gpudev = &adreno_a5xx_gpudev, - .gmem_size = SZ_1M, - .num_protected_regs = 0x20, - .busy_mask = 0xFFFFFFFE, - .gpmufw_name = "a540_gpmu.fw2", - .gpmu_major = 3, - .gpmu_minor = 0, - .gpmu_tsens = 0x000C000D, - .max_power = 5448, - }, - { - .gpurev = ADRENO_REV_A540, - .core = 5, - .major = 4, - .minor = 0, - .patchid = ANY_ID, - .features = ADRENO_PREEMPTION | ADRENO_64BIT | - ADRENO_CONTENT_PROTECTION | - ADRENO_GPMU | ADRENO_SPTP_PC, - .pm4fw_name = "a530_pm4.fw", - .pfpfw_name = "a530_pfp.fw", - .zap_name = "a540_zap", - .gpudev = &adreno_a5xx_gpudev, - .gmem_size = SZ_1M, - .num_protected_regs = 0x20, - .busy_mask = 0xFFFFFFFE, - .gpmufw_name = "a540_gpmu.fw2", - .gpmu_major = 3, - .gpmu_minor = 0, - .gpmu_tsens = 0x000C000D, - .max_power = 5448, - }, - { - .gpurev = ADRENO_REV_A512, - .core = 5, - .major = 1, - .minor = 2, - .patchid = ANY_ID, - .features = ADRENO_PREEMPTION | ADRENO_64BIT | - ADRENO_CONTENT_PROTECTION | ADRENO_CPZ_RETENTION, - .pm4fw_name = "a530_pm4.fw", - .pfpfw_name = "a530_pfp.fw", - .zap_name = "a512_zap", - .gpudev = &adreno_a5xx_gpudev, - .gmem_size = (SZ_256K + SZ_16K), - .num_protected_regs = 0x20, - .busy_mask = 0xFFFFFFFE, - .cx_ipeak_gpu_freq = 700000000, - }, - { - .gpurev = ADRENO_REV_A508, - .core = 5, - .major = 0, - .minor = 8, - 
.patchid = ANY_ID, - .features = ADRENO_PREEMPTION | ADRENO_64BIT | - ADRENO_CONTENT_PROTECTION | ADRENO_CPZ_RETENTION, - .pm4fw_name = "a530_pm4.fw", - .pfpfw_name = "a530_pfp.fw", - .zap_name = "a508_zap", - .gpudev = &adreno_a5xx_gpudev, - .gmem_size = (SZ_128K + SZ_8K), - .num_protected_regs = 0x20, - .busy_mask = 0xFFFFFFFE, - }, - { - .gpurev = ADRENO_REV_A630, - .core = 6, - .major = 3, - .minor = 0, - .patchid = 0, - .features = ADRENO_64BIT | ADRENO_RPMH | - ADRENO_GPMU | ADRENO_CONTENT_PROTECTION | ADRENO_LM, - .sqefw_name = "a630_sqe.fw", - .zap_name = "a630_zap", - .gpudev = &adreno_a6xx_gpudev, - .gmem_size = SZ_1M, - .num_protected_regs = 0x20, - .busy_mask = 0xFFFFFFFE, - .gpmufw_name = "a630_gmu.bin", - .gpmu_major = 0x1, - .gpmu_minor = 0x003, - .gpmu_tsens = 0x000C000D, - .max_power = 5448, - }, - { - .gpurev = ADRENO_REV_A630, - .core = 6, - .major = 3, - .minor = 0, - .patchid = ANY_ID, - .features = ADRENO_64BIT | ADRENO_RPMH | ADRENO_IFPC | - ADRENO_GPMU | ADRENO_CONTENT_PROTECTION | - ADRENO_IOCOHERENT | ADRENO_PREEMPTION, - .sqefw_name = "a630_sqe.fw", - .zap_name = "a630_zap", - .gpudev = &adreno_a6xx_gpudev, - .gmem_size = SZ_1M, - .num_protected_regs = 0x20, - .busy_mask = 0xFFFFFFFE, - .gpmufw_name = "a630_gmu.bin", - .gpmu_major = 0x1, - .gpmu_minor = 0x003, - .gpmu_tsens = 0x000C000D, - .max_power = 5448, - }, - { - .gpurev = ADRENO_REV_A615, - .core = 6, - .major = 1, - .minor = 5, - .patchid = ANY_ID, - .features = ADRENO_64BIT | ADRENO_RPMH | ADRENO_PREEMPTION | - ADRENO_GPMU | ADRENO_CONTENT_PROTECTION | ADRENO_IFPC | - ADRENO_IOCOHERENT, - .sqefw_name = "a630_sqe.fw", - .zap_name = "a615_zap", - .gpudev = &adreno_a6xx_gpudev, - .gmem_size = SZ_512K, - .num_protected_regs = 0x20, - .busy_mask = 0xFFFFFFFE, - .gpmufw_name = "a630_gmu.bin", - .gpmu_major = 0x1, - .gpmu_minor = 0x003, - }, - { - .gpurev = ADRENO_REV_A618, - .core = 6, - .major = 1, - .minor = 8, - .patchid = ANY_ID, - .features = ADRENO_64BIT | ADRENO_RPMH | 
ADRENO_PREEMPTION | - ADRENO_GPMU | ADRENO_CONTENT_PROTECTION | ADRENO_IFPC | - ADRENO_IOCOHERENT, - .sqefw_name = "a630_sqe.fw", - .zap_name = "a615_zap", - .gpudev = &adreno_a6xx_gpudev, - .gmem_size = SZ_512K, - .num_protected_regs = 0x20, - .busy_mask = 0xFFFFFFFE, - .gpmufw_name = "a618_gmu.bin", - .gpmu_major = 0x1, - .gpmu_minor = 0x008, - }, { .gpurev = ADRENO_REV_A640, .core = 6, @@ -444,78 +58,4 @@ static const struct adreno_gpu_core adreno_gpulist[] = { .gpmu_tsens = 0x000C000D, .max_power = 5448, }, - { - .gpurev = ADRENO_REV_A680, - .core = 6, - .major = 8, - .minor = 0, - .patchid = ANY_ID, - .features = ADRENO_64BIT | ADRENO_RPMH | ADRENO_GPMU | - ADRENO_CONTENT_PROTECTION | ADRENO_IOCOHERENT | - ADRENO_IFPC, - .sqefw_name = "a630_sqe.fw", - .zap_name = "a640_zap", - .gpudev = &adreno_a6xx_gpudev, - .gmem_size = SZ_2M, - .num_protected_regs = 0x20, - .busy_mask = 0xFFFFFFFE, - .gpmufw_name = "a640_gmu.bin", - .gpmu_major = 0x2, - .gpmu_minor = 0x000, - .gpmu_tsens = 0x000C000D, - .max_power = 5448, - }, - { - .gpurev = ADRENO_REV_A612, - .core = 6, - .major = 1, - .minor = 2, - .patchid = ANY_ID, - .features = ADRENO_64BIT | ADRENO_CONTENT_PROTECTION | - ADRENO_IOCOHERENT | ADRENO_PREEMPTION | ADRENO_GPMU | - ADRENO_IFPC | ADRENO_PERFCTRL_RETAIN, - .sqefw_name = "a630_sqe.fw", - .zap_name = "a612_zap", - .gpudev = &adreno_a6xx_gpudev, - .gmem_size = (SZ_128K + SZ_4K), - .num_protected_regs = 0x20, - .busy_mask = 0xFFFFFFFE, - .gpmufw_name = "a612_rgmu.bin", - .cx_ipeak_gpu_freq = 745000000, - }, - { - .gpurev = ADRENO_REV_A616, - .core = 6, - .major = 1, - .minor = 6, - .patchid = ANY_ID, - .features = ADRENO_64BIT | ADRENO_RPMH | ADRENO_PREEMPTION | - ADRENO_GPMU | ADRENO_CONTENT_PROTECTION | ADRENO_IFPC | - ADRENO_IOCOHERENT, - .sqefw_name = "a630_sqe.fw", - .zap_name = "a615_zap", - .gpudev = &adreno_a6xx_gpudev, - .gmem_size = SZ_512K, - .num_protected_regs = 0x20, - .busy_mask = 0xFFFFFFFE, - .gpmufw_name = "a630_gmu.bin", - .gpmu_major = 0x1, - 
.gpmu_minor = 0x003, - }, - { - .gpurev = ADRENO_REV_A610, - .core = 6, - .major = 1, - .minor = 0, - .patchid = ANY_ID, - .features = ADRENO_64BIT | ADRENO_PREEMPTION | - ADRENO_CONTENT_PROTECTION, - .sqefw_name = "a630_sqe.fw", - .zap_name = "a610_zap", - .gpudev = &adreno_a6xx_gpudev, - .gmem_size = (SZ_128K + SZ_4K), - .num_protected_regs = 0x20, - .busy_mask = 0xFFFFFFFE, - .cx_ipeak_gpu_freq = 900000000, - }, }; diff --git a/drivers/gpu/msm/adreno.c b/drivers/gpu/msm/adreno.c index 1b8c0115b1ee..7936d6c5620a 100644 --- a/drivers/gpu/msm/adreno.c +++ b/drivers/gpu/msm/adreno.c @@ -16,7 +16,6 @@ #include #include #include -#include #include #include #include @@ -62,7 +61,7 @@ MODULE_PARM_DESC(swfdetect, "Enable soft fault detection"); #define KGSL_LOG_LEVEL_DEFAULT 3 -static void adreno_input_work(struct work_struct *work); +static void adreno_pwr_on_work(struct work_struct *work); static unsigned int counter_delta(struct kgsl_device *device, unsigned int reg, unsigned int *counter); @@ -103,8 +102,6 @@ static struct adreno_device device_3d0 = { .ft_policy = KGSL_FT_DEFAULT_POLICY, .ft_pf_policy = KGSL_FT_PAGEFAULT_DEFAULT_POLICY, .long_ib_detect = 1, - .input_work = __WORK_INITIALIZER(device_3d0.input_work, - adreno_input_work), .pwrctrl_flag = BIT(ADRENO_HWCG_CTRL) | BIT(ADRENO_THROTTLING_CTRL), .profile.enabled = false, .active_list = LIST_HEAD_INIT(device_3d0.active_list), @@ -116,6 +113,8 @@ static struct adreno_device device_3d0 = { .skipsaverestore = 1, .usesgmem = 1, }, + .pwr_on_work = __WORK_INITIALIZER(device_3d0.pwr_on_work, + adreno_pwr_on_work), }; /* Ptr to array for the current set of fault detect registers */ @@ -137,9 +136,6 @@ static unsigned int adreno_ft_regs_default[] = { /* Nice level for the higher priority GPU start thread */ int adreno_wake_nice = -7; -/* Number of milliseconds to stay active active after a wake on touch */ -unsigned int adreno_wake_timeout = 100; - /** * adreno_readreg64() - Read a 64bit register by getting its offset 
from the * offset array defined in gpudev node @@ -369,152 +365,17 @@ void adreno_fault_detect_stop(struct adreno_device *adreno_dev) adreno_dev->fast_hang_detect = 0; } -/* - * A workqueue callback responsible for actually turning on the GPU after a - * touch event. kgsl_pwrctrl_change_state(ACTIVE) is used without any - * active_count protection to avoid the need to maintain state. Either - * somebody will start using the GPU or the idle timer will fire and put the - * GPU back into slumber. - */ -static void adreno_input_work(struct work_struct *work) +static void adreno_pwr_on_work(struct work_struct *work) { - struct adreno_device *adreno_dev = container_of(work, - struct adreno_device, input_work); + struct adreno_device *adreno_dev = + container_of(work, typeof(*adreno_dev), pwr_on_work); struct kgsl_device *device = KGSL_DEVICE(adreno_dev); mutex_lock(&device->mutex); - - device->flags |= KGSL_FLAG_WAKE_ON_TOUCH; - - /* - * Don't schedule adreno_start in a high priority workqueue, we are - * already in a workqueue which should be sufficient - */ kgsl_pwrctrl_change_state(device, KGSL_STATE_ACTIVE); - - /* - * When waking up from a touch event we want to stay active long enough - * for the user to send a draw command. The default idle timer timeout - * is shorter than we want so go ahead and push the idle timer out - * further for this special case - */ - mod_timer(&device->idle_timer, - jiffies + msecs_to_jiffies(adreno_wake_timeout)); mutex_unlock(&device->mutex); } -/* - * Process input events and schedule work if needed. 
At this point we are only - * interested in groking EV_ABS touchscreen events - */ -static void adreno_input_event(struct input_handle *handle, unsigned int type, - unsigned int code, int value) -{ - struct kgsl_device *device = handle->handler->private; - struct adreno_device *adreno_dev = ADRENO_DEVICE(device); - - /* Only consider EV_ABS (touch) events */ - if (type != EV_ABS) - return; - - /* - * Don't do anything if anything hasn't been rendered since we've been - * here before - */ - - if (device->flags & KGSL_FLAG_WAKE_ON_TOUCH) - return; - - /* - * If the device is in nap, kick the idle timer to make sure that we - * don't go into slumber before the first render. If the device is - * already in slumber schedule the wake. - */ - - if (device->state == KGSL_STATE_NAP) { - /* - * Set the wake on touch bit to keep from coming back here and - * keeping the device in nap without rendering - */ - - device->flags |= KGSL_FLAG_WAKE_ON_TOUCH; - - mod_timer(&device->idle_timer, - jiffies + device->pwrctrl.interval_timeout); - } else if (device->state == KGSL_STATE_SLUMBER) { - schedule_work(&adreno_dev->input_work); - } -} - -#ifdef CONFIG_INPUT -static int adreno_input_connect(struct input_handler *handler, - struct input_dev *dev, const struct input_device_id *id) -{ - struct input_handle *handle; - int ret; - - handle = kzalloc(sizeof(*handle), GFP_KERNEL); - if (handle == NULL) - return -ENOMEM; - - handle->dev = dev; - handle->handler = handler; - handle->name = handler->name; - - ret = input_register_handle(handle); - if (ret) { - kfree(handle); - return ret; - } - - ret = input_open_device(handle); - if (ret) { - input_unregister_handle(handle); - kfree(handle); - } - - return ret; -} - -static void adreno_input_disconnect(struct input_handle *handle) -{ - input_close_device(handle); - input_unregister_handle(handle); - kfree(handle); -} -#else -static int adreno_input_connect(struct input_handler *handler, - struct input_dev *dev, const struct input_device_id 
*id) -{ - return 0; -} -static void adreno_input_disconnect(struct input_handle *handle) {} -#endif - -/* - * We are only interested in EV_ABS events so only register handlers for those - * input devices that have EV_ABS events - */ -static const struct input_device_id adreno_input_ids[] = { - { - .flags = INPUT_DEVICE_ID_MATCH_EVBIT, - .evbit = { BIT_MASK(EV_ABS) }, - /* assumption: MT_.._X & MT_.._Y are in the same long */ - .absbit = { [BIT_WORD(ABS_MT_POSITION_X)] = - BIT_MASK(ABS_MT_POSITION_X) | - BIT_MASK(ABS_MT_POSITION_Y) }, - }, - { }, -}; - -static struct input_handler adreno_input_handler = { - .event = adreno_input_event, - .connect = adreno_input_connect, - .disconnect = adreno_input_disconnect, - .name = "kgsl", - .id_table = adreno_input_ids, -}; - /* * _soft_reset() - Soft reset GPU * @adreno_dev: Pointer to adreno device @@ -697,8 +558,6 @@ static irqreturn_t adreno_irq_handler(struct kgsl_device *device) tmp &= ~BIT(i); } - gpudev->irq_trace(adreno_dev, status); - /* * Clear ADRENO_INT_RBBM_AHB_ERROR bit after this interrupt has been * cleared in its respective handler @@ -1146,17 +1005,17 @@ static int adreno_of_get_power(struct adreno_device *adreno_dev, /* get pm-qos-active-latency, set it to default if not found */ if (of_property_read_u32(node, "qcom,pm-qos-active-latency", &device->pwrctrl.pm_qos_active_latency)) - device->pwrctrl.pm_qos_active_latency = 501; + device->pwrctrl.pm_qos_active_latency = 1000; /* get pm-qos-cpu-mask-latency, set it to default if not found */ if (of_property_read_u32(node, "qcom,l2pc-cpu-mask-latency", &device->pwrctrl.pm_qos_cpu_mask_latency)) - device->pwrctrl.pm_qos_cpu_mask_latency = 501; + device->pwrctrl.pm_qos_cpu_mask_latency = 1000; /* get pm-qos-wakeup-latency, set it to default if not found */ if (of_property_read_u32(node, "qcom,pm-qos-wakeup-latency", &device->pwrctrl.pm_qos_wakeup_latency)) - device->pwrctrl.pm_qos_wakeup_latency = 101; + device->pwrctrl.pm_qos_wakeup_latency = 100; if 
(of_property_read_u32(node, "qcom,idle-timeout", &timeout)) timeout = 80; @@ -1449,9 +1308,6 @@ static int adreno_probe(struct platform_device *pdev) kgsl_pwrscale_init(&pdev->dev, CONFIG_QCOM_ADRENO_DEFAULT_GOVERNOR); - /* Initialize coresight for the target */ - adreno_coresight_init(adreno_dev); - /* Get the system cache slice descriptor for GPU */ adreno_dev->gpu_llc_slice = adreno_llc_getd(&pdev->dev, "gpu"); if (IS_ERR(adreno_dev->gpu_llc_slice) && @@ -1468,21 +1324,6 @@ static int adreno_probe(struct platform_device *pdev) "Failed to get gpuhtw LLC slice descriptor %ld\n", PTR_ERR(adreno_dev->gpuhtw_llc_slice)); -#ifdef CONFIG_INPUT - if (!device->pwrctrl.input_disable) { - adreno_input_handler.private = device; - /* - * It isn't fatal if we cannot register the input handler. Sad, - * perhaps, but not fatal - */ - if (input_register_handler(&adreno_input_handler)) { - adreno_input_handler.private = NULL; - KGSL_DRV_ERR(device, - "Unable to register the input handler\n"); - } - } -#endif - place_marker("M - DRIVER GPU Ready"); out: if (status) { @@ -1535,13 +1376,8 @@ static int adreno_remove(struct platform_device *pdev) /* The memory is fading */ _adreno_free_memories(adreno_dev); -#ifdef CONFIG_INPUT - if (adreno_input_handler.private) - input_unregister_handler(&adreno_input_handler); -#endif adreno_sysfs_close(adreno_dev); - adreno_coresight_remove(adreno_dev); adreno_profile_close(adreno_dev); /* Release the system cache slice descriptor */ @@ -1853,13 +1689,6 @@ int adreno_set_unsecured_mode(struct adreno_device *adreno_dev, if (!adreno_is_a5xx(adreno_dev) && !adreno_is_a6xx(adreno_dev)) return -EINVAL; - if (ADRENO_QUIRK(adreno_dev, ADRENO_QUIRK_CRITICAL_PACKETS) && - adreno_is_a5xx(adreno_dev)) { - ret = a5xx_critical_packet_submit(adreno_dev, rb); - if (ret) - return ret; - } - /* GPU comes up in secured mode, make it unsecured by default */ if (adreno_dev->zap_handle_ptr) ret = adreno_switch_to_unsecure_mode(adreno_dev, rb); @@ -2166,9 +1995,6 @@ 
static int _adreno_start(struct adreno_device *adreno_dev) */ adreno_llc_setup(device); - /* Re-initialize the coresight registers if applicable */ - adreno_coresight_start(adreno_dev); - adreno_irqctrl(adreno_dev, 1); adreno_perfcounter_start(adreno_dev); @@ -2309,9 +2135,6 @@ static int adreno_stop(struct kgsl_device *device) adreno_llc_deactivate_slice(adreno_dev->gpu_llc_slice); adreno_llc_deactivate_slice(adreno_dev->gpuhtw_llc_slice); - /* Save active coresight registers if applicable */ - adreno_coresight_stop(adreno_dev); - /* Save physical performance counter values before GPU power down*/ adreno_perfcounter_save(adreno_dev); @@ -3151,9 +2974,6 @@ int adreno_soft_reset(struct kgsl_device *device) /* Reinitialize the GPU */ gpudev->start(adreno_dev); - /* Re-initialize the coresight registers if applicable */ - adreno_coresight_start(adreno_dev); - /* Enable IRQ */ adreno_irqctrl(adreno_dev, 1); @@ -4287,7 +4107,7 @@ static int __init kgsl_3d_init(void) { #ifdef CONFIG_PLATFORM_AUTO struct task_struct *kgsl_3d_init_task = - kthread_run(__kgsl_3d_init, NULL, "kgsl_3d_init"); + kthread_run_perf_critical(__kgsl_3d_init, NULL, "kgsl_3d_init"); if (IS_ERR(kgsl_3d_init_task)) return PTR_ERR(kgsl_3d_init_task); else diff --git a/drivers/gpu/msm/adreno.h b/drivers/gpu/msm/adreno.h index 11f7ec68beee..5ce10a4df9fd 100644 --- a/drivers/gpu/msm/adreno.h +++ b/drivers/gpu/msm/adreno.h @@ -475,7 +475,7 @@ enum gpu_coresight_sources { * @dispatcher: Container for adreno GPU dispatcher * @pwron_fixup: Command buffer to run a post-power collapse shader workaround * @pwron_fixup_dwords: Number of dwords in the command buffer - * @input_work: Work struct for turning on the GPU after a touch event + * @pwr_on_work: Work struct for turning on the GPU * @busy_data: Struct holding GPU VBIF busy stats * @ram_cycles_lo: Number of DDR clock cycles for the monitor session (Only * DDR channel 0 read cycles in case of GBIF) @@ -555,7 +555,7 @@ struct adreno_device { struct 
adreno_dispatcher dispatcher; struct kgsl_memdesc pwron_fixup; unsigned int pwron_fixup_dwords; - struct work_struct input_work; + struct work_struct pwr_on_work; struct adreno_busy_data busy_data; unsigned int ram_cycles_lo; unsigned int ram_cycles_lo_ch1_read; @@ -589,7 +589,6 @@ struct adreno_device { unsigned int speed_bin; unsigned int quirks; - struct coresight_device *csdev[GPU_CORESIGHT_MAX]; uint32_t gpmu_throttle_counters[ADRENO_GPMU_THROTTLE_COUNTERS]; struct work_struct irq_storm_work; @@ -968,9 +967,6 @@ struct adreno_gpudev { struct adreno_perfcounters *perfcounters; const struct adreno_invalid_countables *invalid_countables; - struct adreno_snapshot_data *snapshot_data; - - struct adreno_coresight *coresight[GPU_CORESIGHT_MAX]; struct adreno_irq *irq; int num_prio_levels; @@ -979,8 +975,6 @@ struct adreno_gpudev { unsigned int gbif_arb_halt_mask; unsigned int gbif_gx_halt_mask; /* GPU specific function hooks */ - void (*irq_trace)(struct adreno_device *, unsigned int status); - void (*snapshot)(struct adreno_device *, struct kgsl_snapshot *); void (*platform_setup)(struct adreno_device *); void (*init)(struct adreno_device *); void (*remove)(struct adreno_device *); @@ -1035,8 +1029,6 @@ struct adreno_gpudev { int (*perfcounter_update)(struct adreno_device *adreno_dev, struct adreno_perfcount_register *reg, bool update_reg); - size_t (*snapshot_preemption)(struct kgsl_device *, u8 *, - size_t, void *); void (*zap_shader_unload)(struct adreno_device *); int (*secure_pt_hibernate)(struct adreno_device *); int (*secure_pt_restore)(struct adreno_device *); @@ -1120,13 +1112,9 @@ extern unsigned int *adreno_ft_regs; extern unsigned int adreno_ft_regs_num; extern unsigned int *adreno_ft_regs_val; -extern struct adreno_gpudev adreno_a3xx_gpudev; -extern struct adreno_gpudev adreno_a4xx_gpudev; -extern struct adreno_gpudev adreno_a5xx_gpudev; extern struct adreno_gpudev adreno_a6xx_gpudev; extern int adreno_wake_nice; -extern unsigned int 
adreno_wake_timeout; int adreno_start(struct kgsl_device *device, int priority); int adreno_soft_reset(struct kgsl_device *device); @@ -1154,9 +1142,9 @@ void adreno_shadermem_regread(struct kgsl_device *device, unsigned int offsetwords, unsigned int *value); -void adreno_snapshot(struct kgsl_device *device, +static inline void adreno_snapshot(struct kgsl_device *device, struct kgsl_snapshot *snapshot, - struct kgsl_context *context); + struct kgsl_context *context) {} int adreno_reset(struct kgsl_device *device, int fault); @@ -1213,13 +1201,12 @@ void adreno_cx_misc_regrmw(struct adreno_device *adreno_dev, #define ADRENO_TARGET(_name, _id) \ static inline int adreno_is_##_name(struct adreno_device *adreno_dev) \ { \ - return (ADRENO_GPUREV(adreno_dev) == (_id)); \ + return 0; \ } static inline int adreno_is_a3xx(struct adreno_device *adreno_dev) { - return ((ADRENO_GPUREV(adreno_dev) >= 300) && - (ADRENO_GPUREV(adreno_dev) < 400)); + return 0; } ADRENO_TARGET(a304, ADRENO_REV_A304) @@ -1234,28 +1221,24 @@ ADRENO_TARGET(a330, ADRENO_REV_A330) static inline int adreno_is_a330v2(struct adreno_device *adreno_dev) { - return ((ADRENO_GPUREV(adreno_dev) == ADRENO_REV_A330) && - (ADRENO_CHIPID_PATCH(adreno_dev->chipid) > 0)); + return 0; } static inline int adreno_is_a330v21(struct adreno_device *adreno_dev) { - return ((ADRENO_GPUREV(adreno_dev) == ADRENO_REV_A330) && - (ADRENO_CHIPID_PATCH(adreno_dev->chipid) > 0xF)); + return 0; } static inline int adreno_is_a4xx(struct adreno_device *adreno_dev) { - return ADRENO_GPUREV(adreno_dev) >= 400 && - ADRENO_GPUREV(adreno_dev) < 500; + return 0; } ADRENO_TARGET(a405, ADRENO_REV_A405); static inline int adreno_is_a405v2(struct adreno_device *adreno_dev) { - return (ADRENO_GPUREV(adreno_dev) == ADRENO_REV_A405) && - (ADRENO_CHIPID_PATCH(adreno_dev->chipid) == 0x10); + return 0; } ADRENO_TARGET(a418, ADRENO_REV_A418) @@ -1264,14 +1247,12 @@ ADRENO_TARGET(a430, ADRENO_REV_A430) static inline int adreno_is_a430v2(struct 
adreno_device *adreno_dev) { - return ((ADRENO_GPUREV(adreno_dev) == ADRENO_REV_A430) && - (ADRENO_CHIPID_PATCH(adreno_dev->chipid) == 1)); + return 0; } static inline int adreno_is_a5xx(struct adreno_device *adreno_dev) { - return ADRENO_GPUREV(adreno_dev) >= 500 && - ADRENO_GPUREV(adreno_dev) < 600; + return 0; } ADRENO_TARGET(a505, ADRENO_REV_A505) @@ -1284,51 +1265,48 @@ ADRENO_TARGET(a540, ADRENO_REV_A540) static inline int adreno_is_a530v1(struct adreno_device *adreno_dev) { - return (ADRENO_GPUREV(adreno_dev) == ADRENO_REV_A530) && - (ADRENO_CHIPID_PATCH(adreno_dev->chipid) == 0); + return 0; } static inline int adreno_is_a530v2(struct adreno_device *adreno_dev) { - return (ADRENO_GPUREV(adreno_dev) == ADRENO_REV_A530) && - (ADRENO_CHIPID_PATCH(adreno_dev->chipid) == 1); + return 0; } static inline int adreno_is_a530v3(struct adreno_device *adreno_dev) { - return (ADRENO_GPUREV(adreno_dev) == ADRENO_REV_A530) && - (ADRENO_CHIPID_PATCH(adreno_dev->chipid) == 2); + return 0; } static inline int adreno_is_a505_or_a506(struct adreno_device *adreno_dev) { - return ADRENO_GPUREV(adreno_dev) >= 505 && - ADRENO_GPUREV(adreno_dev) <= 506; + return 0; } static inline int adreno_is_a540v1(struct adreno_device *adreno_dev) { - return (ADRENO_GPUREV(adreno_dev) == ADRENO_REV_A540) && - (ADRENO_CHIPID_PATCH(adreno_dev->chipid) == 0); + return 0; } static inline int adreno_is_a540v2(struct adreno_device *adreno_dev) { - return (ADRENO_GPUREV(adreno_dev) == ADRENO_REV_A540) && - (ADRENO_CHIPID_PATCH(adreno_dev->chipid) == 1); + return 0; } static inline int adreno_is_a6xx(struct adreno_device *adreno_dev) { - return ADRENO_GPUREV(adreno_dev) >= 600 && - ADRENO_GPUREV(adreno_dev) < 700; + return 1; +} + +static inline int adreno_is_a640(struct adreno_device *adreno_dev) +{ + return 1; } ADRENO_TARGET(a610, ADRENO_REV_A610) ADRENO_TARGET(a612, ADRENO_REV_A612) ADRENO_TARGET(a618, ADRENO_REV_A618) ADRENO_TARGET(a630, ADRENO_REV_A630) -ADRENO_TARGET(a640, ADRENO_REV_A640) 
ADRENO_TARGET(a680, ADRENO_REV_A680) /* @@ -1337,22 +1315,17 @@ ADRENO_TARGET(a680, ADRENO_REV_A680) */ static inline int adreno_is_a615_family(struct adreno_device *adreno_dev) { - unsigned int rev = ADRENO_GPUREV(adreno_dev); - - return (rev == ADRENO_REV_A615 || rev == ADRENO_REV_A616 || - rev == ADRENO_REV_A618); + return 0; } static inline int adreno_is_a630v1(struct adreno_device *adreno_dev) { - return (ADRENO_GPUREV(adreno_dev) == ADRENO_REV_A630) && - (ADRENO_CHIPID_PATCH(adreno_dev->chipid) == 0); + return 0; } static inline int adreno_is_a630v2(struct adreno_device *adreno_dev) { - return (ADRENO_GPUREV(adreno_dev) == ADRENO_REV_A630) && - (ADRENO_CHIPID_PATCH(adreno_dev->chipid) == 1); + return 0; } static inline int adreno_is_a640v1(struct adreno_device *adreno_dev) @@ -1369,14 +1342,12 @@ static inline int adreno_is_a640v2(struct adreno_device *adreno_dev) static inline int adreno_is_a680v1(struct adreno_device *adreno_dev) { - return (ADRENO_GPUREV(adreno_dev) == ADRENO_REV_A680) && - (ADRENO_CHIPID_PATCH(adreno_dev->chipid) == 0); + return 0; } static inline int adreno_is_a680v2(struct adreno_device *adreno_dev) { - return (ADRENO_GPUREV(adreno_dev) == ADRENO_REV_A680) && - (ADRENO_CHIPID_PATCH(adreno_dev->chipid) == 1); + return 0; } /* @@ -1683,16 +1654,10 @@ static inline void adreno_set_protected_registers( *index = *index + 1; } -#ifdef CONFIG_DEBUG_FS -void adreno_debugfs_init(struct adreno_device *adreno_dev); -void adreno_context_debugfs_init(struct adreno_device *adreno_dev, - struct adreno_context *ctx); -#else static inline void adreno_debugfs_init(struct adreno_device *adreno_dev) { } static inline void adreno_context_debugfs_init(struct adreno_device *device, struct adreno_context *context) { } -#endif /** * adreno_compare_pm4_version() - Compare the PM4 microcode version diff --git a/drivers/gpu/msm/adreno_a6xx.c b/drivers/gpu/msm/adreno_a6xx.c index a780c1bb1600..0506231ea9b7 100644 --- a/drivers/gpu/msm/adreno_a6xx.c +++ 
b/drivers/gpu/msm/adreno_a6xx.c @@ -503,8 +503,6 @@ static void a6xx_pwrup_reglist_init(struct adreno_device *adreno_dev) static void a6xx_init(struct adreno_device *adreno_dev) { - a6xx_crashdump_init(adreno_dev); - /* * If the GMU is not enabled, rewrite the offset for the always on * counters to point to the CP always on instead of GMU always on @@ -1820,6 +1818,7 @@ static struct adreno_irq a6xx_irq = { .mask = A6XX_INT_MASK, }; +#if 0 static bool adreno_is_qdss_dbg_register(struct kgsl_device *device, unsigned int offsetwords) { @@ -2390,6 +2389,7 @@ static struct adreno_coresight a6xx_coresight_cx = { .read = adreno_cx_dbgc_regread, .write = adreno_cx_dbgc_regwrite, }; +#endif static struct adreno_perfcount_register a6xx_perfcounters_cp[] = { { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_CP_0_LO, @@ -3317,9 +3317,7 @@ static int a6xx_secure_pt_restore(struct adreno_device *adreno_dev) struct adreno_gpudev adreno_a6xx_gpudev = { .reg_offsets = &a6xx_reg_offsets, .start = a6xx_start, - .snapshot = a6xx_snapshot, .irq = &a6xx_irq, - .irq_trace = trace_kgsl_a5xx_irq_status, .num_prio_levels = KGSL_PRIORITY_MAX_RB_LEVELS, .platform_setup = a6xx_platform_setup, .init = a6xx_init, @@ -3352,9 +3350,7 @@ struct adreno_gpudev adreno_a6xx_gpudev = { .ccu_invalidate = a6xx_ccu_invalidate, .perfcounter_init = a6xx_perfcounter_init, .perfcounter_update = a6xx_perfcounter_update, - .coresight = {&a6xx_coresight, &a6xx_coresight_cx}, .clk_set_options = a6xx_clk_set_options, - .snapshot_preemption = a6xx_snapshot_preemption, .zap_shader_unload = a6xx_zap_shader_unload, .secure_pt_hibernate = a6xx_secure_pt_hibernate, .secure_pt_restore = a6xx_secure_pt_restore, diff --git a/drivers/gpu/msm/adreno_a6xx.h b/drivers/gpu/msm/adreno_a6xx.h index 49085651f208..8807db6333f4 100644 --- a/drivers/gpu/msm/adreno_a6xx.h +++ b/drivers/gpu/msm/adreno_a6xx.h @@ -169,7 +169,5 @@ void a6xx_crashdump_init(struct adreno_device *adreno_dev); int a6xx_gmu_sptprac_enable(struct 
adreno_device *adreno_dev); void a6xx_gmu_sptprac_disable(struct adreno_device *adreno_dev); bool a6xx_gmu_sptprac_is_on(struct adreno_device *adreno_dev); -size_t a6xx_snapshot_preemption(struct kgsl_device *device, u8 *buf, - size_t remain, void *priv); u64 a6xx_gmu_read_ao_counter(struct kgsl_device *device); #endif diff --git a/drivers/gpu/msm/adreno_a6xx_gmu.c b/drivers/gpu/msm/adreno_a6xx_gmu.c index a4639068403b..613d5b6bbc0c 100644 --- a/drivers/gpu/msm/adreno_a6xx_gmu.c +++ b/drivers/gpu/msm/adreno_a6xx_gmu.c @@ -23,7 +23,6 @@ #include "adreno.h" #include "a6xx_reg.h" #include "adreno_a6xx.h" -#include "adreno_snapshot.h" #include "adreno_trace.h" static const unsigned int a6xx_gmu_gx_registers[] = { @@ -1458,134 +1457,6 @@ static unsigned int a6xx_gmu_ifpc_show(struct adreno_device *adreno_dev) gmu->idle_level >= GPU_HW_IFPC; } -static size_t a6xx_snapshot_gmu_tcm(struct kgsl_device *device, - u8 *buf, size_t remain, void *priv) -{ - struct kgsl_snapshot_gmu_mem *mem_hdr = - (struct kgsl_snapshot_gmu_mem *)buf; - unsigned int *data = (unsigned int *)(buf + sizeof(*mem_hdr)); - unsigned int i, bytes; - unsigned int *type = priv; - const unsigned int *regs; - - if (*type == GMU_ITCM) - regs = a6xx_gmu_itcm_registers; - else - regs = a6xx_gmu_dtcm_registers; - - bytes = (regs[1] - regs[0] + 1) << 2; - - if (remain < bytes + sizeof(*mem_hdr)) { - SNAPSHOT_ERR_NOMEM(device, "GMU Memory"); - return 0; - } - - mem_hdr->type = SNAPSHOT_GMU_MEM_BIN_BLOCK; - mem_hdr->hostaddr = 0; - mem_hdr->gmuaddr = gmu_get_memtype_base(KGSL_GMU_DEVICE(device), *type); - mem_hdr->gpuaddr = 0; - - for (i = regs[0]; i <= regs[1]; i++) - kgsl_regread(device, i, data++); - - return bytes + sizeof(*mem_hdr); -} - - -struct gmu_mem_type_desc { - struct gmu_memdesc *memdesc; - uint32_t type; -}; - -static size_t a6xx_snapshot_gmu_mem(struct kgsl_device *device, - u8 *buf, size_t remain, void *priv) -{ - struct kgsl_snapshot_gmu_mem *mem_hdr = - (struct kgsl_snapshot_gmu_mem *)buf; - 
struct gmu_mem_type_desc *desc = priv; - unsigned int *data = (unsigned int *)(buf + sizeof(*mem_hdr)); - - if (priv == NULL) - return 0; - - if (remain < desc->memdesc->size + sizeof(*mem_hdr)) { - KGSL_CORE_ERR( - "snapshot: Not enough memory for the gmu section %d\n", - desc->type); - return 0; - } - - memset(mem_hdr, 0, sizeof(*mem_hdr)); - mem_hdr->type = desc->type; - mem_hdr->hostaddr = (uintptr_t)desc->memdesc->hostptr; - mem_hdr->gmuaddr = desc->memdesc->gmuaddr; - mem_hdr->gpuaddr = 0; - - /* Just copy the ringbuffer, there are no active IBs */ - memcpy(data, desc->memdesc->hostptr, desc->memdesc->size); - - return desc->memdesc->size + sizeof(*mem_hdr); -} - -/* - * a6xx_gmu_snapshot() - A6XX GMU snapshot function - * @adreno_dev: Device being snapshotted - * @snapshot: Pointer to the snapshot instance - * - * This is where all of the A6XX GMU specific bits and pieces are grabbed - * into the snapshot memory - */ -static void a6xx_gmu_snapshot(struct adreno_device *adreno_dev, - struct kgsl_snapshot *snapshot) -{ - struct kgsl_device *device = KGSL_DEVICE(adreno_dev); - struct gmu_device *gmu = KGSL_GMU_DEVICE(device); - bool gx_on; - struct gmu_mem_type_desc desc[] = { - {gmu->hfi_mem, SNAPSHOT_GMU_MEM_HFI}, - {gmu->persist_mem, SNAPSHOT_GMU_MEM_BIN_BLOCK}, - {gmu->icache_mem, SNAPSHOT_GMU_MEM_BIN_BLOCK}, - {gmu->dcache_mem, SNAPSHOT_GMU_MEM_BIN_BLOCK}, - {gmu->gmu_log, SNAPSHOT_GMU_MEM_LOG}, - {gmu->dump_mem, SNAPSHOT_GMU_MEM_BIN_BLOCK} }; - unsigned int val, i; - enum gmu_mem_type type; - - if (!gmu_core_isenabled(device)) - return; - - for (i = 0; i < ARRAY_SIZE(desc); i++) { - if (desc[i].memdesc) - kgsl_snapshot_add_section(device, - KGSL_SNAPSHOT_SECTION_GMU_MEMORY, - snapshot, a6xx_snapshot_gmu_mem, - &desc[i]); - } - - type = GMU_ITCM; - kgsl_snapshot_add_section(device, KGSL_SNAPSHOT_SECTION_GMU_MEMORY, - snapshot, a6xx_snapshot_gmu_tcm, &type); - type = GMU_DTCM; - kgsl_snapshot_add_section(device, KGSL_SNAPSHOT_SECTION_GMU_MEMORY, - snapshot, 
a6xx_snapshot_gmu_tcm, &type); - - adreno_snapshot_registers(device, snapshot, a6xx_gmu_registers, - ARRAY_SIZE(a6xx_gmu_registers) / 2); - - gx_on = a6xx_gmu_gx_is_on(adreno_dev); - - if (gx_on) { - /* Set fence to ALLOW mode so registers can be read */ - kgsl_regwrite(device, A6XX_GMU_AO_AHB_FENCE_CTRL, 0); - kgsl_regread(device, A6XX_GMU_AO_AHB_FENCE_CTRL, &val); - - KGSL_DRV_ERR(device, "set FENCE to ALLOW mode:%x\n", val); - adreno_snapshot_registers(device, snapshot, - a6xx_gmu_gx_registers, - ARRAY_SIZE(a6xx_gmu_gx_registers) / 2); - } -} - static int a6xx_gmu_wait_for_active_transition( struct adreno_device *adreno_dev) { @@ -1673,7 +1544,6 @@ struct gmu_dev_ops adreno_a6xx_gmudev = { .wait_for_gmu_idle = a6xx_gmu_wait_for_idle, .ifpc_store = a6xx_gmu_ifpc_store, .ifpc_show = a6xx_gmu_ifpc_show, - .snapshot = a6xx_gmu_snapshot, .wait_for_active_transition = a6xx_gmu_wait_for_active_transition, .is_initialized = a6xx_gmu_is_initialized, .read_ao_counter = a6xx_gmu_read_ao_counter, diff --git a/drivers/gpu/msm/adreno_a6xx_rgmu.c b/drivers/gpu/msm/adreno_a6xx_rgmu.c index 6efd43705d20..cfd895130bcc 100644 --- a/drivers/gpu/msm/adreno_a6xx_rgmu.c +++ b/drivers/gpu/msm/adreno_a6xx_rgmu.c @@ -22,7 +22,6 @@ #include "a6xx_reg.h" #include "adreno_a6xx.h" #include "adreno_trace.h" -#include "adreno_snapshot.h" /* RGMU timeouts */ #define RGMU_IDLE_TIMEOUT 100 /* ms */ @@ -572,23 +571,6 @@ static void a6xx_rgmu_halt_execution(struct kgsl_device *device) } -/* - * a6xx_rgmu_snapshot() - A6XX GMU snapshot function - * @adreno_dev: Device being snapshotted - * @snapshot: Pointer to the snapshot instance - * - * This is where all of the A6XX GMU specific bits and pieces are grabbed - * into the snapshot memory - */ -static void a6xx_rgmu_snapshot(struct adreno_device *adreno_dev, - struct kgsl_snapshot *snapshot) -{ - struct kgsl_device *device = KGSL_DEVICE(adreno_dev); - - adreno_snapshot_registers(device, snapshot, a6xx_rgmu_registers, - 
ARRAY_SIZE(a6xx_rgmu_registers) / 2); -} - struct gmu_dev_ops adreno_a6xx_rgmudev = { .load_firmware = a6xx_rgmu_load_firmware, .oob_set = a6xx_rgmu_oob_set, @@ -601,7 +583,6 @@ struct gmu_dev_ops adreno_a6xx_rgmudev = { .wait_for_lowest_idle = a6xx_rgmu_wait_for_lowest_idle, .ifpc_store = a6xx_rgmu_ifpc_store, .ifpc_show = a6xx_rgmu_ifpc_show, - .snapshot = a6xx_rgmu_snapshot, .halt_execution = a6xx_rgmu_halt_execution, .read_ao_counter = a6xx_gmu_read_ao_counter, .gmu2host_intr_mask = RGMU_OOB_IRQ_MASK, diff --git a/drivers/gpu/msm/adreno_cp_parser.c b/drivers/gpu/msm/adreno_cp_parser.c index c69162f37039..3f484b98577d 100644 --- a/drivers/gpu/msm/adreno_cp_parser.c +++ b/drivers/gpu/msm/adreno_cp_parser.c @@ -801,9 +801,6 @@ static int adreno_cp_parse_ib2(struct kgsl_device *device, if (ib_level == 2) return -EINVAL; - /* Save current IB2 statically */ - if (ib2base == gpuaddr) - kgsl_snapshot_push_object(process, gpuaddr, dwords); /* * only try to find sub objects iff this IB has * not been processed already diff --git a/drivers/gpu/msm/adreno_cp_parser.h b/drivers/gpu/msm/adreno_cp_parser.h index 1fa46c147c3c..d879e5079f7c 100644 --- a/drivers/gpu/msm/adreno_cp_parser.h +++ b/drivers/gpu/msm/adreno_cp_parser.h @@ -15,9 +15,6 @@ #include "adreno.h" -extern const unsigned int a3xx_cp_addr_regs[]; -extern const unsigned int a4xx_cp_addr_regs[]; - /* * struct adreno_ib_object - Structure containing information about an * address range found in an IB @@ -134,15 +131,7 @@ static inline void adreno_ib_init_ib_obj(uint64_t gpuaddr, static inline int adreno_cp_parser_getreg(struct adreno_device *adreno_dev, enum adreno_cp_addr_regs reg_enum) { - if (reg_enum == ADRENO_CP_ADDR_MAX) - return -EEXIST; - - if (adreno_is_a3xx(adreno_dev)) - return a3xx_cp_addr_regs[reg_enum]; - else if (adreno_is_a4xx(adreno_dev)) - return a4xx_cp_addr_regs[reg_enum]; - else - return -EEXIST; + return -EEXIST; } /* @@ -160,19 +149,6 @@ static inline int adreno_cp_parser_regindex(struct 
adreno_device *adreno_dev, enum adreno_cp_addr_regs start, enum adreno_cp_addr_regs end) { - int i; - const unsigned int *regs; - - if (adreno_is_a4xx(adreno_dev)) - regs = a4xx_cp_addr_regs; - else if (adreno_is_a3xx(adreno_dev)) - regs = a3xx_cp_addr_regs; - else - return -EEXIST; - - for (i = start; i <= end && i < ADRENO_CP_ADDR_MAX; i++) - if (regs[i] == offset) - return i; return -EEXIST; } diff --git a/drivers/gpu/msm/adreno_debugfs.c b/drivers/gpu/msm/adreno_debugfs.c index 2dd651a446b1..7d19d1352b80 100644 --- a/drivers/gpu/msm/adreno_debugfs.c +++ b/drivers/gpu/msm/adreno_debugfs.c @@ -131,11 +131,13 @@ static void sync_event_print(struct seq_file *s, break; } case KGSL_CMD_SYNCPOINT_TYPE_FENCE: { +#ifdef CONFIG_FENCE_DEBUG int i; for (i = 0; i < sync_event->info.num_fences; i++) seq_printf(s, "sync: %s", sync_event->info.fences[i].name); +#endif break; } default: diff --git a/drivers/gpu/msm/adreno_dispatch.c b/drivers/gpu/msm/adreno_dispatch.c index 4e4be1e64cef..1e90e59b0b45 100644 --- a/drivers/gpu/msm/adreno_dispatch.c +++ b/drivers/gpu/msm/adreno_dispatch.c @@ -276,7 +276,7 @@ static void start_fault_timer(struct adreno_device *adreno_dev) static void _retire_timestamp(struct kgsl_drawobj *drawobj) { struct kgsl_context *context = drawobj->context; - struct adreno_context *drawctxt = ADRENO_CONTEXT(context); + struct adreno_context __maybe_unused *drawctxt = ADRENO_CONTEXT(context); struct kgsl_device *device = context->device; /* @@ -300,12 +300,13 @@ static void _retire_timestamp(struct kgsl_drawobj *drawobj) * rptr scratch out address. At this point GPU clocks turned off. * So avoid reading GPU register directly for A3xx. 
*/ - if (adreno_is_a3xx(ADRENO_DEVICE(device))) + if (adreno_is_a3xx(ADRENO_DEVICE(device))) { trace_adreno_cmdbatch_retired(drawobj, -1, 0, 0, drawctxt->rb, 0, 0); - else + } else { trace_adreno_cmdbatch_retired(drawobj, -1, 0, 0, drawctxt->rb, adreno_get_rptr(drawctxt->rb), 0); + } kgsl_drawobj_destroy(drawobj); } @@ -536,7 +537,7 @@ static int sendcmd(struct adreno_device *adreno_dev, struct kgsl_drawobj *drawobj = DRAWOBJ(cmdobj); struct adreno_gpudev *gpudev = ADRENO_GPU_DEVICE(adreno_dev); struct adreno_dispatcher *dispatcher = &adreno_dev->dispatcher; - struct adreno_context *drawctxt = ADRENO_CONTEXT(drawobj->context); + struct adreno_context __maybe_unused *drawctxt = ADRENO_CONTEXT(drawobj->context); struct adreno_dispatcher_drawqueue *dispatch_q = ADRENO_DRAWOBJ_DISPATCH_DRAWQUEUE(drawobj); struct adreno_submit_time time; @@ -1167,12 +1168,6 @@ static inline int _verify_cmdobj(struct kgsl_device_private *dev_priv, &ADRENO_CONTEXT(context)->base, ib) == false) return -EINVAL; - /* - * Clear the wake on touch bit to indicate an IB has - * been submitted since the last time we set it. - * But only clear it when we have rendering commands. - */ - device->flags &= ~KGSL_FLAG_WAKE_ON_TOUCH; } /* A3XX does not have support for drawobj profiling */ @@ -2333,7 +2328,7 @@ static void cmdobj_profile_ticks(struct adreno_device *adreno_dev, static void retire_cmdobj(struct adreno_device *adreno_dev, struct kgsl_drawobj_cmd *cmdobj) { - struct adreno_dispatcher *dispatcher = &adreno_dev->dispatcher; + struct adreno_dispatcher __maybe_unused *dispatcher = &adreno_dev->dispatcher; struct kgsl_drawobj *drawobj = DRAWOBJ(cmdobj); struct adreno_context *drawctxt = ADRENO_CONTEXT(drawobj->context); uint64_t start = 0, end = 0; @@ -2351,15 +2346,16 @@ static void retire_cmdobj(struct adreno_device *adreno_dev, * rptr scratch out address. At this point GPU clocks turned off. * So avoid reading GPU register directly for A3xx. 
*/ - if (adreno_is_a3xx(adreno_dev)) + if (adreno_is_a3xx(adreno_dev)) { trace_adreno_cmdbatch_retired(drawobj, (int) dispatcher->inflight, start, end, ADRENO_DRAWOBJ_RB(drawobj), 0, cmdobj->fault_recovery); - else + } else { trace_adreno_cmdbatch_retired(drawobj, (int) dispatcher->inflight, start, end, ADRENO_DRAWOBJ_RB(drawobj), adreno_get_rptr(drawctxt->rb), cmdobj->fault_recovery); + } drawctxt->submit_retire_ticks[drawctxt->ticks_index] = end - cmdobj->submit_ticks; diff --git a/drivers/gpu/msm/adreno_iommu.c b/drivers/gpu/msm/adreno_iommu.c index 0fb74151ca0a..c6cf3811bb35 100644 --- a/drivers/gpu/msm/adreno_iommu.c +++ b/drivers/gpu/msm/adreno_iommu.c @@ -802,21 +802,15 @@ static int _set_pagetable_cpu(struct adreno_ringbuffer *rb, static int _set_pagetable_gpu(struct adreno_ringbuffer *rb, struct kgsl_pagetable *new_pt) { + static unsigned int link[PAGE_SIZE / sizeof(unsigned int)] + ____cacheline_aligned_in_smp; struct adreno_device *adreno_dev = ADRENO_RB_DEVICE(rb); - unsigned int *link = NULL, *cmds; + unsigned int *cmds = link; int result; - link = kmalloc(PAGE_SIZE, GFP_KERNEL); - if (link == NULL) - return -ENOMEM; - - cmds = link; - /* If we are in a fault the MMU will be reset soon */ - if (test_bit(ADRENO_DEVICE_FAULT, &adreno_dev->priv)) { - kfree(link); + if (test_bit(ADRENO_DEVICE_FAULT, &adreno_dev->priv)) return 0; - } cmds += adreno_iommu_set_pt_generate_cmds(rb, cmds, new_pt); @@ -838,7 +832,6 @@ static int _set_pagetable_gpu(struct adreno_ringbuffer *rb, KGSL_CMD_FLAGS_PMODE, link, (unsigned int)(cmds - link)); - kfree(link); return result; } diff --git a/drivers/gpu/msm/adreno_profile.h b/drivers/gpu/msm/adreno_profile.h index 4d81abd14837..9fa059ff0c96 100644 --- a/drivers/gpu/msm/adreno_profile.h +++ b/drivers/gpu/msm/adreno_profile.h @@ -62,16 +62,6 @@ struct adreno_profile { #define ADRENO_PROFILE_LOG_BUF_SIZE_DWORDS (ADRENO_PROFILE_LOG_BUF_SIZE / \ sizeof(unsigned int)) -#ifdef CONFIG_DEBUG_FS -void adreno_profile_init(struct 
adreno_device *adreno_dev); -void adreno_profile_close(struct adreno_device *adreno_dev); -int adreno_profile_process_results(struct adreno_device *adreno_dev); -void adreno_profile_preib_processing(struct adreno_device *adreno_dev, - struct adreno_context *drawctxt, unsigned int *cmd_flags, - unsigned int **rbptr); -void adreno_profile_postib_processing(struct adreno_device *adreno_dev, - unsigned int *cmd_flags, unsigned int **rbptr); -#else static inline void adreno_profile_init(struct adreno_device *adreno_dev) { } static inline void adreno_profile_close(struct adreno_device *adreno_dev) { } static inline int adreno_profile_process_results( @@ -88,7 +78,6 @@ static inline void adreno_profile_preib_processing( static inline void adreno_profile_postib_processing( struct adreno_device *adreno_dev, unsigned int *cmd_flags, unsigned int **rbptr) { } -#endif static inline bool adreno_profile_enabled(struct adreno_profile *profile) { diff --git a/drivers/gpu/msm/adreno_ringbuffer.c b/drivers/gpu/msm/adreno_ringbuffer.c index 12ebc174bf16..38953e2b9e0a 100644 --- a/drivers/gpu/msm/adreno_ringbuffer.c +++ b/drivers/gpu/msm/adreno_ringbuffer.c @@ -917,6 +917,7 @@ int adreno_ringbuffer_submitcmd(struct adreno_device *adreno_dev, struct kgsl_memobj_node *ib; unsigned int numibs = 0; unsigned int *link; + unsigned int link_onstack[SZ_256] __aligned(8); unsigned int *cmds; struct kgsl_context *context; struct adreno_context *drawctxt; @@ -1051,10 +1052,15 @@ int adreno_ringbuffer_submitcmd(struct adreno_device *adreno_dev, if (gpudev->ccu_invalidate) dwords += 4; - link = kcalloc(dwords, sizeof(unsigned int), GFP_KERNEL); - if (!link) { - ret = -ENOMEM; - goto done; + if (likely(dwords <= ARRAY_SIZE(link_onstack))) { + memset(link_onstack, 0, dwords * sizeof(unsigned int)); + link = link_onstack; + } else { + link = kcalloc(dwords, sizeof(unsigned int), GFP_KERNEL); + if (!link) { + ret = -ENOMEM; + goto done; + } } cmds = link; @@ -1182,7 +1188,8 @@ done: 
trace_kgsl_issueibcmds(device, context->id, numibs, drawobj->timestamp, drawobj->flags, ret, drawctxt->type); - kfree(link); + if (unlikely(link != link_onstack)) + kfree(link); return ret; } diff --git a/drivers/gpu/msm/adreno_sysfs.c b/drivers/gpu/msm/adreno_sysfs.c index 407c05b79bd3..3b2eb821a92e 100644 --- a/drivers/gpu/msm/adreno_sysfs.c +++ b/drivers/gpu/msm/adreno_sysfs.c @@ -623,7 +623,6 @@ static ADRENO_SYSFS_BOOL(gpu_llc_slice_enable); static ADRENO_SYSFS_BOOL(gpuhtw_llc_slice_enable); static DEVICE_INT_ATTR(wake_nice, 0644, adreno_wake_nice); -static DEVICE_INT_ATTR(wake_timeout, 0644, adreno_wake_timeout); static ADRENO_SYSFS_BOOL(sptp_pc); static ADRENO_SYSFS_BOOL(lm); @@ -647,7 +646,6 @@ static const struct device_attribute *_attr_list[] = { &adreno_attr_ft_long_ib_detect.attr, &adreno_attr_ft_hang_intr_status.attr, &dev_attr_wake_nice.attr, - &dev_attr_wake_timeout.attr, &adreno_attr_sptp_pc.attr, &adreno_attr_lm.attr, &adreno_attr_preemption.attr, diff --git a/drivers/gpu/msm/adreno_trace.h b/drivers/gpu/msm/adreno_trace.h index 692698d2a5a6..f574a16e0a28 100644 --- a/drivers/gpu/msm/adreno_trace.h +++ b/drivers/gpu/msm/adreno_trace.h @@ -11,6 +11,31 @@ * */ +#define trace_adreno_cmdbatch_fault(...) {} +#define trace_adreno_cmdbatch_queued(...) {} +#define trace_adreno_cmdbatch_recovery(...) {} +#define trace_adreno_cmdbatch_retired(...) {} +#define trace_adreno_cmdbatch_submitted(...) {} +#define trace_adreno_cmdbatch_sync(...) {} +#define trace_adreno_drawctxt_invalidate(...) {} +#define trace_adreno_drawctxt_sleep(...) {} +#define trace_adreno_drawctxt_switch(...) {} +#define trace_adreno_drawctxt_wait_done(...) {} +#define trace_adreno_drawctxt_wait_start(...) {} +#define trace_adreno_drawctxt_wake(...) {} +#define trace_adreno_gpu_fault(...) {} +#define trace_adreno_hw_preempt_comp_to_clear(...) {} +#define trace_adreno_hw_preempt_token_submit(...) {} +#define trace_adreno_ifpc_count(...) {} +#define trace_adreno_preempt_done(...) 
{} +#define trace_adreno_preempt_trigger(...) {} +#define trace_adreno_sp_tp(...) {} +#define trace_dispatch_queue_context(...) {} +#define trace_kgsl_a3xx_irq_status(...) {} +#define trace_kgsl_a4xx_irq_status(...) {} +#define trace_kgsl_a5xx_irq_status(...) {} + +#if 0 #if !defined(_ADRENO_TRACE_H) || defined(TRACE_HEADER_MULTI_READ) #define _ADRENO_TRACE_H @@ -626,3 +651,4 @@ TRACE_EVENT(adreno_ifpc_count, /* This part must be outside protection */ #include +#endif diff --git a/drivers/gpu/msm/kgsl.c b/drivers/gpu/msm/kgsl.c index e68a795032ea..ebc036910380 100644 --- a/drivers/gpu/msm/kgsl.c +++ b/drivers/gpu/msm/kgsl.c @@ -4906,9 +4906,6 @@ int kgsl_device_platform_probe(struct kgsl_device *device) if (status) return status; - /* Initialize logging first, so that failures below actually print. */ - kgsl_device_debugfs_init(device); - /* Disable the sparse ioctl invocation as they are not used */ device->flags &= ~KGSL_FLAG_SPARSE; @@ -4970,7 +4967,7 @@ int kgsl_device_platform_probe(struct kgsl_device *device) } status = devm_request_irq(device->dev, device->pwrctrl.interrupt_num, - kgsl_irq_handler, IRQF_TRIGGER_HIGH, + kgsl_irq_handler, IRQF_TRIGGER_HIGH | IRQF_PERF_CRITICAL, device->name, device); if (status) { KGSL_DRV_ERR(device, "request_irq(%d) failed: %d\n", @@ -5056,7 +5053,6 @@ error_close_mmu: error_pwrctrl_close: kgsl_pwrctrl_close(device); error: - kgsl_device_debugfs_close(device); _unregister_device(device); return status; } @@ -5084,7 +5080,6 @@ void kgsl_device_platform_remove(struct kgsl_device *device) kgsl_pwrctrl_close(device); - kgsl_device_debugfs_close(device); _unregister_device(device); } EXPORT_SYMBOL(kgsl_device_platform_remove); @@ -5120,7 +5115,7 @@ static long kgsl_run_one_worker(struct kthread_worker *worker, struct task_struct **thread, const char *name) { kthread_init_worker(worker); - *thread = kthread_run(kthread_worker_fn, worker, name); + *thread = kthread_run_perf_critical(kthread_worker_fn, worker, name); if 
(IS_ERR(*thread)) { pr_err("unable to start %s\n", name); return PTR_ERR(thread); diff --git a/drivers/gpu/msm/kgsl_debugfs.c b/drivers/gpu/msm/kgsl_debugfs.c index fbd5a44ba0fa..bf21db83d7f8 100644 --- a/drivers/gpu/msm/kgsl_debugfs.c +++ b/drivers/gpu/msm/kgsl_debugfs.c @@ -19,79 +19,9 @@ #include "kgsl_sharedmem.h" #include "kgsl_debugfs.h" -/*default log levels is error for everything*/ -#define KGSL_LOG_LEVEL_MAX 7 - struct dentry *kgsl_debugfs_dir; static struct dentry *proc_d_debugfs; -static inline int kgsl_log_set(unsigned int *log_val, void *data, u64 val) -{ - *log_val = min_t(unsigned int, val, KGSL_LOG_LEVEL_MAX); - return 0; -} - -#define KGSL_DEBUGFS_LOG(__log) \ -static int __log ## _set(void *data, u64 val) \ -{ \ - struct kgsl_device *device = data; \ - return kgsl_log_set(&device->__log, data, val); \ -} \ -static int __log ## _get(void *data, u64 *val) \ -{ \ - struct kgsl_device *device = data; \ - *val = device->__log; \ - return 0; \ -} \ -DEFINE_SIMPLE_ATTRIBUTE(__log ## _fops, \ -__log ## _get, __log ## _set, "%llu\n") \ - -KGSL_DEBUGFS_LOG(drv_log); -KGSL_DEBUGFS_LOG(cmd_log); -KGSL_DEBUGFS_LOG(ctxt_log); -KGSL_DEBUGFS_LOG(mem_log); -KGSL_DEBUGFS_LOG(pwr_log); - -static int _strict_set(void *data, u64 val) -{ - kgsl_sharedmem_set_noretry(val ? 
true : false); - return 0; -} - -static int _strict_get(void *data, u64 *val) -{ - *val = kgsl_sharedmem_get_noretry(); - return 0; -} - -DEFINE_SIMPLE_ATTRIBUTE(_strict_fops, _strict_get, _strict_set, "%llu\n"); - -void kgsl_device_debugfs_init(struct kgsl_device *device) -{ - if (kgsl_debugfs_dir && !IS_ERR(kgsl_debugfs_dir)) - device->d_debugfs = debugfs_create_dir(device->name, - kgsl_debugfs_dir); - - if (!device->d_debugfs || IS_ERR(device->d_debugfs)) - return; - - debugfs_create_file("log_level_cmd", 0644, device->d_debugfs, device, - &cmd_log_fops); - debugfs_create_file("log_level_ctxt", 0644, device->d_debugfs, device, - &ctxt_log_fops); - debugfs_create_file("log_level_drv", 0644, device->d_debugfs, device, - &drv_log_fops); - debugfs_create_file("log_level_mem", 0644, device->d_debugfs, device, - &mem_log_fops); - debugfs_create_file("log_level_pwr", 0644, device->d_debugfs, device, - &pwr_log_fops); -} - -void kgsl_device_debugfs_close(struct kgsl_device *device) -{ - debugfs_remove_recursive(device->d_debugfs); -} - struct type_entry { int type; const char *str; @@ -294,96 +224,6 @@ static const struct file_operations process_mem_fops = { .release = process_mem_release, }; -static int print_sparse_mem_entry(int id, void *ptr, void *data) -{ - struct seq_file *s = data; - struct kgsl_mem_entry *entry = ptr; - struct kgsl_memdesc *m = &entry->memdesc; - struct rb_node *node; - - if (!(m->flags & KGSL_MEMFLAGS_SPARSE_VIRT)) - return 0; - - spin_lock(&entry->bind_lock); - node = rb_first(&entry->bind_tree); - - while (node != NULL) { - struct sparse_bind_object *obj = rb_entry(node, - struct sparse_bind_object, node); - seq_printf(s, "%5d %16llx %16llx %16llx %16llx\n", - entry->id, entry->memdesc.gpuaddr, - obj->v_off, obj->size, obj->p_off); - node = rb_next(node); - } - spin_unlock(&entry->bind_lock); - - seq_putc(s, '\n'); - - return 0; -} - -static int process_sparse_mem_print(struct seq_file *s, void *unused) -{ - struct kgsl_process_private 
*private = s->private; - - seq_printf(s, "%5s %16s %16s %16s %16s\n", - "v_id", "gpuaddr", "v_offset", "v_size", "p_offset"); - - spin_lock(&private->mem_lock); - idr_for_each(&private->mem_idr, print_sparse_mem_entry, s); - spin_unlock(&private->mem_lock); - - return 0; -} - -static int process_sparse_mem_open(struct inode *inode, struct file *file) -{ - int ret; - pid_t pid = (pid_t) (unsigned long) inode->i_private; - struct kgsl_process_private *private = NULL; - - private = kgsl_process_private_find(pid); - - if (!private) - return -ENODEV; - - ret = single_open(file, process_sparse_mem_print, private); - if (ret) - kgsl_process_private_put(private); - - return ret; -} - -static const struct file_operations process_sparse_mem_fops = { - .open = process_sparse_mem_open, - .read = seq_read, - .llseek = seq_lseek, - .release = process_mem_release, -}; - -static int globals_print(struct seq_file *s, void *unused) -{ - kgsl_print_global_pt_entries(s); - return 0; -} - -static int globals_open(struct inode *inode, struct file *file) -{ - return single_open(file, globals_print, NULL); -} - -static int globals_release(struct inode *inode, struct file *file) -{ - return single_release(inode, file); -} - -static const struct file_operations global_fops = { - .open = globals_open, - .read = seq_read, - .llseek = seq_lseek, - .release = globals_release, -}; - /** * kgsl_process_init_debugfs() - Initialize debugfs for a process * @private: Pointer to process private structure created for the process @@ -423,31 +263,12 @@ void kgsl_process_init_debugfs(struct kgsl_process_private *private) if (IS_ERR_OR_NULL(dentry)) WARN((dentry == NULL), "Unable to create 'mem' file for %s\n", name); - - dentry = debugfs_create_file("sparse_mem", 0444, private->debug_root, - (void *) ((unsigned long) private->pid), - &process_sparse_mem_fops); - - if (IS_ERR_OR_NULL(dentry)) - WARN((dentry == NULL), - "Unable to create 'sparse_mem' file for %s\n", name); - } void 
kgsl_core_debugfs_init(void) { - struct dentry *debug_dir; - kgsl_debugfs_dir = debugfs_create_dir("kgsl", NULL); - debugfs_create_file("globals", 0444, kgsl_debugfs_dir, NULL, - &global_fops); - - debug_dir = debugfs_create_dir("debug", kgsl_debugfs_dir); - - debugfs_create_file("strict_memory", 0644, debug_dir, NULL, - &_strict_fops); - proc_d_debugfs = debugfs_create_dir("proc", kgsl_debugfs_dir); } diff --git a/drivers/gpu/msm/kgsl_debugfs.h b/drivers/gpu/msm/kgsl_debugfs.h index 7c9ab8f925a2..3774dfa30889 100644 --- a/drivers/gpu/msm/kgsl_debugfs.h +++ b/drivers/gpu/msm/kgsl_debugfs.h @@ -22,9 +22,6 @@ struct kgsl_process_private; void kgsl_core_debugfs_init(void); void kgsl_core_debugfs_close(void); -void kgsl_device_debugfs_init(struct kgsl_device *device); -void kgsl_device_debugfs_close(struct kgsl_device *device); - extern struct dentry *kgsl_debugfs_dir; static inline struct dentry *kgsl_get_debugfs_dir(void) { @@ -34,8 +31,6 @@ static inline struct dentry *kgsl_get_debugfs_dir(void) void kgsl_process_init_debugfs(struct kgsl_process_private *priv); #else static inline void kgsl_core_debugfs_init(void) { } -static inline void kgsl_device_debugfs_init(struct kgsl_device *device) { } -static inline void kgsl_device_debugfs_close(struct kgsl_device *device) { } static inline void kgsl_core_debugfs_close(void) { } static inline struct dentry *kgsl_get_debugfs_dir(void) { return NULL; } static inline void kgsl_process_init_debugfs(struct kgsl_process_private *priv) diff --git a/drivers/gpu/msm/kgsl_device.h b/drivers/gpu/msm/kgsl_device.h index 0d2a47261796..825836fd4e2c 100644 --- a/drivers/gpu/msm/kgsl_device.h +++ b/drivers/gpu/msm/kgsl_device.h @@ -68,8 +68,7 @@ enum kgsl_event_results { KGSL_EVENT_CANCELLED = 2, }; -#define KGSL_FLAG_WAKE_ON_TOUCH BIT(0) -#define KGSL_FLAG_SPARSE BIT(1) +#define KGSL_FLAG_SPARSE BIT(0) /* * "list" of event types for ftrace symbolic magic @@ -731,10 +730,13 @@ void kgsl_device_platform_remove(struct kgsl_device *device); 
const char *kgsl_pwrstate_to_str(unsigned int state); -int kgsl_device_snapshot_init(struct kgsl_device *device); -void kgsl_device_snapshot(struct kgsl_device *device, - struct kgsl_context *context, bool gmu_fault); -void kgsl_device_snapshot_close(struct kgsl_device *device); +static inline int kgsl_device_snapshot_init(struct kgsl_device *device) +{ + return 0; +} +static inline void kgsl_device_snapshot(struct kgsl_device *device, + struct kgsl_context *context, bool gmu_fault) {} +static inline void kgsl_device_snapshot_close(struct kgsl_device *device) {} void kgsl_events_init(void); void kgsl_events_exit(void); diff --git a/drivers/gpu/msm/kgsl_drawobj.c b/drivers/gpu/msm/kgsl_drawobj.c index 05c41362e11d..f003f522e07f 100644 --- a/drivers/gpu/msm/kgsl_drawobj.c +++ b/drivers/gpu/msm/kgsl_drawobj.c @@ -42,6 +42,11 @@ */ static struct kmem_cache *memobjs_cache; static struct kmem_cache *sparseobjs_cache; +static struct kmem_cache *drawobj_sparse_cache; +static struct kmem_cache *drawobj_sync_cache; +static struct kmem_cache *drawobj_cmd_cache; + +#ifdef CONFIG_FENCE_DEBUG static void free_fence_names(struct kgsl_drawobj_sync *syncobj) { @@ -54,6 +59,7 @@ static void free_fence_names(struct kgsl_drawobj_sync *syncobj) kfree(event->info.fences); } } +#endif void kgsl_drawobj_destroy_object(struct kref *kref) { @@ -66,16 +72,18 @@ void kgsl_drawobj_destroy_object(struct kref *kref) switch (drawobj->type) { case SYNCOBJ_TYPE: syncobj = SYNCOBJ(drawobj); +#ifdef CONFIG_FENCE_DEBUG free_fence_names(syncobj); +#endif kfree(syncobj->synclist); - kfree(syncobj); + kmem_cache_free(drawobj_sync_cache, syncobj); break; case CMDOBJ_TYPE: case MARKEROBJ_TYPE: - kfree(CMDOBJ(drawobj)); + kmem_cache_free(drawobj_cmd_cache, CMDOBJ(drawobj)); break; case SPARSEOBJ_TYPE: - kfree(SPARSEOBJ(drawobj)); + kmem_cache_free(drawobj_sparse_cache, SPARSEOBJ(drawobj)); break; } } @@ -107,12 +115,14 @@ void kgsl_dump_syncpoints(struct kgsl_device *device, break; } case 
KGSL_CMD_SYNCPOINT_TYPE_FENCE: { +#ifdef CONFIG_FENCE_DEBUG int j; struct event_fence_info *info = &event->info; for (j = 0; j < info->num_fences; j++) dev_err(device->dev, "[%d] fence: %s\n", i, info->fences[j].name); +#endif break; } } @@ -146,10 +156,6 @@ static void syncobj_timer(unsigned long data) "kgsl: possible gpu syncpoint deadlock for context %u timestamp %u\n", drawobj->context->id, drawobj->timestamp); - set_bit(ADRENO_CONTEXT_FENCE_LOG, &drawobj->context->priv); - kgsl_context_dump(drawobj->context); - clear_bit(ADRENO_CONTEXT_FENCE_LOG, &drawobj->context->priv); - dev_err(device->dev, " pending events:\n"); for (i = 0; i < syncobj->numsyncs; i++) { @@ -164,12 +170,14 @@ static void syncobj_timer(unsigned long data) i, event->context->id, event->timestamp); break; case KGSL_CMD_SYNCPOINT_TYPE_FENCE: { +#ifdef CONFIG_FENCE_DEBUG int j; struct event_fence_info *info = &event->info; for (j = 0; j < info->num_fences; j++) dev_err(device->dev, " [%u] FENCE %s\n", i, info->fences[j].name); +#endif break; } } @@ -355,11 +363,13 @@ EXPORT_SYMBOL(kgsl_drawobj_destroy); static bool drawobj_sync_fence_func(void *priv) { struct kgsl_drawobj_sync_event *event = priv; +#ifdef CONFIG_FENCE_DEBUG int i; for (i = 0; i < event->info.num_fences; i++) trace_syncpoint_fence_expire(event->syncobj, event->info.fences[i].name); +#endif /* * Only call kgsl_drawobj_put() if it's not marked for cancellation @@ -385,8 +395,10 @@ static int drawobj_add_sync_fence(struct kgsl_device *device, struct kgsl_cmd_syncpoint_fence *sync = priv; struct kgsl_drawobj *drawobj = DRAWOBJ(syncobj); struct kgsl_drawobj_sync_event *event; - unsigned int id, i; - + unsigned int id; +#ifdef CONFIG_FENCE_DEBUG + unsigned int i; +#endif kref_get(&drawobj->refcount); id = syncobj->numsyncs++; @@ -423,8 +435,10 @@ static int drawobj_add_sync_fence(struct kgsl_device *device, return ret; } +#ifdef CONFIG_FENCE_DEBUG for (i = 0; i < event->info.num_fences; i++) trace_syncpoint_fence(syncobj, 
event->info.fences[i].name); +#endif return 0; } @@ -518,20 +532,24 @@ int kgsl_drawobj_sync_add_sync(struct kgsl_device *device, struct kgsl_cmd_syncpoint *sync) { void *priv; - int ret, psize; + int psize; struct kgsl_drawobj *drawobj = DRAWOBJ(syncobj); int (*func)(struct kgsl_device *device, struct kgsl_drawobj_sync *syncobj, void *priv); + struct kgsl_cmd_syncpoint_timestamp sync_timestamp; + struct kgsl_cmd_syncpoint_fence sync_fence; switch (sync->type) { case KGSL_CMD_SYNCPOINT_TYPE_TIMESTAMP: psize = sizeof(struct kgsl_cmd_syncpoint_timestamp); func = drawobj_add_sync_timestamp; + priv = &sync_timestamp; break; case KGSL_CMD_SYNCPOINT_TYPE_FENCE: psize = sizeof(struct kgsl_cmd_syncpoint_fence); func = drawobj_add_sync_fence; + priv = &sync_fence; break; default: KGSL_DRV_ERR(device, @@ -547,19 +565,10 @@ int kgsl_drawobj_sync_add_sync(struct kgsl_device *device, return -EINVAL; } - priv = kzalloc(sync->size, GFP_KERNEL); - if (priv == NULL) - return -ENOMEM; - - if (copy_from_user(priv, sync->priv, sync->size)) { - kfree(priv); + if (copy_from_user(priv, sync->priv, sync->size)) return -EFAULT; - } - ret = func(device, syncobj, priv); - kfree(priv); - - return ret; + return func(device, syncobj, priv); } static void add_profiling_buffer(struct kgsl_device *device, @@ -685,12 +694,27 @@ int kgsl_drawobj_cmd_add_ibdesc(struct kgsl_device *device, } static void *_drawobj_create(struct kgsl_device *device, - struct kgsl_context *context, unsigned int size, - unsigned int type) + struct kgsl_context *context, unsigned int type) { - void *obj = kzalloc(size, GFP_KERNEL); + void *obj; struct kgsl_drawobj *drawobj; + switch (type) { + case SYNCOBJ_TYPE: + obj = kmem_cache_zalloc(drawobj_sync_cache, GFP_KERNEL); + break; + case CMDOBJ_TYPE: + case MARKEROBJ_TYPE: + obj = kmem_cache_zalloc(drawobj_cmd_cache, GFP_KERNEL); + break; + case SPARSEOBJ_TYPE: + obj = kmem_cache_zalloc(drawobj_sparse_cache, GFP_KERNEL); + break; + default: + // noop + return 
ERR_PTR(-ENOMEM); + } + if (obj == NULL) return ERR_PTR(-ENOMEM); @@ -699,7 +723,18 @@ static void *_drawobj_create(struct kgsl_device *device, * during the lifetime of this object */ if (!_kgsl_context_get(context)) { - kfree(obj); + switch (type) { + case SYNCOBJ_TYPE: + kmem_cache_free(drawobj_sync_cache, obj); + break; + case CMDOBJ_TYPE: + case MARKEROBJ_TYPE: + kmem_cache_free(drawobj_cmd_cache, obj); + break; + case SPARSEOBJ_TYPE: + kmem_cache_free(drawobj_sparse_cache, obj); + break; + } return ERR_PTR(-ENOENT); } @@ -727,7 +762,7 @@ struct kgsl_drawobj_sparse *kgsl_drawobj_sparse_create( struct kgsl_context *context, unsigned int flags) { struct kgsl_drawobj_sparse *sparseobj = _drawobj_create(device, - context, sizeof(*sparseobj), SPARSEOBJ_TYPE); + context, SPARSEOBJ_TYPE); if (!IS_ERR(sparseobj)) INIT_LIST_HEAD(&sparseobj->sparselist); @@ -747,7 +782,7 @@ struct kgsl_drawobj_sync *kgsl_drawobj_sync_create(struct kgsl_device *device, struct kgsl_context *context) { struct kgsl_drawobj_sync *syncobj = _drawobj_create(device, - context, sizeof(*syncobj), SYNCOBJ_TYPE); + context, SYNCOBJ_TYPE); /* Add a timer to help debug sync deadlocks */ if (!IS_ERR(syncobj)) @@ -772,8 +807,7 @@ struct kgsl_drawobj_cmd *kgsl_drawobj_cmd_create(struct kgsl_device *device, unsigned int type) { struct kgsl_drawobj_cmd *cmdobj = _drawobj_create(device, - context, sizeof(*cmdobj), - (type & (CMDOBJ_TYPE | MARKEROBJ_TYPE))); + context, (type & (CMDOBJ_TYPE | MARKEROBJ_TYPE))); if (!IS_ERR(cmdobj)) { /* sanitize our flags for drawobj's */ @@ -1167,14 +1201,23 @@ void kgsl_drawobjs_cache_exit(void) { kmem_cache_destroy(memobjs_cache); kmem_cache_destroy(sparseobjs_cache); + + kmem_cache_destroy(drawobj_sparse_cache); + kmem_cache_destroy(drawobj_sync_cache); + kmem_cache_destroy(drawobj_cmd_cache); } int kgsl_drawobjs_cache_init(void) { - memobjs_cache = KMEM_CACHE(kgsl_memobj_node, 0); - sparseobjs_cache = KMEM_CACHE(kgsl_sparseobj_node, 0); + memobjs_cache = 
KMEM_CACHE(kgsl_memobj_node, SLAB_HWCACHE_ALIGN); + sparseobjs_cache = KMEM_CACHE(kgsl_sparseobj_node, SLAB_HWCACHE_ALIGN); - if (!memobjs_cache || !sparseobjs_cache) + drawobj_sparse_cache = KMEM_CACHE(kgsl_drawobj_sparse, SLAB_HWCACHE_ALIGN); + drawobj_sync_cache = KMEM_CACHE(kgsl_drawobj_sync, SLAB_HWCACHE_ALIGN); + drawobj_cmd_cache = KMEM_CACHE(kgsl_drawobj_cmd, SLAB_HWCACHE_ALIGN); + + if (!memobjs_cache || !sparseobjs_cache || + !drawobj_sparse_cache || !drawobj_sync_cache || !drawobj_cmd_cache) return -ENOMEM; return 0; diff --git a/drivers/gpu/msm/kgsl_drawobj.h b/drivers/gpu/msm/kgsl_drawobj.h index bd32f5e503a7..79174751b944 100644 --- a/drivers/gpu/msm/kgsl_drawobj.h +++ b/drivers/gpu/msm/kgsl_drawobj.h @@ -107,12 +107,16 @@ struct kgsl_drawobj_sync { #define KGSL_FENCE_NAME_LEN 74 +#ifdef CONFIG_FENCE_DEBUG struct fence_info { char name[KGSL_FENCE_NAME_LEN]; }; +#endif struct event_fence_info { +#ifdef CONFIG_FENCE_DEBUG struct fence_info *fences; +#endif int num_fences; }; diff --git a/drivers/gpu/msm/kgsl_events.c b/drivers/gpu/msm/kgsl_events.c index 5689c293e7fb..91544b6a978f 100644 --- a/drivers/gpu/msm/kgsl_events.c +++ b/drivers/gpu/msm/kgsl_events.c @@ -61,7 +61,6 @@ const char *prio_to_string(enum kgsl_priority prio) static void _kgsl_event_worker(struct kthread_work *work) { struct kgsl_event *event = container_of(work, struct kgsl_event, work); - int id = KGSL_CONTEXT_ID(event->context); trace_kgsl_fire_event(id, event->timestamp, event->result, jiffies - event->created, event->func, event->prio); diff --git a/drivers/gpu/msm/kgsl_ioctl.c b/drivers/gpu/msm/kgsl_ioctl.c index 9b02e1993a09..fce411b3c781 100644 --- a/drivers/gpu/msm/kgsl_ioctl.c +++ b/drivers/gpu/msm/kgsl_ioctl.c @@ -17,6 +17,7 @@ #include #include "kgsl_device.h" #include "kgsl_sync.h" +#include "adreno.h" static const struct kgsl_ioctl kgsl_ioctl_funcs[] = { KGSL_IOCTL_FUNC(IOCTL_KGSL_DEVICE_GETPROPERTY, @@ -168,8 +169,13 @@ long kgsl_ioctl(struct file *filep, unsigned int 
cmd, unsigned long arg) { struct kgsl_device_private *dev_priv = filep->private_data; struct kgsl_device *device = dev_priv->device; + struct adreno_device *adreno_dev = ADRENO_DEVICE(device); long ret; + if (cmd == IOCTL_KGSL_GPU_COMMAND && + READ_ONCE(device->state) != KGSL_STATE_ACTIVE) + kgsl_schedule_work(&adreno_dev->pwr_on_work); + ret = kgsl_ioctl_helper(filep, cmd, arg, kgsl_ioctl_funcs, ARRAY_SIZE(kgsl_ioctl_funcs)); diff --git a/drivers/gpu/msm/kgsl_pool.c b/drivers/gpu/msm/kgsl_pool.c index ea9e52354f56..47804e2ff267 100644 --- a/drivers/gpu/msm/kgsl_pool.c +++ b/drivers/gpu/msm/kgsl_pool.c @@ -478,12 +478,16 @@ kgsl_pool_shrink_scan_objects(struct shrinker *shrinker, /* nr represents number of pages to be removed*/ int nr = sc->nr_to_scan; int total_pages = kgsl_pool_size_total(); + unsigned long ret; /* Target pages represents new pool size */ int target_pages = (nr > total_pages) ? 0 : (total_pages - nr); /* Reduce pool size to target_pages */ - return kgsl_pool_reduce(target_pages, false); + ret = kgsl_pool_reduce(target_pages, false); + + /* If we are unable to shrink more, stop trying */ + return (ret == 0) ? SHRINK_STOP : ret; } static unsigned long diff --git a/drivers/gpu/msm/kgsl_pwrctrl.c b/drivers/gpu/msm/kgsl_pwrctrl.c index 40ad5fe70750..e6cbd707071e 100644 --- a/drivers/gpu/msm/kgsl_pwrctrl.c +++ b/drivers/gpu/msm/kgsl_pwrctrl.c @@ -1,4 +1,4 @@ -/* Copyright (c) 2010-2020, The Linux Foundation. All rights reserved. +/* Copyright (c) 2010-2019, The Linux Foundation. All rights reserved. 
* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -28,7 +28,6 @@ #include "kgsl_device.h" #include "kgsl_trace.h" #include "kgsl_gmu_core.h" -#include "kgsl_trace_power.h" #define KGSL_PWRFLAGS_POWER_ON 0 #define KGSL_PWRFLAGS_CLK_ON 1 @@ -385,7 +384,6 @@ unsigned int kgsl_pwrctrl_adjust_pwrlevel(struct kgsl_device *device, unsigned int new_level) { struct kgsl_pwrctrl *pwr = &device->pwrctrl; - unsigned int old_level = pwr->active_pwrlevel; /* If a pwr constraint is expired, remove it */ if ((pwr->constraint.type != KGSL_CONSTRAINT_NONE) && @@ -493,8 +491,6 @@ void kgsl_pwrctrl_pwrlevel_change(struct kgsl_device *device, pwr->previous_pwrlevel, pwr->pwrlevels[old_level].gpu_freq); - trace_gpu_frequency(pwrlevel->gpu_freq/1000, 0); - /* * Some targets do not support the bandwidth requirement of * GPU at TURBO, for such targets we need to set GPU-BIMC @@ -2792,7 +2788,6 @@ static int _wake(struct kgsl_device *device) { struct kgsl_pwrctrl *pwr = &device->pwrctrl; int status = 0; - unsigned int state = device->state; switch (device->state) { case KGSL_STATE_SUSPEND: @@ -2819,9 +2814,6 @@ static int _wake(struct kgsl_device *device) /* Turn on the core clocks */ kgsl_pwrctrl_clk(device, KGSL_PWRFLAGS_ON, KGSL_STATE_ACTIVE); - if (state == KGSL_STATE_SLUMBER || state == KGSL_STATE_SUSPEND) - trace_gpu_frequency( - pwr->pwrlevels[pwr->active_pwrlevel].gpu_freq/1000, 0); /* * No need to turn on/off irq here as it no longer affects * power collapse @@ -3030,7 +3022,6 @@ _slumber(struct kgsl_device *device) kgsl_pwrctrl_clk_set_options(device, false); kgsl_pwrctrl_disable(device); kgsl_pwrscale_sleep(device); - trace_gpu_frequency(0, 0); kgsl_pwrctrl_set_state(device, KGSL_STATE_SLUMBER); pm_qos_update_request(&device->pwrctrl.pm_qos_req_dma, PM_QOS_DEFAULT_VALUE); @@ -3046,7 +3037,6 @@ _slumber(struct kgsl_device *device) break; case KGSL_STATE_AWARE: 
kgsl_pwrctrl_disable(device); - trace_gpu_frequency(0, 0); kgsl_pwrctrl_set_state(device, KGSL_STATE_SLUMBER); break; default: diff --git a/drivers/gpu/msm/kgsl_pwrscale.c b/drivers/gpu/msm/kgsl_pwrscale.c index 3993d2ab3635..f7289ba2e881 100644 --- a/drivers/gpu/msm/kgsl_pwrscale.c +++ b/drivers/gpu/msm/kgsl_pwrscale.c @@ -467,7 +467,6 @@ static int popp_trans2(struct kgsl_device *device, int level) { struct kgsl_pwrctrl *pwr = &device->pwrctrl; struct kgsl_pwrscale *psc = &device->pwrscale; - int old_level = psc->popp_level; if (!test_bit(POPP_ON, &psc->popp_state)) return level; diff --git a/drivers/gpu/msm/kgsl_sync.c b/drivers/gpu/msm/kgsl_sync.c index c353b912105f..9e05b8771caf 100644 --- a/drivers/gpu/msm/kgsl_sync.c +++ b/drivers/gpu/msm/kgsl_sync.c @@ -318,32 +318,40 @@ static const char *kgsl_sync_fence_driver_name(struct dma_fence *fence) static const char *kgsl_sync_timeline_name(struct dma_fence *fence) { +#ifdef CONFIG_FENCE_DEBUG struct kgsl_sync_fence *kfence = (struct kgsl_sync_fence *)fence; struct kgsl_sync_timeline *ktimeline = kfence->parent; return ktimeline->name; +#else + return "kgsl_sync_timeline"; +#endif } int kgsl_sync_timeline_create(struct kgsl_context *context) { struct kgsl_sync_timeline *ktimeline; +#ifdef CONFIG_FENCE_DEBUG /* * Generate a name which includes the thread name, thread id, process * name, process id, and context id. This makes it possible to * identify the context of a timeline in the sync dump. 
*/ char ktimeline_name[sizeof(ktimeline->name)] = {}; +#endif /* Put context when timeline is released */ if (!_kgsl_context_get(context)) return -ENOENT; +#ifdef CONFIG_FENCE_DEBUG snprintf(ktimeline_name, sizeof(ktimeline_name), "%s_%d-%.15s(%d)-%.15s(%d)", context->device->name, context->id, current->group_leader->comm, current->group_leader->pid, current->comm, current->pid); +#endif ktimeline = kzalloc(sizeof(*ktimeline), GFP_KERNEL); if (ktimeline == NULL) { @@ -352,7 +360,9 @@ int kgsl_sync_timeline_create(struct kgsl_context *context) } kref_init(&ktimeline->kref); +#ifdef CONFIG_FENCE_DEBUG strlcpy(ktimeline->name, ktimeline_name, KGSL_TIMELINE_NAME_LEN); +#endif ktimeline->fence_context = dma_fence_context_alloc(1); ktimeline->last_timestamp = 0; INIT_LIST_HEAD(&ktimeline->child_list_head); @@ -440,6 +450,7 @@ static void kgsl_sync_fence_callback(struct dma_fence *fence, } } +#ifdef CONFIG_FENCE_DEBUG static void kgsl_get_fence_names(struct dma_fence *fence, struct event_fence_info *info_ptr) { @@ -485,6 +496,7 @@ static void kgsl_get_fence_names(struct dma_fence *fence, } } } +#endif struct kgsl_sync_fence_cb *kgsl_sync_fence_async_wait(int fd, bool (*func)(void *priv), void *priv, struct event_fence_info *info_ptr) @@ -497,6 +509,11 @@ struct kgsl_sync_fence_cb *kgsl_sync_fence_async_wait(int fd, if (fence == NULL) return ERR_PTR(-EINVAL); + if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags)) { + dma_fence_put(fence); + return NULL; + } + /* create the callback */ kcb = kzalloc(sizeof(*kcb), GFP_ATOMIC); if (kcb == NULL) { @@ -508,7 +525,9 @@ struct kgsl_sync_fence_cb *kgsl_sync_fence_async_wait(int fd, kcb->priv = priv; kcb->func = func; +#ifdef CONFIG_FENCE_DEBUG kgsl_get_fence_names(fence, info_ptr); +#endif /* if status then error or signaled */ status = dma_fence_add_callback(fence, &kcb->fence_cb, diff --git a/drivers/gpu/msm/kgsl_sync.h b/drivers/gpu/msm/kgsl_sync.h index e970c9451058..07732f580e9d 100644 --- a/drivers/gpu/msm/kgsl_sync.h 
+++ b/drivers/gpu/msm/kgsl_sync.h @@ -32,7 +32,9 @@ */ struct kgsl_sync_timeline { struct kref kref; +#ifdef CONFIG_FENCE_DEBUG char name[KGSL_TIMELINE_NAME_LEN]; +#endif u64 fence_context; diff --git a/drivers/gpu/msm/kgsl_trace.c b/drivers/gpu/msm/kgsl_trace.c index 5fd48852b3f3..3541425ff643 100644 --- a/drivers/gpu/msm/kgsl_trace.c +++ b/drivers/gpu/msm/kgsl_trace.c @@ -1,4 +1,4 @@ -/* Copyright (c) 2011, 2013, 2015, 2019 The Linux Foundation. All rights reserved. +/* Copyright (c) 2011, 2013, 2015 The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -19,11 +19,8 @@ /* Instantiate tracepoints */ #define CREATE_TRACE_POINTS #include "kgsl_trace.h" -#include "kgsl_trace_power.h" EXPORT_TRACEPOINT_SYMBOL(kgsl_regwrite); EXPORT_TRACEPOINT_SYMBOL(kgsl_issueibcmds); EXPORT_TRACEPOINT_SYMBOL(kgsl_user_pwrlevel_constraint); EXPORT_TRACEPOINT_SYMBOL(kgsl_constraint); - -EXPORT_TRACEPOINT_SYMBOL(gpu_frequency); diff --git a/drivers/gpu/msm/kgsl_trace.h b/drivers/gpu/msm/kgsl_trace.h index e279edc54587..a77a67e73f4f 100644 --- a/drivers/gpu/msm/kgsl_trace.h +++ b/drivers/gpu/msm/kgsl_trace.h @@ -11,6 +11,61 @@ * */ +#define trace_kgsl_active_count(...) {} +#define trace_kgsl_bus(...) {} +#define trace_kgsl_buslevel(...) {} +#define trace_kgsl_clk(...) {} +#define trace_kgsl_clock_throttling(...) {} +#define trace_kgsl_constraint(...) {} +#define trace_kgsl_context_create(...) {} +#define trace_kgsl_context_destroy(...) {} +#define trace_kgsl_context_detach(...) {} +#define trace_kgsl_fire_event(...) {} +#define trace_kgsl_gmu_oob_clear(...) {} +#define trace_kgsl_gmu_oob_set(...) {} +#define trace_kgsl_gpubusy(...) {} +#define trace_kgsl_hfi_receive(...) {} +#define trace_kgsl_hfi_send(...) {} +#define trace_kgsl_irq(...) {} +#define trace_kgsl_issueibcmds(...) {} +#define trace_kgsl_mem_alloc(...) {} +#define trace_kgsl_mem_free(...) 
{} +#define trace_kgsl_mem_map(...) {} +#define trace_kgsl_mem_mmap(...) {} +#define trace_kgsl_mem_sync_cache(...) {} +#define trace_kgsl_mem_sync_full_cache(...) {} +#define trace_kgsl_mem_timestamp_free(...) {} +#define trace_kgsl_mem_timestamp_queue(...) {} +#define trace_kgsl_mem_unmapped_area_collision(...) {} +#define trace_kgsl_mmu_pagefault(...) {} +#define trace_kgsl_msg(...) {} +#define trace_kgsl_pagetable_destroy(...) {} +#define trace_kgsl_popp_level(...) {} +#define trace_kgsl_popp_mod(...) {} +#define trace_kgsl_popp_nap(...) {} +#define trace_kgsl_pwrlevel(...) {} +#define trace_kgsl_pwr_request_state(...) {} +#define trace_kgsl_pwr_set_state(...) {} +#define trace_kgsl_pwrstats(...) {} +#define trace_kgsl_rail(...) {} +#define trace_kgsl_readtimestamp(...) {} +#define trace_kgsl_register_event(...) {} +#define trace_kgsl_regwrite(...) {} +#define trace_kgsl_user_pwrlevel_constraint(...) {} +#define trace_kgsl_waittimestamp_entry(...) {} +#define trace_kgsl_waittimestamp_exit(...) {} +#define trace_sparse_bind(...) {} +#define trace_sparse_phys_alloc(...) {} +#define trace_sparse_phys_free(...) {} +#define trace_sparse_unbind(...) {} +#define trace_sparse_virt_alloc(...) {} +#define trace_sparse_virt_free(...) {} +#define trace_syncpoint_fence(...) {} +#define trace_syncpoint_fence_expire(...) {} +#define trace_syncpoint_timestamp(...) {} +#define trace_syncpoint_timestamp_expire(...) {} + +#if 0 #if !defined(_KGSL_TRACE_H) || defined(TRACE_HEADER_MULTI_READ) #define _KGSL_TRACE_H @@ -1290,3 +1345,4 @@ DEFINE_EVENT(hfi_msg_template, kgsl_hfi_receive, /* This part must be outside protection */ #include +#endif diff --git a/drivers/gpu/msm/kgsl_trace_power.h b/drivers/gpu/msm/kgsl_trace_power.h deleted file mode 100644 index 08ae0cb493fa..000000000000 --- a/drivers/gpu/msm/kgsl_trace_power.h +++ /dev/null @@ -1,49 +0,0 @@ -/* Copyright (c) 2019, The Linux Foundation. All rights reserved. 
- * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 and - * only version 2 as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - */ - -#if !defined(_KGSL_TRACE_POWER_H) || defined(TRACE_HEADER_MULTI_READ) -#define _KGSL_TRACE_POWER_H - -#undef TRACE_SYSTEM -#define TRACE_SYSTEM power -#undef TRACE_INCLUDE_FILE -#define TRACE_INCLUDE_FILE kgsl_trace_power - -#include - -/** - * gpu_frequency - Reports frequency changes in GPU clock domains - * @state: New frequency (in KHz) - * @gpu_id: GPU clock domain - */ -TRACE_EVENT(gpu_frequency, - TP_PROTO(unsigned int state, unsigned int gpu_id), - TP_ARGS(state, gpu_id), - TP_STRUCT__entry( - __field(unsigned int, state) - __field(unsigned int, gpu_id) - ), - TP_fast_assign( - __entry->state = state; - __entry->gpu_id = gpu_id; - ), - - TP_printk("state=%lu gpu_id=%lu", - (unsigned long)__entry->state, - (unsigned long)__entry->gpu_id) -); - -#endif /* _KGSL_TRACE_POWER_H */ - -/* This part must be outside protection */ -#include diff --git a/drivers/hwtracing/coresight/coresight-ost.c b/drivers/hwtracing/coresight/coresight-ost.c index 4a277148196b..662196cb39b6 100644 --- a/drivers/hwtracing/coresight/coresight-ost.c +++ b/drivers/hwtracing/coresight/coresight-ost.c @@ -62,30 +62,30 @@ static int stm_ost_send(void __iomem *addr, const void *data, uint32_t size) uint32_t len = size; if (((unsigned long)data & 0x1) && (size >= 1)) { - writeb_relaxed_no_log(*(uint8_t *)data, addr); + writeb_relaxed(*(uint8_t *)data, addr); data++; size--; } if (((unsigned long)data & 0x2) && (size >= 2)) { - writew_relaxed_no_log(*(uint16_t *)data, addr); + writew_relaxed(*(uint16_t *)data, addr); data += 
2; size -= 2; } /* now we are 32bit aligned */ while (size >= 4) { - writel_relaxed_no_log(*(uint32_t *)data, addr); + writel_relaxed(*(uint32_t *)data, addr); data += 4; size -= 4; } if (size >= 2) { - writew_relaxed_no_log(*(uint16_t *)data, addr); + writew_relaxed(*(uint16_t *)data, addr); data += 2; size -= 2; } if (size >= 1) { - writeb_relaxed_no_log(*(uint8_t *)data, addr); + writeb_relaxed(*(uint8_t *)data, addr); data++; size--; } diff --git a/drivers/i2c/busses/i2c-qcom-geni.c b/drivers/i2c/busses/i2c-qcom-geni.c index ddbcae296e2b..3011eae60c13 100644 --- a/drivers/i2c/busses/i2c-qcom-geni.c +++ b/drivers/i2c/busses/i2c-qcom-geni.c @@ -406,7 +406,7 @@ static void gi2c_ev_cb(struct dma_chan *ch, struct msm_gpi_cb const *cb_str, break; } if (cb_str->cb_event != MSM_GPI_QUP_NOTIFY) - GENI_SE_ERR(gi2c->ipcl, true, gi2c->dev, + GENI_SE_DBG(gi2c->ipcl, true, gi2c->dev, "GSI QN err:0x%x, status:0x%x, err:%d\n", cb_str->error_log.error_code, m_stat, cb_str->cb_event); diff --git a/drivers/iommu/arm-smmu.c b/drivers/iommu/arm-smmu.c index 03abfe8b9a58..5557f1bd2356 100644 --- a/drivers/iommu/arm-smmu.c +++ b/drivers/iommu/arm-smmu.c @@ -2213,14 +2213,6 @@ static int arm_smmu_init_domain_context(struct iommu_domain *domain, cfg->cbndx = ret; - if (!(smmu_domain->attributes & (1 << DOMAIN_ATTR_GEOMETRY))) { - /* Geometry is not set use the default geometry */ - domain->geometry.aperture_start = 0; - domain->geometry.aperture_end = (1UL << ias) - 1; - if (domain->geometry.aperture_end >= SZ_1G * 4ULL) - domain->geometry.aperture_end = (SZ_1G * 4ULL) - 1; - } - if (arm_smmu_is_slave_side_secure(smmu_domain)) { smmu_domain->pgtbl_cfg = (struct io_pgtable_cfg) { .quirks = quirks, @@ -2231,8 +2223,6 @@ static int arm_smmu_init_domain_context(struct iommu_domain *domain, }, .tlb = tlb_ops, .iommu_dev = smmu->dev, - .iova_base = domain->geometry.aperture_start, - .iova_end = domain->geometry.aperture_end, }; fmt = ARM_MSM_SECURE; } else { @@ -2243,8 +2233,6 @@ static int 
arm_smmu_init_domain_context(struct iommu_domain *domain, .oas = oas, .tlb = tlb_ops, .iommu_dev = smmu->dev, - .iova_base = domain->geometry.aperture_start, - .iova_end = domain->geometry.aperture_end, }; } @@ -3533,7 +3521,7 @@ static int arm_smmu_domain_get_attr(struct iommu_domain *domain, ret = -ENODEV; break; } - info->ops = smmu_domain->pgtbl_ops; + info->pmds = smmu_domain->pgtbl_cfg.av8l_fast_cfg.pmds; ret = 0; break; } @@ -3794,6 +3782,7 @@ static int arm_smmu_domain_set_attr(struct iommu_domain *domain, ret = 0; break; } + case DOMAIN_ATTR_CB_STALL_DISABLE: if (*((int *)data)) smmu_domain->attributes |= @@ -3806,44 +3795,6 @@ static int arm_smmu_domain_set_attr(struct iommu_domain *domain, 1 << DOMAIN_ATTR_NO_CFRE; ret = 0; break; - case DOMAIN_ATTR_GEOMETRY: { - struct iommu_domain_geometry *geometry = - (struct iommu_domain_geometry *)data; - - if (smmu_domain->smmu != NULL) { - dev_err(smmu_domain->smmu->dev, - "cannot set geometry attribute while attached\n"); - ret = -EBUSY; - break; - } - - if (geometry->aperture_start >= SZ_1G * 4ULL || - geometry->aperture_end >= SZ_1G * 4ULL) { - pr_err("fastmap does not support IOVAs >= 4GB\n"); - ret = -EINVAL; - break; - } - if (smmu_domain->attributes - & (1 << DOMAIN_ATTR_GEOMETRY)) { - if (geometry->aperture_start - < domain->geometry.aperture_start) - domain->geometry.aperture_start = - geometry->aperture_start; - - if (geometry->aperture_end - > domain->geometry.aperture_end) - domain->geometry.aperture_end = - geometry->aperture_end; - } else { - smmu_domain->attributes |= 1 << DOMAIN_ATTR_GEOMETRY; - domain->geometry.aperture_start = - geometry->aperture_start; - domain->geometry.aperture_end = geometry->aperture_end; - } - ret = 0; - break; - } - default: ret = -ENODEV; } diff --git a/drivers/iommu/dma-mapping-fast.c b/drivers/iommu/dma-mapping-fast.c index ec4ba8670b9a..08deff94e783 100644 --- a/drivers/iommu/dma-mapping-fast.c +++ b/drivers/iommu/dma-mapping-fast.c @@ -1,4 +1,4 @@ -/* Copyright (c) 
2016-2017, The Linux Foundation. All rights reserved. +/* Copyright (c) 2016-2019, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -21,7 +21,6 @@ #include #include #include -#include "io-pgtable.h" #include #include @@ -30,6 +29,14 @@ #define FAST_PAGE_SHIFT 12 #define FAST_PAGE_SIZE (1UL << FAST_PAGE_SHIFT) #define FAST_PAGE_MASK (~(PAGE_SIZE - 1)) +#define FAST_PTE_ADDR_MASK ((av8l_fast_iopte)0xfffffffff000) +#define FAST_MAIR_ATTR_IDX_CACHE 1 +#define FAST_PTE_ATTRINDX_SHIFT 2 +#define FAST_PTE_ATTRINDX_MASK 0x7 +#define FAST_PTE_SH_SHIFT 8 +#define FAST_PTE_SH_MASK (((av8l_fast_iopte)0x3) << FAST_PTE_SH_SHIFT) +#define FAST_PTE_SH_OS (((av8l_fast_iopte)2) << FAST_PTE_SH_SHIFT) +#define FAST_PTE_SH_IS (((av8l_fast_iopte)3) << FAST_PTE_SH_SHIFT) static pgprot_t __get_dma_pgprot(unsigned long attrs, pgprot_t prot, bool coherent) @@ -54,6 +61,27 @@ static int __get_iommu_pgprot(unsigned long attrs, int prot, return prot; } +static void fast_dmac_clean_range(struct dma_fast_smmu_mapping *mapping, + void *start, void *end) +{ + if (!mapping->is_smmu_pt_coherent) + dmac_clean_range(start, end); +} + +static bool __fast_is_pte_coherent(av8l_fast_iopte *ptep) +{ + int attr_idx = (*ptep & (FAST_PTE_ATTRINDX_MASK << + FAST_PTE_ATTRINDX_SHIFT)) >> + FAST_PTE_ATTRINDX_SHIFT; + + if ((attr_idx == FAST_MAIR_ATTR_IDX_CACHE) && + (((*ptep & FAST_PTE_SH_MASK) == FAST_PTE_SH_IS) || + (*ptep & FAST_PTE_SH_MASK) == FAST_PTE_SH_OS)) + return true; + + return false; +} + static bool is_dma_coherent(struct device *dev, unsigned long attrs) { bool is_coherent; @@ -173,11 +201,7 @@ static dma_addr_t __fast_smmu_alloc_iova(struct dma_fast_smmu_mapping *mapping, iommu_tlbiall(mapping->domain); mapping->have_stale_tlbs = false; - av8l_fast_clear_stale_ptes(mapping->pgtbl_ops, - mapping->domain->geometry.aperture_start, - mapping->base, - mapping->base 
+ mapping->size - 1, - skip_sync); + av8l_fast_clear_stale_ptes(mapping->pgtbl_pmds, skip_sync); } iova = (bit << FAST_PAGE_SHIFT) + mapping->base; @@ -350,10 +374,12 @@ static dma_addr_t fast_smmu_map_page(struct device *dev, struct page *page, struct dma_fast_smmu_mapping *mapping = dev->archdata.mapping->fast; dma_addr_t iova; unsigned long flags; + av8l_fast_iopte *pmd; phys_addr_t phys_plus_off = page_to_phys(page) + offset; phys_addr_t phys_to_map = round_down(phys_plus_off, FAST_PAGE_SIZE); unsigned long offset_from_phys_to_map = phys_plus_off & ~FAST_PAGE_MASK; size_t len = ALIGN(size + offset_from_phys_to_map, FAST_PAGE_SIZE); + int nptes = len >> FAST_PAGE_SHIFT; bool skip_sync = (attrs & DMA_ATTR_SKIP_CPU_SYNC); int prot = __fast_dma_direction_to_prot(dir); bool is_coherent = is_dma_coherent(dev, attrs); @@ -371,10 +397,13 @@ static dma_addr_t fast_smmu_map_page(struct device *dev, struct page *page, if (unlikely(iova == DMA_ERROR_CODE)) goto fail; - if (unlikely(av8l_fast_map_public(mapping->pgtbl_ops, iova, - phys_to_map, len, prot))) + pmd = iopte_pmd_offset(mapping->pgtbl_pmds, iova); + + if (unlikely(av8l_fast_map_public(pmd, phys_to_map, len, prot))) goto fail_free_iova; + fast_dmac_clean_range(mapping, pmd, pmd + nptes); + spin_unlock_irqrestore(&mapping->lock, flags); trace_map(mapping->domain, iova, phys_to_map, len, prot); @@ -393,23 +422,20 @@ static void fast_smmu_unmap_page(struct device *dev, dma_addr_t iova, { struct dma_fast_smmu_mapping *mapping = dev->archdata.mapping->fast; unsigned long flags; + av8l_fast_iopte *pmd = iopte_pmd_offset(mapping->pgtbl_pmds, iova); unsigned long offset = iova & ~FAST_PAGE_MASK; size_t len = ALIGN(size + offset, FAST_PAGE_SIZE); + int nptes = len >> FAST_PAGE_SHIFT; + struct page *page = phys_to_page((*pmd & FAST_PTE_ADDR_MASK)); bool skip_sync = (attrs & DMA_ATTR_SKIP_CPU_SYNC); bool is_coherent = is_dma_coherent(dev, attrs); - if (!skip_sync && !is_coherent) { - phys_addr_t phys; - - phys = 
av8l_fast_iova_to_phys_public(mapping->pgtbl_ops, iova); - WARN_ON(!phys); - - __fast_dma_page_dev_to_cpu(phys_to_page(phys), offset, - size, dir); - } + if (!skip_sync && !is_coherent) + __fast_dma_page_dev_to_cpu(page, offset, size, dir); spin_lock_irqsave(&mapping->lock, flags); - av8l_fast_unmap_public(mapping->pgtbl_ops, iova, len); + av8l_fast_unmap_public(pmd, len); + fast_dmac_clean_range(mapping, pmd, pmd + nptes); __fast_smmu_free_iova(mapping, iova - offset, len); spin_unlock_irqrestore(&mapping->lock, flags); @@ -420,34 +446,24 @@ static void fast_smmu_sync_single_for_cpu(struct device *dev, dma_addr_t iova, size_t size, enum dma_data_direction dir) { struct dma_fast_smmu_mapping *mapping = dev->archdata.mapping->fast; + av8l_fast_iopte *pmd = iopte_pmd_offset(mapping->pgtbl_pmds, iova); unsigned long offset = iova & ~FAST_PAGE_MASK; + struct page *page = phys_to_page((*pmd & FAST_PTE_ADDR_MASK)); - if (!av8l_fast_iova_coherent_public(mapping->pgtbl_ops, iova)) { - phys_addr_t phys; - - phys = av8l_fast_iova_to_phys_public(mapping->pgtbl_ops, iova); - WARN_ON(!phys); - - __fast_dma_page_dev_to_cpu(phys_to_page(phys), offset, - size, dir); - } + if (!__fast_is_pte_coherent(pmd)) + __fast_dma_page_dev_to_cpu(page, offset, size, dir); } static void fast_smmu_sync_single_for_device(struct device *dev, dma_addr_t iova, size_t size, enum dma_data_direction dir) { struct dma_fast_smmu_mapping *mapping = dev->archdata.mapping->fast; + av8l_fast_iopte *pmd = iopte_pmd_offset(mapping->pgtbl_pmds, iova); unsigned long offset = iova & ~FAST_PAGE_MASK; + struct page *page = phys_to_page((*pmd & FAST_PTE_ADDR_MASK)); - if (!av8l_fast_iova_coherent_public(mapping->pgtbl_ops, iova)) { - phys_addr_t phys; - - phys = av8l_fast_iova_to_phys_public(mapping->pgtbl_ops, iova); - WARN_ON(!phys); - - __fast_dma_page_cpu_to_dev(phys_to_page(phys), offset, - size, dir); - } + if (!__fast_is_pte_coherent(pmd)) + __fast_dma_page_cpu_to_dev(page, offset, size, dir); } static int 
fast_smmu_map_sg(struct device *dev, struct scatterlist *sg, @@ -522,6 +538,7 @@ static void *fast_smmu_alloc(struct device *dev, size_t size, struct sg_table sgt; dma_addr_t dma_addr, iova_iter; void *addr; + av8l_fast_iopte *ptep; unsigned long flags; struct sg_mapping_iter miter; size_t count = ALIGN(size, SZ_4K) >> PAGE_SHIFT; @@ -579,14 +596,17 @@ static void *fast_smmu_alloc(struct device *dev, size_t size, sg_miter_start(&miter, sgt.sgl, sgt.orig_nents, SG_MITER_FROM_SG | SG_MITER_ATOMIC); while (sg_miter_next(&miter)) { + int nptes = miter.length >> FAST_PAGE_SHIFT; + + ptep = iopte_pmd_offset(mapping->pgtbl_pmds, iova_iter); if (unlikely(av8l_fast_map_public( - mapping->pgtbl_ops, iova_iter, - page_to_phys(miter.page), + ptep, page_to_phys(miter.page), miter.length, prot))) { dev_err(dev, "no map public\n"); /* TODO: unwind previously successful mappings */ goto out_free_iova; } + fast_dmac_clean_range(mapping, ptep, ptep + nptes); iova_iter += miter.length; } sg_miter_stop(&miter); @@ -606,7 +626,9 @@ static void *fast_smmu_alloc(struct device *dev, size_t size, out_unmap: /* need to take the lock again for page tables and iova */ spin_lock_irqsave(&mapping->lock, flags); - av8l_fast_unmap_public(mapping->pgtbl_ops, dma_addr, size); + ptep = iopte_pmd_offset(mapping->pgtbl_pmds, dma_addr); + av8l_fast_unmap_public(ptep, size); + fast_dmac_clean_range(mapping, ptep, ptep + count); out_free_iova: __fast_smmu_free_iova(mapping, dma_addr, size); spin_unlock_irqrestore(&mapping->lock, flags); @@ -625,6 +647,7 @@ static void fast_smmu_free(struct device *dev, size_t size, struct vm_struct *area; struct page **pages; size_t count = ALIGN(size, SZ_4K) >> FAST_PAGE_SHIFT; + av8l_fast_iopte *ptep; unsigned long flags; size = ALIGN(size, SZ_4K); @@ -635,8 +658,10 @@ static void fast_smmu_free(struct device *dev, size_t size, pages = area->pages; dma_common_free_remap(vaddr, size, VM_USERMAP, false); + ptep = iopte_pmd_offset(mapping->pgtbl_pmds, dma_handle); 
spin_lock_irqsave(&mapping->lock, flags); - av8l_fast_unmap_public(mapping->pgtbl_ops, dma_handle, size); + av8l_fast_unmap_public(ptep, size); + fast_dmac_clean_range(mapping, ptep, ptep + count); __fast_smmu_free_iova(mapping, dma_handle, size); spin_unlock_irqrestore(&mapping->lock, flags); __fast_smmu_free_pages(pages, count); @@ -742,20 +767,16 @@ static int fast_smmu_mapping_error(struct device *dev, static void __fast_smmu_mapped_over_stale(struct dma_fast_smmu_mapping *fast, void *data) { - av8l_fast_iopte *pmds, *ptep = data; + av8l_fast_iopte *ptep = data; dma_addr_t iova; unsigned long bitmap_idx; - struct io_pgtable *tbl; - tbl = container_of(fast->pgtbl_ops, struct io_pgtable, ops); - pmds = tbl->cfg.av8l_fast_cfg.pmds; - - bitmap_idx = (unsigned long)(ptep - pmds); + bitmap_idx = (unsigned long)(ptep - fast->pgtbl_pmds); iova = bitmap_idx << FAST_PAGE_SHIFT; dev_err(fast->dev, "Mapped over stale tlb at %pa\n", &iova); dev_err(fast->dev, "bitmap (failure at idx %lu):\n", bitmap_idx); dev_err(fast->dev, "ptep: %p pmds: %p diff: %lu\n", ptep, - pmds, bitmap_idx); + fast->pgtbl_pmds, bitmap_idx); print_hex_dump(KERN_ERR, "bmap: ", DUMP_PREFIX_ADDRESS, 32, 8, fast->bitmap, fast->bitmap_size, false); } @@ -801,7 +822,7 @@ static const struct dma_map_ops fast_smmu_dma_ops = { * * Creates a mapping structure which holds information about used/unused IO * address ranges, which is required to perform mapping with IOMMU aware - * functions. The only VA range supported is [0, 4GB]. + * functions. The only VA range supported is [0, 4GB). * * The client device need to be attached to the mapping with * fast_smmu_attach_device function. 
@@ -936,16 +957,19 @@ int fast_smmu_init_mapping(struct device *dev, fast_smmu_reserve_pci_windows(dev, mapping->fast); - domain->geometry.aperture_start = mapping->base; - domain->geometry.aperture_end = mapping->base + size - 1; - if (iommu_domain_get_attr(domain, DOMAIN_ATTR_PGTBL_INFO, &info)) { dev_err(dev, "Couldn't get page table info\n"); err = -EINVAL; goto release_mapping; } - mapping->fast->pgtbl_ops = (struct io_pgtable_ops *)info.ops; + mapping->fast->pgtbl_pmds = info.pmds; + + if (iommu_domain_get_attr(domain, DOMAIN_ATTR_PAGE_TABLE_IS_COHERENT, + &mapping->fast->is_smmu_pt_coherent)) { + err = -EINVAL; + goto release_mapping; + } mapping->fast->notifier.notifier_call = fast_smmu_notify; av8l_register_notify(&mapping->fast->notifier); diff --git a/drivers/iommu/io-pgtable-fast.c b/drivers/iommu/io-pgtable-fast.c index 8f26083ba76a..bf34c646fe57 100644 --- a/drivers/iommu/io-pgtable-fast.c +++ b/drivers/iommu/io-pgtable-fast.c @@ -19,7 +19,6 @@ #include #include #include -#include #include #include @@ -43,10 +42,6 @@ struct av8l_fast_io_pgtable { av8l_fast_iopte *puds[4]; av8l_fast_iopte *pmds; struct page **pages; /* page table memory */ - int nr_pages; - dma_addr_t base; - dma_addr_t start; - dma_addr_t end; }; /* Page table bits */ @@ -63,7 +58,6 @@ struct av8l_fast_io_pgtable { #define AV8L_FAST_PTE_SH_NS (((av8l_fast_iopte)0) << 8) #define AV8L_FAST_PTE_SH_OS (((av8l_fast_iopte)2) << 8) #define AV8L_FAST_PTE_SH_IS (((av8l_fast_iopte)3) << 8) -#define AV8L_FAST_PTE_SH_MASK (((av8l_fast_iopte)3) << 8) #define AV8L_FAST_PTE_NS (((av8l_fast_iopte)1) << 5) #define AV8L_FAST_PTE_VALID (((av8l_fast_iopte)1) << 0) @@ -81,7 +75,6 @@ struct av8l_fast_io_pgtable { #define AV8L_FAST_PTE_AP_PRIV_RO (((av8l_fast_iopte)2) << 6) #define AV8L_FAST_PTE_AP_RO (((av8l_fast_iopte)3) << 6) #define AV8L_FAST_PTE_ATTRINDX_SHIFT 2 -#define AV8L_FAST_PTE_ATTRINDX_MASK 0x7 #define AV8L_FAST_PTE_nG (((av8l_fast_iopte)1) << 11) /* Stage-2 PTE */ @@ -149,13 +142,6 @@ struct 
av8l_fast_io_pgtable { #define AV8L_FAST_PAGE_SHIFT 12 -#define PTE_MAIR_IDX(pte) \ - ((pte >> AV8L_FAST_PTE_ATTRINDX_SHIFT) && \ - AV8L_FAST_PTE_ATTRINDX_MASK) - -#define PTE_SH_IDX(pte) (pte & AV8L_FAST_PTE_SH_MASK) - -#define iopte_pmd_offset(pmds, base, iova) (pmds + ((iova - base) >> 12)) #ifdef CONFIG_IOMMU_IO_PGTABLE_FAST_PROVE_TLB @@ -184,15 +170,12 @@ static void __av8l_check_for_stale_tlb(av8l_fast_iopte *ptep) } } -void av8l_fast_clear_stale_ptes(struct io_pgtable_ops *ops, u64 base, - u64 start, u64 end, bool skip_sync) +void av8l_fast_clear_stale_ptes(av8l_fast_iopte *pmds, bool skip_sync) { int i; - struct av8l_fast_io_pgtable *data = iof_pgtable_ops_to_data(ops); - av8l_fast_iopte *pmdp = iopte_pmd_offset(pmds, base, start); + av8l_fast_iopte *pmdp = pmds; - for (i = start >> AV8L_FAST_PAGE_SHIFT; - i <= (end >> AV8L_FAST_PAGE_SHIFT); ++i) { + for (i = 0; i < ((SZ_1G * 4UL) >> AV8L_FAST_PAGE_SHIFT); ++i) { if (!(*pmdp & AV8L_FAST_PTE_VALID)) { *pmdp = 0; if (!skip_sync) @@ -207,18 +190,11 @@ static void __av8l_check_for_stale_tlb(av8l_fast_iopte *ptep) } #endif -static void av8l_clean_range(struct io_pgtable_ops *ops, - av8l_fast_iopte *start, av8l_fast_iopte *end) -{ - struct io_pgtable *iop = iof_pgtable_ops_to_pgtable(ops); - - if (!(iop->cfg.quirks & IO_PGTABLE_QUIRK_NO_DMA)) - dmac_clean_range(start, end); -} - -static av8l_fast_iopte -av8l_fast_prot_to_pte(struct av8l_fast_io_pgtable *data, int prot) +/* caller must take care of cache maintenance on *ptep */ +int av8l_fast_map_public(av8l_fast_iopte *ptep, phys_addr_t paddr, size_t size, + int prot) { + int i, nptes = size >> AV8L_FAST_PAGE_SHIFT; av8l_fast_iopte pte = AV8L_FAST_PTE_XN | AV8L_FAST_PTE_TYPE_PAGE | AV8L_FAST_PTE_AF @@ -240,67 +216,58 @@ av8l_fast_prot_to_pte(struct av8l_fast_io_pgtable *data, int prot) else pte |= AV8L_FAST_PTE_AP_RW; - return pte; + paddr &= AV8L_FAST_PTE_ADDR_MASK; + for (i = 0; i < nptes; i++, paddr += SZ_4K) { + __av8l_check_for_stale_tlb(ptep + i); + *(ptep 
+ i) = pte | paddr; + } + + return 0; } static int av8l_fast_map(struct io_pgtable_ops *ops, unsigned long iova, phys_addr_t paddr, size_t size, int prot) { struct av8l_fast_io_pgtable *data = iof_pgtable_ops_to_data(ops); - av8l_fast_iopte *ptep = iopte_pmd_offset(data->pmds, data->base, iova); - unsigned long i, nptes = size >> AV8L_FAST_PAGE_SHIFT; - av8l_fast_iopte pte; + av8l_fast_iopte *ptep = iopte_pmd_offset(data->pmds, iova); + unsigned long nptes = size >> AV8L_FAST_PAGE_SHIFT; - pte = av8l_fast_prot_to_pte(data, prot); - paddr &= AV8L_FAST_PTE_ADDR_MASK; - for (i = 0; i < nptes; i++, paddr += SZ_4K) { - __av8l_check_for_stale_tlb(ptep + i); - *(ptep + i) = pte | paddr; - } - av8l_clean_range(ops, ptep, ptep + nptes); + av8l_fast_map_public(ptep, paddr, size, prot); + dmac_clean_range(ptep, ptep + nptes); return 0; } -int av8l_fast_map_public(struct io_pgtable_ops *ops, unsigned long iova, - phys_addr_t paddr, size_t size, int prot) +static void __av8l_fast_unmap(av8l_fast_iopte *ptep, size_t size, + bool need_stale_tlb_tracking) { - return av8l_fast_map(ops, iova, paddr, size, prot); -} - -static size_t -__av8l_fast_unmap(struct io_pgtable_ops *ops, unsigned long iova, - size_t size, bool allow_stale_tlb) -{ - struct av8l_fast_io_pgtable *data = iof_pgtable_ops_to_data(ops); - unsigned long nptes; - av8l_fast_iopte *ptep; - int val = allow_stale_tlb + unsigned long nptes = size >> AV8L_FAST_PAGE_SHIFT; + int val = need_stale_tlb_tracking ? 
AV8L_FAST_PTE_UNMAPPED_NEED_TLBI : 0; - ptep = iopte_pmd_offset(data->pmds, data->base, iova); - nptes = size >> AV8L_FAST_PAGE_SHIFT; - memset(ptep, val, sizeof(*ptep) * nptes); - av8l_clean_range(ops, ptep, ptep + nptes); - if (!allow_stale_tlb) - io_pgtable_tlb_flush_all(&data->iop); - - return size; } -/* caller must take care of tlb cache maintenance */ -void av8l_fast_unmap_public(struct io_pgtable_ops *ops, unsigned long iova, - size_t size) +/* caller must take care of cache maintenance on *ptep */ +void av8l_fast_unmap_public(av8l_fast_iopte *ptep, size_t size) { - __av8l_fast_unmap(ops, iova, size, true); + __av8l_fast_unmap(ptep, size, true); } static size_t av8l_fast_unmap(struct io_pgtable_ops *ops, unsigned long iova, size_t size) { - return __av8l_fast_unmap(ops, iova, size, false); + struct av8l_fast_io_pgtable *data = iof_pgtable_ops_to_data(ops); + struct io_pgtable *iop = &data->iop; + av8l_fast_iopte *ptep = iopte_pmd_offset(data->pmds, iova); + unsigned long nptes = size >> AV8L_FAST_PAGE_SHIFT; + + __av8l_fast_unmap(ptep, size, false); + dmac_clean_range(ptep, ptep + nptes); + io_pgtable_tlb_flush_all(iop); + + return size; } #if defined(CONFIG_ARM64) @@ -345,12 +312,6 @@ static phys_addr_t av8l_fast_iova_to_phys(struct io_pgtable_ops *ops, return phys | (iova & 0xfff); } -phys_addr_t av8l_fast_iova_to_phys_public(struct io_pgtable_ops *ops, - unsigned long iova) -{ - return av8l_fast_iova_to_phys(ops, iova); -} - static int av8l_fast_map_sg(struct io_pgtable_ops *ops, unsigned long iova, struct scatterlist *sg, unsigned int nents, int prot, size_t *size) @@ -358,23 +319,6 @@ static int av8l_fast_map_sg(struct io_pgtable_ops *ops, unsigned long iova, return -ENODEV; } -static bool av8l_fast_iova_coherent(struct io_pgtable_ops *ops, - unsigned long iova) -{ - struct av8l_fast_io_pgtable *data = iof_pgtable_ops_to_data(ops); - av8l_fast_iopte *ptep = iopte_pmd_offset(data->pmds, data->base, iova); - - return ((PTE_MAIR_IDX(*ptep) == 
AV8L_FAST_MAIR_ATTR_IDX_CACHE) && - ((PTE_SH_IDX(*ptep) == AV8L_FAST_PTE_SH_OS) || - (PTE_SH_IDX(*ptep) == AV8L_FAST_PTE_SH_IS))); -} - -bool av8l_fast_iova_coherent_public(struct io_pgtable_ops *ops, - unsigned long iova) -{ - return av8l_fast_iova_coherent(ops, iova); -} - static struct av8l_fast_io_pgtable * av8l_fast_alloc_pgtable_data(struct io_pgtable_cfg *cfg) { @@ -389,14 +333,13 @@ av8l_fast_alloc_pgtable_data(struct io_pgtable_cfg *cfg) .map_sg = av8l_fast_map_sg, .unmap = av8l_fast_unmap, .iova_to_phys = av8l_fast_iova_to_phys, - .is_iova_coherent = av8l_fast_iova_coherent, }; return data; } /* - * We need max 1 page for the pgd, 4 pages for puds (1GB VA per pud page) and + * We need 1 page for the pgd, 4 pages for puds (1GB VA per pud page) and * 2048 pages for pmds (each pud page contains 512 table entries, each * pointing to a pmd). */ @@ -405,38 +348,12 @@ av8l_fast_alloc_pgtable_data(struct io_pgtable_cfg *cfg) #define NUM_PMD_PAGES 2048 #define NUM_PGTBL_PAGES (NUM_PGD_PAGES + NUM_PUD_PAGES + NUM_PMD_PAGES) -/* undefine arch specific definitions which depends on page table format */ -#undef pud_index -#undef pud_mask -#undef pud_next -#undef pmd_index -#undef pmd_mask -#undef pmd_next - -#define pud_index(addr) (((addr) >> 30) & 0x3) -#define pud_mask(addr) ((addr) & ~((1UL << 30) - 1)) -#define pud_next(addr, end) \ -({ unsigned long __boundary = pud_mask(addr + (1UL << 30));\ - (__boundary - 1 < (end) - 1) ? __boundary : (end); \ -}) - -#define pmd_index(addr) (((addr) >> 21) & 0x1ff) -#define pmd_mask(addr) ((addr) & ~((1UL << 21) - 1)) -#define pmd_next(addr, end) \ -({ unsigned long __boundary = pmd_mask(addr + (1UL << 21));\ - (__boundary - 1 < (end) - 1) ? 
__boundary : (end); \ -}) - static int av8l_fast_prepopulate_pgtables(struct av8l_fast_io_pgtable *data, struct io_pgtable_cfg *cfg, void *cookie) { int i, j, pg = 0; struct page **pages, *page; - dma_addr_t base = cfg->iova_base; - dma_addr_t end = cfg->iova_end; - dma_addr_t pud, pmd; - int pmd_pg_index; pages = kmalloc(sizeof(*pages) * NUM_PGTBL_PAGES, __GFP_NOWARN | __GFP_NORETRY); @@ -454,11 +371,10 @@ av8l_fast_prepopulate_pgtables(struct av8l_fast_io_pgtable *data, data->pgd = page_address(page); /* - * We need max 2048 entries at level 2 to map 4GB of VA space. A page - * can hold 512 entries, so we need max 4 pages. + * We need 2048 entries at level 2 to map 4GB of VA space. A page + * can hold 512 entries, so we need 4 pages. */ - for (i = pud_index(base), pud = base; pud < end; - ++i, pud = pud_next(pud, end)) { + for (i = 0; i < 4; ++i) { av8l_fast_iopte pte, *ptep; page = alloc_page(GFP_KERNEL | __GFP_ZERO); @@ -473,15 +389,12 @@ av8l_fast_prepopulate_pgtables(struct av8l_fast_io_pgtable *data, dmac_clean_range(data->pgd, data->pgd + 4); /* - * We have max 4 puds, each of which can point to 512 pmds, so we'll - * have max 2048 pmds, each of which can hold 512 ptes, for a grand + * We have 4 puds, each of which can point to 512 pmds, so we'll + * have 2048 pmds, each of which can hold 512 ptes, for a grand * total of 2048*512=1048576 PTEs. 
*/ - pmd_pg_index = pg; - for (i = pud_index(base), pud = base; pud < end; - ++i, pud = pud_next(pud, end)) { - for (j = pmd_index(pud), pmd = pud; pmd < pud_next(pud, end); - ++j, pmd = pmd_next(pmd, end)) { + for (i = 0; i < 4; ++i) { + for (j = 0; j < 512; ++j) { av8l_fast_iopte pte, *pudp; void *addr; @@ -500,21 +413,21 @@ av8l_fast_prepopulate_pgtables(struct av8l_fast_io_pgtable *data, dmac_clean_range(data->puds[i], data->puds[i] + 512); } + if (WARN_ON(pg != NUM_PGTBL_PAGES)) + goto err_free_pages; + /* * We map the pmds into a virtually contiguous space so that we * don't have to traverse the first two levels of the page tables * to find the appropriate pud. Instead, it will be a simple * offset from the virtual base of the pmds. */ - data->pmds = vmap(&pages[pmd_pg_index], pg - pmd_pg_index, + data->pmds = vmap(&pages[NUM_PGD_PAGES + NUM_PUD_PAGES], NUM_PMD_PAGES, VM_IOREMAP, PAGE_KERNEL); if (!data->pmds) goto err_free_pages; data->pages = pages; - data->nr_pages = pg; - data->base = base; - data->end = end; return 0; err_free_pages: @@ -620,7 +533,7 @@ static void av8l_fast_free_pgtable(struct io_pgtable *iop) struct av8l_fast_io_pgtable *data = iof_pgtable_to_data(iop); vunmap(data->pmds); - for (i = 0; i < data->nr_pages; ++i) + for (i = 0; i < NUM_PGTBL_PAGES; ++i) __free_page(data->pages[i]); kvfree(data->pages); kfree(data); @@ -692,7 +605,6 @@ static int __init av8l_fast_positive_testing(void) struct av8l_fast_io_pgtable *data; av8l_fast_iopte *pmds; u64 max = SZ_1G * 4ULL - 1; - u64 base = 0; cfg = (struct io_pgtable_cfg) { .quirks = 0, @@ -700,8 +612,6 @@ static int __init av8l_fast_positive_testing(void) .ias = 32, .oas = 32, .pgsize_bitmap = SZ_4K, - .iova_base = base, - .iova_end = max, }; cfg_cookie = &cfg; @@ -714,81 +624,81 @@ static int __init av8l_fast_positive_testing(void) pmds = data->pmds; /* map the entire 4GB VA space with 4K map calls */ - for (iova = base; iova < max; iova += SZ_4K) { + for (iova = 0; iova < max; iova += SZ_4K) { 
if (WARN_ON(ops->map(ops, iova, iova, SZ_4K, IOMMU_READ))) { failed++; continue; } } - if (WARN_ON(!av8l_fast_range_has_specific_mapping(ops, base, - base, max - base))) + if (WARN_ON(!av8l_fast_range_has_specific_mapping(ops, 0, 0, + max))) failed++; /* unmap it all */ - for (iova = base; iova < max; iova += SZ_4K) { + for (iova = 0; iova < max; iova += SZ_4K) { if (WARN_ON(ops->unmap(ops, iova, SZ_4K) != SZ_4K)) failed++; } /* sweep up TLB proving PTEs */ - av8l_fast_clear_stale_ptes(ops, base, base, max, false); + av8l_fast_clear_stale_ptes(pmds, false); /* map the entire 4GB VA space with 8K map calls */ - for (iova = base; iova < max; iova += SZ_8K) { + for (iova = 0; iova < max; iova += SZ_8K) { if (WARN_ON(ops->map(ops, iova, iova, SZ_8K, IOMMU_READ))) { failed++; continue; } } - if (WARN_ON(!av8l_fast_range_has_specific_mapping(ops, base, - base, max - base))) + if (WARN_ON(!av8l_fast_range_has_specific_mapping(ops, 0, 0, + max))) failed++; /* unmap it all with 8K unmap calls */ - for (iova = base; iova < max; iova += SZ_8K) { + for (iova = 0; iova < max; iova += SZ_8K) { if (WARN_ON(ops->unmap(ops, iova, SZ_8K) != SZ_8K)) failed++; } /* sweep up TLB proving PTEs */ - av8l_fast_clear_stale_ptes(ops, base, base, max, false); + av8l_fast_clear_stale_ptes(pmds, false); /* map the entire 4GB VA space with 16K map calls */ - for (iova = base; iova < max; iova += SZ_16K) { + for (iova = 0; iova < max; iova += SZ_16K) { if (WARN_ON(ops->map(ops, iova, iova, SZ_16K, IOMMU_READ))) { failed++; continue; } } - if (WARN_ON(!av8l_fast_range_has_specific_mapping(ops, base, - base, max - base))) + if (WARN_ON(!av8l_fast_range_has_specific_mapping(ops, 0, 0, + max))) failed++; /* unmap it all */ - for (iova = base; iova < max; iova += SZ_16K) { + for (iova = 0; iova < max; iova += SZ_16K) { if (WARN_ON(ops->unmap(ops, iova, SZ_16K) != SZ_16K)) failed++; } /* sweep up TLB proving PTEs */ - av8l_fast_clear_stale_ptes(ops, base, base, max, false); + 
av8l_fast_clear_stale_ptes(pmds, false); /* map the entire 4GB VA space with 64K map calls */ - for (iova = base; iova < max; iova += SZ_64K) { + for (iova = 0; iova < max; iova += SZ_64K) { if (WARN_ON(ops->map(ops, iova, iova, SZ_64K, IOMMU_READ))) { failed++; continue; } } - if (WARN_ON(!av8l_fast_range_has_specific_mapping(ops, base, - base, max - base))) + if (WARN_ON(!av8l_fast_range_has_specific_mapping(ops, 0, 0, + max))) failed++; /* unmap it all at once */ - if (WARN_ON(ops->unmap(ops, base, max - base) != (max - base))) + if (WARN_ON(ops->unmap(ops, 0, max) != max)) failed++; free_io_pgtable_ops(ops); diff --git a/drivers/iommu/io-pgtable.h b/drivers/iommu/io-pgtable.h index 294b9aea0b8b..8e03a2c37780 100644 --- a/drivers/iommu/io-pgtable.h +++ b/drivers/iommu/io-pgtable.h @@ -114,8 +114,6 @@ struct io_pgtable_cfg { unsigned int oas; const struct iommu_gather_ops *tlb; struct device *iommu_dev; - dma_addr_t iova_base; - dma_addr_t iova_end; /* Low-level data specific to the table format */ union { diff --git a/drivers/irqchip/irq-gic-v3.c b/drivers/irqchip/irq-gic-v3.c index 7de7727f50f9..370e82d4e8a5 100644 --- a/drivers/irqchip/irq-gic-v3.c +++ b/drivers/irqchip/irq-gic-v3.c @@ -28,10 +28,8 @@ #include #include #include -#include #include - #include #include #include @@ -114,7 +112,7 @@ static void gic_do_wait_for_rwp(void __iomem *base) { u32 count = 1000000; /* 1s! 
*/ - while (readl_relaxed_no_log(base + GICD_CTLR) & GICD_CTLR_RWP) { + while (readl_relaxed(base + GICD_CTLR) & GICD_CTLR_RWP) { count--; if (!count) { pr_err_ratelimited("RWP timeout, gone fishing\n"); @@ -232,8 +230,7 @@ static int gic_peek_irq(struct irq_data *d, u32 offset) else base = gic_data.dist_base; - return !!(readl_relaxed_no_log - (base + offset + (gic_irq(d) / 32) * 4) & mask); + return !!(readl_relaxed(base + offset + (gic_irq(d) / 32) * 4) & mask); } static void gic_poke_irq(struct irq_data *d, u32 offset) @@ -563,7 +560,6 @@ static asmlinkage void __exception_irq_entry gic_handle_irq(struct pt_regs *regs if (likely(irqnr > 15 && irqnr < 1020) || irqnr >= 8192) { int err; - uncached_logk(LOGK_IRQ, (void *)(uintptr_t)irqnr); if (static_key_true(&supports_deactivate)) gic_write_eoir(irqnr); else @@ -584,7 +580,6 @@ static asmlinkage void __exception_irq_entry gic_handle_irq(struct pt_regs *regs continue; } if (irqnr < 16) { - uncached_logk(LOGK_IRQ, (void *)(uintptr_t)irqnr); gic_write_eoir(irqnr); if (static_key_true(&supports_deactivate)) gic_write_dir(irqnr); diff --git a/drivers/irqchip/irq-gic.c b/drivers/irqchip/irq-gic.c index e4020bb8cd2d..e98c5f1a9577 100644 --- a/drivers/irqchip/irq-gic.c +++ b/drivers/irqchip/irq-gic.c @@ -41,7 +41,6 @@ #include #include #include -#include #ifdef CONFIG_PM #include #endif @@ -514,7 +513,6 @@ static void __exception_irq_entry gic_handle_irq(struct pt_regs *regs) writel_relaxed(irqstat, cpu_base + GIC_CPU_EOI); isb(); handle_domain_irq(gic->domain, irqnr, regs); - uncached_logk(LOGK_IRQ, (void *)(uintptr_t)irqnr); continue; } if (irqnr < 16) { @@ -532,7 +530,6 @@ static void __exception_irq_entry gic_handle_irq(struct pt_regs *regs) smp_rmb(); handle_IPI(irqnr, regs); #endif - uncached_logk(LOGK_IRQ, (void *)(uintptr_t)irqnr); continue; } break; diff --git a/drivers/irqchip/qcom/pdc.c b/drivers/irqchip/qcom/pdc.c index acc995e2e1b1..68dc2b1fa33c 100644 --- a/drivers/irqchip/qcom/pdc.c +++ 
b/drivers/irqchip/qcom/pdc.c @@ -376,6 +376,10 @@ int qcom_pdc_init(struct device_node *node, if (pin_count % IRQS_PER_REG) max_enable_regs++; + if (pdc_domain->flags & IRQ_DOMAIN_NAME_ALLOCATED) { + pdc_domain->flags &= ~IRQ_DOMAIN_NAME_ALLOCATED; + kfree(pdc_domain->name); + } pdc_domain->name = "qcom,pdc"; return 0; diff --git a/drivers/mailbox/msm_qmp.c b/drivers/mailbox/msm_qmp.c index 6df1c0d3baef..226d1d7f84ff 100644 --- a/drivers/mailbox/msm_qmp.c +++ b/drivers/mailbox/msm_qmp.c @@ -39,17 +39,11 @@ #define MSG_RAM_ALIGN_BYTES 3 #define QMP_IPC_LOG_PAGE_CNT 2 -#define QMP_INFO(ctxt, x, ...) \ -do { \ - if (ctxt) \ - ipc_log_string(ctxt, "[%s]: "x, __func__, ##__VA_ARGS__); \ -} while (0) +#define QMP_INFO(ctxt, x, ...) ((void)0) #define QMP_ERR(ctxt, x, ...) \ do { \ pr_err_ratelimited("[%s]: "x, __func__, ##__VA_ARGS__); \ - if (ctxt) \ - ipc_log_string(ctxt, "[%s]: "x, __func__, ##__VA_ARGS__); \ } while (0) /** diff --git a/drivers/mailbox/qcom-rpmh-mailbox.c b/drivers/mailbox/qcom-rpmh-mailbox.c index 2f5a5c384947..45d2887aaaa5 100644 --- a/drivers/mailbox/qcom-rpmh-mailbox.c +++ b/drivers/mailbox/qcom-rpmh-mailbox.c @@ -1015,7 +1015,7 @@ tx_fail: /* If we were just busy waiting for TCS, dump the state and return */ if (ret == -EBUSY) { - dev_err_ratelimited(chan->cl->dev, + dev_dbg(chan->cl->dev, "TCS Busy, retrying RPMH message send\n"); ret = -EAGAIN; } diff --git a/drivers/media/platform/msm/ais/cam_utils/cam_io_util.c b/drivers/media/platform/msm/ais/cam_utils/cam_io_util.c index 8d5f96ac816f..13a7fdcfa718 100644 --- a/drivers/media/platform/msm/ais/cam_utils/cam_io_util.c +++ b/drivers/media/platform/msm/ais/cam_utils/cam_io_util.c @@ -23,7 +23,7 @@ int cam_io_w(uint32_t data, void __iomem *addr) return -EINVAL; CAM_DBG(CAM_UTIL, "0x%pK %08x", addr, data); - writel_relaxed_no_log(data, addr); + writel_relaxed(data, addr); return 0; } @@ -36,7 +36,7 @@ int cam_io_w_mb(uint32_t data, void __iomem *addr) CAM_DBG(CAM_UTIL, "0x%pK %08x", addr, data); /* 
Ensure previous writes are done */ wmb(); - writel_relaxed_no_log(data, addr); + writel_relaxed(data, addr); /* Ensure previous writes are done */ wmb(); diff --git a/drivers/media/platform/msm/camera/cam_sync/cam_sync.c b/drivers/media/platform/msm/camera/cam_sync/cam_sync.c index 5230261888b7..48af5249ff5e 100644 --- a/drivers/media/platform/msm/camera/cam_sync/cam_sync.c +++ b/drivers/media/platform/msm/camera/cam_sync/cam_sync.c @@ -21,6 +21,7 @@ #include "cam_common_util.h" struct sync_device *sync_dev; +static struct kmem_cache *kmem_payload_pool; /* * Flag to determine whether to enqueue cb of a @@ -632,7 +633,7 @@ static int cam_sync_handle_register_user_payload( if (sync_obj >= CAM_SYNC_MAX_OBJS || sync_obj <= 0) return -EINVAL; - user_payload_kernel = kzalloc(sizeof(*user_payload_kernel), GFP_KERNEL); + user_payload_kernel = kmem_cache_zalloc(kmem_payload_pool, GFP_KERNEL); if (!user_payload_kernel) return -ENOMEM; @@ -648,7 +649,7 @@ static int cam_sync_handle_register_user_payload( "Error: accessing an uninitialized sync obj = %d", sync_obj); spin_unlock_bh(&sync_dev->row_spinlocks[sync_obj]); - kfree(user_payload_kernel); + kmem_cache_free(kmem_payload_pool, user_payload_kernel); return -EINVAL; } @@ -662,7 +663,7 @@ static int cam_sync_handle_register_user_payload( CAM_SYNC_USER_PAYLOAD_SIZE * sizeof(__u64)); spin_unlock_bh(&sync_dev->row_spinlocks[sync_obj]); - kfree(user_payload_kernel); + kmem_cache_free(kmem_payload_pool, user_payload_kernel); return 0; } @@ -676,7 +677,7 @@ static int cam_sync_handle_register_user_payload( user_payload_kernel->payload_data[1]) { spin_unlock_bh(&sync_dev->row_spinlocks[sync_obj]); - kfree(user_payload_kernel); + kmem_cache_free(kmem_payload_pool, user_payload_kernel); return -EALREADY; } } @@ -731,7 +732,7 @@ static int cam_sync_handle_deregister_user_payload( user_payload_kernel->payload_data[1] == userpayload_info.payload[1]) { list_del_init(&user_payload_kernel->list); - kfree(user_payload_kernel); + 
kmem_cache_free(kmem_payload_pool, user_payload_kernel); } } @@ -1133,6 +1134,8 @@ static int __init cam_sync_init(void) { int rc; + kmem_payload_pool = KMEM_CACHE(sync_user_payload, SLAB_HWCACHE_ALIGN | SLAB_PANIC); + rc = platform_device_register(&cam_sync_device); if (rc) return -ENODEV; @@ -1149,6 +1152,8 @@ static void __exit cam_sync_exit(void) platform_driver_unregister(&cam_sync_driver); platform_device_unregister(&cam_sync_device); kfree(sync_dev); + + kmem_cache_destroy(kmem_payload_pool); } module_init(cam_sync_init); diff --git a/drivers/media/platform/msm/camera/cam_utils/cam_io_util.c b/drivers/media/platform/msm/camera/cam_utils/cam_io_util.c index af785af68f40..547f204a1990 100644 --- a/drivers/media/platform/msm/camera/cam_utils/cam_io_util.c +++ b/drivers/media/platform/msm/camera/cam_utils/cam_io_util.c @@ -23,7 +23,7 @@ int cam_io_w(uint32_t data, void __iomem *addr) return -EINVAL; CAM_DBG(CAM_UTIL, "0x%pK %08x", addr, data); - writel_relaxed_no_log(data, addr); + writel_relaxed(data, addr); return 0; } @@ -36,7 +36,7 @@ int cam_io_w_mb(uint32_t data, void __iomem *addr) CAM_DBG(CAM_UTIL, "0x%pK %08x", addr, data); /* Ensure previous writes are done */ wmb(); - writel_relaxed_no_log(data, addr); + writel_relaxed(data, addr); /* Ensure previous writes are done */ wmb(); diff --git a/drivers/media/platform/msm/npu/Kconfig b/drivers/media/platform/msm/npu/Kconfig index 5299ed665bda..d5586f65649c 100644 --- a/drivers/media/platform/msm/npu/Kconfig +++ b/drivers/media/platform/msm/npu/Kconfig @@ -8,3 +8,9 @@ config MSM_NPU This module serves as the common driver for npu which provides acceleration for neural network processing. 
+ +config MSM_NPU_DEBUG_FS + + bool "debugging framework of QTI MSM Neural Processing Unit" + depends on MSM_NPU || DEBUG_FS + default n diff --git a/drivers/media/platform/msm/npu/Makefile b/drivers/media/platform/msm/npu/Makefile index 712f22e2fca9..a93530f67546 100644 --- a/drivers/media/platform/msm/npu/Makefile +++ b/drivers/media/platform/msm/npu/Makefile @@ -1,7 +1,7 @@ obj-$(CONFIG_MSM_NPU) := msm_npu.o -msm_npu-objs := npu_dbg.o \ - npu_dev.o \ - npu_debugfs.o \ +msm_npu-objs := npu_dev.o \ npu_host_ipc.o \ npu_hw_access.o \ npu_mgr.o + +obj-$(CONFIG_MSM_NPU_DEBUG_FS) += npu_debugfs.o npu_dbg.o diff --git a/drivers/media/platform/msm/npu/npu_common.h b/drivers/media/platform/msm/npu/npu_common.h index 9481582da38d..08e2bfff8eca 100644 --- a/drivers/media/platform/msm/npu/npu_common.h +++ b/drivers/media/platform/msm/npu/npu_common.h @@ -270,8 +270,10 @@ struct npu_client { * Function Prototypes * ------------------------------------------------------------------------- */ -int npu_debugfs_init(struct npu_device *npu_dev); -void npu_debugfs_deinit(struct npu_device *npu_dev); +#ifdef CONFIG_MSM_NPU_DEBUG_FS +static inline int npu_debugfs_init(struct npu_device *npu_dev); +static inline void npu_debugfs_deinit(struct npu_device *npu_dev); +#endif int npu_enable_core_power(struct npu_device *npu_dev); void npu_disable_core_power(struct npu_device *npu_dev); diff --git a/drivers/media/platform/msm/npu/npu_debugfs.c b/drivers/media/platform/msm/npu/npu_debugfs.c index fb1f62f64aa1..0b08b4d86af2 100644 --- a/drivers/media/platform/msm/npu/npu_debugfs.c +++ b/drivers/media/platform/msm/npu/npu_debugfs.c @@ -376,7 +376,7 @@ int npu_debugfs_init(struct npu_device *npu_dev) debugfs->root = debugfs_create_dir("npu", NULL); if (IS_ERR_OR_NULL(debugfs->root)) { - pr_err("debugfs_create_dir for npu failed, error %ld\n", + pr_debug("debugfs_create_dir for npu failed, error %ld\n", PTR_ERR(debugfs->root)); return -ENODEV; } diff --git 
a/drivers/media/platform/msm/npu/npu_dev.c b/drivers/media/platform/msm/npu/npu_dev.c index 265986259a7f..65c189d9d344 100644 --- a/drivers/media/platform/msm/npu/npu_dev.c +++ b/drivers/media/platform/msm/npu/npu_dev.c @@ -2314,7 +2314,9 @@ static int npu_probe(struct platform_device *pdev) if (rc) goto error_driver_init; +#ifdef CONFIG_MSM_NPU_DEBUG_FS npu_debugfs_init(npu_dev); +#endif npu_dev->smmu_ctx.attach_cnt = 0; npu_dev->smmu_ctx.mmu_mapping = arm_iommu_create_mapping( @@ -2371,7 +2373,9 @@ static int npu_remove(struct platform_device *pdev) npu_host_deinit(npu_dev); arm_iommu_detach_device(&(npu_dev->pdev->dev)); arm_iommu_release_mapping(npu_dev->smmu_ctx.mmu_mapping); +#ifdef CONFIG_MSM_NPU_DEBUG_FS npu_debugfs_deinit(npu_dev); +#endif npu_cdsprm_cxlimit_deinit(npu_dev); if (npu_dev->tcdev) thermal_cooling_device_unregister(npu_dev->tcdev); diff --git a/drivers/media/platform/msm/npu/npu_mgr.c b/drivers/media/platform/msm/npu/npu_mgr.c index 539cf843872f..952fd8ac7b5d 100644 --- a/drivers/media/platform/msm/npu/npu_mgr.c +++ b/drivers/media/platform/msm/npu/npu_mgr.c @@ -1845,8 +1845,10 @@ int32_t npu_host_exec_network(struct npu_client *client, mutex_lock(&host_ctx->lock); if (!ret) { pr_err_ratelimited("npu: NPU_IPC_CMD_EXECUTE time out\n"); +#ifdef CONFIG_MSM_NPU_DEBUG_FS /* dump debug stats */ npu_dump_debug_timeout_stats(npu_dev); +#endif network->cmd_pending = false; ret = -ETIMEDOUT; goto exec_done; @@ -1988,8 +1990,10 @@ int32_t npu_host_exec_network_v2(struct npu_client *client, mutex_lock(&host_ctx->lock); if (!ret) { pr_err_ratelimited("npu: NPU_IPC_CMD_EXECUTE_V2 time out\n"); +#ifdef CONFIG_MSM_NPU_DEBUG_FS /* dump debug stats */ npu_dump_debug_timeout_stats(npu_dev); +#endif network->cmd_pending = false; ret = -ETIMEDOUT; goto free_exec_packet; diff --git a/drivers/media/platform/msm/npu_v2/npu_debugfs.c b/drivers/media/platform/msm/npu_v2/npu_debugfs.c index 468cc6b751fb..3e6b9ca68d5b 100644 --- 
a/drivers/media/platform/msm/npu_v2/npu_debugfs.c +++ b/drivers/media/platform/msm/npu_v2/npu_debugfs.c @@ -363,7 +363,7 @@ int npu_debugfs_init(struct npu_device *npu_dev) debugfs->root = debugfs_create_dir("npu", NULL); if (IS_ERR_OR_NULL(debugfs->root)) { - NPU_ERR("debugfs_create_dir for npu failed, error %ld\n", + NPU_DBG("debugfs_create_dir for npu failed, error %ld\n", PTR_ERR(debugfs->root)); return -ENODEV; } diff --git a/drivers/media/platform/msm/sde/rotator/sde_rotator_r3.c b/drivers/media/platform/msm/sde/rotator/sde_rotator_r3.c index 88d0b2e76ff6..6af18bc61314 100644 --- a/drivers/media/platform/msm/sde/rotator/sde_rotator_r3.c +++ b/drivers/media/platform/msm/sde/rotator/sde_rotator_r3.c @@ -68,12 +68,12 @@ do { \ SDEROT_DBG("SDEREG.W:[%s:0x%X] <= 0x%X\n", #off, (off),\ (u32)(data));\ - writel_relaxed_no_log( \ + writel_relaxed( \ (REGDMA_OP_REGWRITE | \ ((off) & REGDMA_ADDR_OFFSET_MASK)), \ p); \ p += sizeof(u32); \ - writel_relaxed_no_log(data, p); \ + writel_relaxed(data, p); \ p += sizeof(u32); \ } while (0) @@ -81,14 +81,14 @@ do { \ SDEROT_DBG("SDEREG.M:[%s:0x%X] <= 0x%X\n", #off, (off),\ (u32)(data));\ - writel_relaxed_no_log( \ + writel_relaxed( \ (REGDMA_OP_REGMODIFY | \ ((off) & REGDMA_ADDR_OFFSET_MASK)), \ p); \ p += sizeof(u32); \ - writel_relaxed_no_log(mask, p); \ + writel_relaxed(mask, p); \ p += sizeof(u32); \ - writel_relaxed_no_log(data, p); \ + writel_relaxed(data, p); \ p += sizeof(u32); \ } while (0) @@ -96,25 +96,25 @@ do { \ SDEROT_DBG("SDEREG.B:[%s:0x%X:0x%X]\n", #off, (off),\ (u32)(len));\ - writel_relaxed_no_log( \ + writel_relaxed( \ (REGDMA_OP_BLKWRITE_INC | \ ((off) & REGDMA_ADDR_OFFSET_MASK)), \ p); \ p += sizeof(u32); \ - writel_relaxed_no_log(len, p); \ + writel_relaxed(len, p); \ p += sizeof(u32); \ } while (0) #define SDE_REGDMA_BLKWRITE_DATA(p, data) \ do { \ SDEROT_DBG("SDEREG.I:[:] <= 0x%X\n", (u32)(data));\ - writel_relaxed_no_log(data, p); \ + writel_relaxed(data, p); \ p += sizeof(u32); \ } while (0) #define 
SDE_REGDMA_READ(p, data) \ do { \ - data = readl_relaxed_no_log(p); \ + data = readl_relaxed(p); \ p += sizeof(u32); \ } while (0) @@ -2041,7 +2041,7 @@ static u32 sde_hw_rotator_start_no_regdma(struct sde_hw_rotator_context *ctx, /* Write all command stream to Rotator blocks */ /* Rotator will start right away after command stream finish writing */ while (mem_rdptr < wrptr) { - u32 op = REGDMA_OP_MASK & readl_relaxed_no_log(mem_rdptr); + u32 op = REGDMA_OP_MASK & readl_relaxed(mem_rdptr); switch (op) { case REGDMA_OP_NOP: diff --git a/drivers/media/platform/msm/sde/rotator/sde_rotator_smmu.c b/drivers/media/platform/msm/sde/rotator/sde_rotator_smmu.c index 4ab9cae72171..4658f2918898 100644 --- a/drivers/media/platform/msm/sde/rotator/sde_rotator_smmu.c +++ b/drivers/media/platform/msm/sde/rotator/sde_rotator_smmu.c @@ -772,6 +772,8 @@ static struct platform_driver sde_smmu_driver = { .driver = { .name = "sde_smmu", .of_match_table = sde_smmu_dt_match, + .probe_type = PROBE_PREFER_ASYNCHRONOUS, + }, }; diff --git a/drivers/media/platform/msm/vidc/Makefile b/drivers/media/platform/msm/vidc/Makefile index 6e408920224f..29116d52d3f0 100644 --- a/drivers/media/platform/msm/vidc/Makefile +++ b/drivers/media/platform/msm/vidc/Makefile @@ -21,3 +21,5 @@ msm-vidc-objs := msm_v4l2_vidc.o \ obj-$(CONFIG_MSM_VIDC_V4L2) := msm-vidc.o obj-$(CONFIG_MSM_VIDC_V4L2) += governors/ + +ccflags-y := -DCONFIG_DEBUG_FS diff --git a/drivers/media/platform/msm/vidc/msm_v4l2_vidc.c b/drivers/media/platform/msm/vidc/msm_v4l2_vidc.c index 579d3d9a9cac..c8306b70ddf4 100644 --- a/drivers/media/platform/msm/vidc/msm_v4l2_vidc.c +++ b/drivers/media/platform/msm/vidc/msm_v4l2_vidc.c @@ -886,10 +886,13 @@ static struct platform_driver msm_vidc_driver = { }, }; +extern void __init init_vidc_kmem_buf_pool(void); static int __init msm_vidc_init(void) { int rc = 0; + init_vidc_kmem_buf_pool(); + vidc_driver = kzalloc(sizeof(*vidc_driver), GFP_KERNEL); if (!vidc_driver) { @@ -901,9 +904,11 @@ static int 
__init msm_vidc_init(void) INIT_LIST_HEAD(&vidc_driver->cores); mutex_init(&vidc_driver->lock); vidc_driver->debugfs_root = msm_vidc_debugfs_init_drv(); +#ifdef CONFIG_DEBUG_FS if (!vidc_driver->debugfs_root) dprintk(VIDC_ERR, "Failed to create debugfs for msm_vidc\n"); +#endif rc = platform_driver_register(&msm_vidc_driver); if (rc) { diff --git a/drivers/media/platform/msm/vidc/msm_vidc.c b/drivers/media/platform/msm/vidc/msm_vidc.c index 740df57d2ead..d2e5861cba70 100644 --- a/drivers/media/platform/msm/vidc/msm_vidc.c +++ b/drivers/media/platform/msm/vidc/msm_vidc.c @@ -520,6 +520,11 @@ int msm_vidc_qbuf(void *instance, struct v4l2_buffer *b) __func__, inst); return -EINVAL; } + if (inst->in_flush && inst->session_type == MSM_VIDC_DECODER && + b->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) { + dprintk(VIDC_ERR, "%s: session in flush, discarding qbuf\n", __func__); + return -EINVAL; + } for (i = 0; i < b->length; i++) { b->m.planes[i].m.fd = b->m.planes[i].reserved[0]; @@ -1873,7 +1878,7 @@ void *msm_vidc_open(int core_id, int session_type) goto err_invalid_core; } - pr_info(VIDC_DBG_TAG "Opening video instance: %pK, %d\n", + pr_debug(VIDC_DBG_TAG "Opening video instance: %pK, %d\n", "info", inst, session_type); mutex_init(&inst->sync_lock); mutex_init(&inst->bufq[CAPTURE_PORT].lock); @@ -2161,7 +2166,7 @@ int msm_vidc_destroy(struct msm_vidc_inst *inst) msm_vidc_debugfs_deinit_inst(inst); - pr_info(VIDC_DBG_TAG "Closed video instance: %pK\n", + pr_debug(VIDC_DBG_TAG "Closed video instance: %pK\n", "info", inst); kfree(inst); return 0; diff --git a/drivers/media/platform/msm/vidc/msm_vidc_clocks.c b/drivers/media/platform/msm/vidc/msm_vidc_clocks.c index 9d53d7f58ada..9a35fed6474c 100644 --- a/drivers/media/platform/msm/vidc/msm_vidc_clocks.c +++ b/drivers/media/platform/msm/vidc/msm_vidc_clocks.c @@ -198,7 +198,7 @@ int msm_comm_vote_bus(struct msm_vidc_core *core) int rc = 0, vote_data_count = 0, i = 0; struct hfi_device *hdev; struct msm_vidc_inst *inst = 
NULL; - struct vidc_bus_vote_data *vote_data = NULL; + struct vidc_bus_vote_data vote_data[MAX_SUPPORTED_INSTANCES] __aligned(8); bool is_turbo = false; if (!core || !core->device) { @@ -207,19 +207,7 @@ int msm_comm_vote_bus(struct msm_vidc_core *core) } hdev = core->device; - vote_data = kzalloc(sizeof(struct vidc_bus_vote_data) * - MAX_SUPPORTED_INSTANCES, GFP_ATOMIC); - if (!vote_data) { - dprintk(VIDC_DBG, - "vote_data allocation with GFP_ATOMIC failed\n"); - vote_data = kzalloc(sizeof(struct vidc_bus_vote_data) * - MAX_SUPPORTED_INSTANCES, GFP_KERNEL); - if (!vote_data) { - dprintk(VIDC_DBG, - "vote_data allocation failed\n"); - return -EINVAL; - } - } + memset(vote_data, 0, sizeof(struct vidc_bus_vote_data)); mutex_lock(&core->lock); list_for_each_entry(inst, &core->instances, list) { @@ -350,7 +338,6 @@ int msm_comm_vote_bus(struct msm_vidc_core *core) rc = call_hfi_op(hdev, vote_bus, hdev->hfi_device_data, vote_data, vote_data_count); - kfree(vote_data); return rc; } diff --git a/drivers/media/platform/msm/vidc/msm_vidc_common.c b/drivers/media/platform/msm/vidc/msm_vidc_common.c index 4981644bd550..37aaa6e7c709 100644 --- a/drivers/media/platform/msm/vidc/msm_vidc_common.c +++ b/drivers/media/platform/msm/vidc/msm_vidc_common.c @@ -24,6 +24,13 @@ #include "msm_vidc_clocks.h" #include "msm_cvp.h" +static struct kmem_cache *kmem_buf_pool; + +void __init init_vidc_kmem_buf_pool(void) +{ + kmem_buf_pool = KMEM_CACHE(msm_vidc_buffer, SLAB_HWCACHE_ALIGN | SLAB_PANIC); +} + #define MSM_VIDC_QBUF_BATCH_TIMEOUT 300 #define IS_ALREADY_IN_STATE(__p, __d) (\ (__p >= __d)\ @@ -6654,7 +6661,7 @@ struct msm_vidc_buffer *msm_comm_get_vidc_buffer(struct msm_vidc_inst *inst, if (!found) { /* this is new vb2_buffer */ - mbuf = kzalloc(sizeof(struct msm_vidc_buffer), GFP_KERNEL); + mbuf = kmem_cache_zalloc(kmem_buf_pool, GFP_KERNEL); if (!mbuf) { dprintk(VIDC_ERR, "%s: alloc msm_vidc_buffer failed\n", __func__); @@ -6942,7 +6949,7 @@ static void kref_free_mbuf(struct kref 
*kref) struct msm_vidc_buffer *mbuf = container_of(kref, struct msm_vidc_buffer, kref); - kfree(mbuf); + kmem_cache_free(kmem_buf_pool, mbuf); } void kref_put_mbuf(struct msm_vidc_buffer *mbuf) diff --git a/drivers/media/platform/msm/vidc/msm_vidc_debug.c b/drivers/media/platform/msm/vidc/msm_vidc_debug.c index 8d81a1e70288..31f21c8d4320 100644 --- a/drivers/media/platform/msm/vidc/msm_vidc_debug.c +++ b/drivers/media/platform/msm/vidc/msm_vidc_debug.c @@ -25,7 +25,7 @@ EXPORT_SYMBOL(msm_vidc_debug_out); /* 0x18 = HFI_DEBUG_MSG_FATAL | HFI_DEBUG_MSG_ERROR */ int msm_vidc_fw_debug = 0x18; -int msm_vidc_fw_debug_mode = 1; +int msm_vidc_fw_debug_mode = 0; int msm_vidc_fw_low_power_mode = 1; bool msm_vidc_fw_coverage = !true; bool msm_vidc_thermal_mitigation_disabled = !true; @@ -287,7 +287,9 @@ struct dentry *msm_vidc_debugfs_init_core(struct msm_vidc_core *core, dir = debugfs_create_dir(debugfs_name, parent); if (IS_ERR_OR_NULL(dir)) { dir = NULL; +#ifdef CONFIG_DEBUG_FS dprintk(VIDC_ERR, "Failed to create debugfs for msm_vidc\n"); +#endif goto failed_create_dir; } if (!debugfs_create_file("info", 0444, dir, core, &core_info_fops)) { @@ -506,7 +508,9 @@ struct dentry *msm_vidc_debugfs_init_inst(struct msm_vidc_inst *inst, dir = debugfs_create_dir(debugfs_name, parent); if (IS_ERR_OR_NULL(dir)) { dir = NULL; +#ifdef CONFIG_DEBUG_FS dprintk(VIDC_ERR, "Failed to create debugfs for msm_vidc\n"); +#endif goto failed_create_dir; } diff --git a/drivers/media/v4l2-core/v4l2-ioctl.c b/drivers/media/v4l2-core/v4l2-ioctl.c index 30d8f6a60557..30dc18f29618 100644 --- a/drivers/media/v4l2-core/v4l2-ioctl.c +++ b/drivers/media/v4l2-core/v4l2-ioctl.c @@ -2947,7 +2947,7 @@ long video_usercopy(struct file *file, unsigned int cmd, unsigned long arg, v4l2_kioctl func) { - char sbuf[128]; + char sbuf[SZ_1K]; void *mbuf = NULL; void *parg = (void *)arg; long err = -EINVAL; diff --git a/drivers/misc/Kconfig b/drivers/misc/Kconfig index 2455d1d16a61..6506ccfe111f 100644 --- 
a/drivers/misc/Kconfig +++ b/drivers/misc/Kconfig @@ -539,7 +539,7 @@ config PCI_ENDPOINT_TEST config UID_SYS_STATS bool "Per-UID statistics" - depends on PROFILING && TASK_XACCT && TASK_IO_ACCOUNTING + depends on TASK_XACCT && TASK_IO_ACCOUNTING help Per UID based cpu time statistics exported to /proc/uid_cputime Per UID based io statistics exported to /proc/uid_io @@ -554,7 +554,6 @@ config UID_SYS_STATS_DEBUG config MEMORY_STATE_TIME tristate "Memory freq/bandwidth time statistics" - depends on PROFILING help Memory time statistics exported to /sys/kernel/memory_state_time diff --git a/drivers/misc/akm09970.c b/drivers/misc/akm09970.c index 45b9a0e63d70..8034d2747780 100644 --- a/drivers/misc/akm09970.c +++ b/drivers/misc/akm09970.c @@ -197,7 +197,7 @@ static int akm09970_active(struct akm09970_soc_ctrl *c_ctrl, bool on) int rc = 0; uint8_t mode = 0x00; - pr_info("akm sensor %s\n", on ? "on" : "off"); + pr_debug("akm sensor %s\n", on ? "on" : "off"); if (!atomic_read(&c_ctrl->power_enabled) && on) { rc = akm09970_power_up(c_ctrl); @@ -249,7 +249,7 @@ static int akm09970_active(struct akm09970_soc_ctrl *c_ctrl, bool on) akm09970_power_down(c_ctrl); hrtimer_cancel(&c_ctrl->timer); } else { - pr_info("The same power state, do nothing!\n"); + pr_debug("The same power state, do nothing!\n"); } return 0; @@ -664,7 +664,7 @@ static int akm09970_probe(struct i2c_client *client, int rc = 0; struct akm09970_soc_ctrl *c_ctrl = NULL; - pr_info("Probe enter\n"); + pr_debug("Probe enter\n"); if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) { pr_err("Check_functionality failed\n"); @@ -838,7 +838,7 @@ static int akm09970_remove(struct i2c_client *client) c_ctrl->pinctrl = NULL; } - pr_info("Removed exit\n"); + pr_debug("Removed exit\n"); return 0; } diff --git a/drivers/misc/drv8846.c b/drivers/misc/drv8846.c index ae4d3e83b3dc..3722cd62fe6f 100644 --- a/drivers/misc/drv8846.c +++ b/drivers/misc/drv8846.c @@ -177,7 +177,7 @@ static enum hrtimer_restart 
pwm_hrtimer_handler(struct hrtimer *timer) break; default: mctrl->pwm_setting.duty_ns = 0; - pr_info("default state."); + pr_debug("default state."); } schedule_work(&mctrl->pwm_apply_work); @@ -474,84 +474,84 @@ int drv8846_parse_dt(struct drv8846_soc_ctrl *mctrl) mctrl->gpio_mode0 = of_get_named_gpio_flags(of_node, "motor,gpio-mode0", 0, NULL); if (!gpio_is_valid(mctrl->gpio_mode0)) { - pr_info("mctrl->motor_data.gpio_mode0 is invalid."); + pr_debug("mctrl->motor_data.gpio_mode0 is invalid."); return -EINVAL; } mctrl->gpio_mode1 = of_get_named_gpio_flags(of_node, "motor,gpio-mode1", 0, NULL); if (!gpio_is_valid(mctrl->gpio_mode1)) { - pr_info("motor,mode1-gpio is invalid."); + pr_debug("motor,mode1-gpio is invalid."); return -EINVAL; } mctrl->gpio_sleep = of_get_named_gpio_flags(of_node, "motor,gpio-sleep", 0, NULL); if (!gpio_is_valid(mctrl->gpio_sleep)) { - pr_info("motor,sleep-gpio is invalid."); + pr_debug("motor,sleep-gpio is invalid."); return -EINVAL; } mctrl->gpio_dir = of_get_named_gpio_flags(of_node, "motor,gpio-dir", 0, NULL); if (!gpio_is_valid(mctrl->gpio_dir)) { - pr_info("motor,dir-gpio is invalid."); + pr_debug("motor,dir-gpio is invalid."); return -EINVAL; } mctrl->gpio_pwren = of_get_named_gpio_flags(of_node, "motor,gpio-pwren", 0, NULL); if (!gpio_is_valid(mctrl->gpio_pwren)) { - pr_info("power,en-gpio is invalid."); + pr_debug("power,en-gpio is invalid."); return -EINVAL; } rc = of_property_read_u32(of_node, "motor,rampup-pwm-period-ns", &mctrl->rampup_period_ns); if (rc < 0) { - pr_info("motor,rampup-pwm-period-ns not set, use default."); + pr_debug("motor,rampup-pwm-period-ns not set, use default."); mctrl->rampup_period_ns = RAMP_PERIOD_DEFAULT_NS; } rc = of_property_read_u32(of_node, "motor,high-pwm-period-ns", &mctrl->high_period_ns); if (rc < 0) { - pr_info("motor,high-pwm-period-ns not set, use default."); + pr_debug("motor,high-pwm-period-ns not set, use default."); mctrl->high_period_ns = HIGH_PERIOD_DEFAULT_NS; } rc = 
of_property_read_u32(of_node, "motor,rampdown-pwm-period-ns", &mctrl->rampdown_period_ns); if (rc < 0) { - pr_info("motor,rampdown-pwm-period-ns not set, use default."); + pr_debug("motor,rampdown-pwm-period-ns not set, use default."); mctrl->rampdown_period_ns = RAMP_PERIOD_DEFAULT_NS; } rc = of_property_read_u32(of_node, "motor,rampup-duration-ms", &mctrl->rampup_duration_ms); if (rc < 0) { - pr_info("motor,rampup-duration-ms not set, use default."); + pr_debug("motor,rampup-duration-ms not set, use default."); mctrl->rampup_duration_ms = RAMP_DURATION_DEFAULT_MS; } rc = of_property_read_u32(of_node, "motor,high-duration-ms", &mctrl->high_duration_ms); if (rc < 0) { - pr_info("motor,high-duration-ms not set, use default."); + pr_debug("motor,high-duration-ms not set, use default."); mctrl->high_duration_ms = HIGH_DURATION_DEFAULT_MS; } rc = of_property_read_u32(of_node, "motor,rampdown-duration-ms", &mctrl->rampdown_duration_ms); if (rc < 0) { - pr_info("motor,rampdown-duration-ms not set, use default."); + pr_debug("motor,rampdown-duration-ms not set, use default."); mctrl->rampdown_duration_ms = RAMP_DURATION_DEFAULT_MS; } rc = of_property_read_u32(of_node, "motor,step-mode", &mctrl->step_mode); if (rc < 0) { - pr_info("motor,step-mode not set, use default."); + pr_debug("motor,step-mode not set, use default."); mctrl->step_mode = DEFAULT_STEP_MODE; } diff --git a/drivers/net/usb/compatibility.h b/drivers/net/usb/compatibility.h new file mode 100644 index 000000000000..208d90c0afbb --- /dev/null +++ b/drivers/net/usb/compatibility.h @@ -0,0 +1,508 @@ +#ifndef LINUX_COMPATIBILITY_H +#define LINUX_COMPATIBILITY_H + +/* + * Definition and macro + */ + +#include +#include +#include + +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,31) + #include +#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,7,0) + #include +#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(3,7,0) */ +#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,31) */ + +#if LINUX_VERSION_CODE < 
KERNEL_VERSION(4,12,0) + #define SPEED_2500 2500 + #define SPEED_25000 25000 +#if LINUX_VERSION_CODE < KERNEL_VERSION(4,9,0) + #define BMCR_SPEED10 0x0000 +#if LINUX_VERSION_CODE < KERNEL_VERSION(4,5,0) + #define NETIF_F_CSUM_MASK NETIF_F_ALL_CSUM +#if LINUX_VERSION_CODE < KERNEL_VERSION(4,1,0) + #define IS_REACHABLE(option) (defined(option) || \ + (defined(option##_MODULE) && defined(MODULE))) +#if LINUX_VERSION_CODE < KERNEL_VERSION(4,0,0) + #define skb_vlan_tag_present(__skb) vlan_tx_tag_present(__skb) + #define skb_vlan_tag_get(__skb) vlan_tx_tag_get(__skb) + #define skb_vlan_tag_get_id(__skb) vlan_tx_tag_get_id(__skb) +#if LINUX_VERSION_CODE < KERNEL_VERSION(3,19,0) + #define napi_alloc_skb(napi, length) netdev_alloc_skb_ip_align(netdev,length) + #define napi_complete_done(n, d) napi_complete(n) +#if LINUX_VERSION_CODE < KERNEL_VERSION(3,16,0) + #ifndef smp_mb__before_atomic + #define smp_mb__before_atomic() smp_mb() + #endif + + #ifndef smp_mb__after_atomic + #define smp_mb__after_atomic() smp_mb() + #endif +#if LINUX_VERSION_CODE < KERNEL_VERSION(3,15,0) + #define IS_ERR_OR_NULL(ptr) (!ptr) +#if LINUX_VERSION_CODE < KERNEL_VERSION(3,14,0) + #define ether_addr_copy(dst, src) memcpy(dst, src, ETH_ALEN) +#if LINUX_VERSION_CODE < KERNEL_VERSION(3,13,0) + #define BIT(nr) (1UL << (nr)) + #define BIT_ULL(nr) (1ULL << (nr)) + #define BITS_PER_BYTE 8 + #define reinit_completion(x) ((x)->done = 0) +#if LINUX_VERSION_CODE < KERNEL_VERSION(3,10,0) + #define NETIF_F_HW_VLAN_CTAG_RX NETIF_F_HW_VLAN_RX + #define NETIF_F_HW_VLAN_CTAG_TX NETIF_F_HW_VLAN_TX +#if LINUX_VERSION_CODE < KERNEL_VERSION(3,8,0) + #define USB_DEVICE_INTERFACE_CLASS(vend, prod, iclass) \ + USB_DEVICE_AND_INTERFACE_INFO(vend, prod, iclass, 0xff, 0) + + static inline __sum16 tcp_v6_check(int len, + const struct in6_addr *saddr, + const struct in6_addr *daddr, + __wsum base) + { + return csum_ipv6_magic(saddr, daddr, len, IPPROTO_TCP, base); + } +#if LINUX_VERSION_CODE < KERNEL_VERSION(3,7,0) + #ifndef 
SPEED_UNKNOWN + #define SPEED_UNKNOWN 0 + #endif + + #ifndef DUPLEX_UNKNOWN + #define DUPLEX_UNKNOWN 0xff + #endif +#if LINUX_VERSION_CODE < KERNEL_VERSION(3,6,0) + #define eth_random_addr(addr) random_ether_addr(addr) + #define usb_enable_lpm(udev) + #define MDIO_EEE_100TX MDIO_AN_EEE_ADV_100TX /* 100TX EEE cap */ + #define MDIO_EEE_1000T MDIO_AN_EEE_ADV_1000T /* 1000T EEE cap */ +#if LINUX_VERSION_CODE < KERNEL_VERSION(3,4,0) + #define ETH_MDIO_SUPPORTS_C22 MDIO_SUPPORTS_C22 + + static inline void eth_hw_addr_random(struct net_device *dev) + { + random_ether_addr(dev->dev_addr); + } +#if LINUX_VERSION_CODE < KERNEL_VERSION(3,3,0) + #define module_usb_driver(__driver) \ + static int __init __driver##_init(void) \ + { \ + return usb_register(&(__driver)); \ + } \ + module_init(__driver##_init); \ + static void __exit __driver##_exit(void) \ + { \ + usb_deregister(&(__driver)); \ + } \ + module_exit(__driver##_exit); + + #define netdev_features_t u32 +#if LINUX_VERSION_CODE < KERNEL_VERSION(3,2,0) + #define PMSG_IS_AUTO(msg) (((msg).event & PM_EVENT_AUTO) != 0) + + static inline struct page *skb_frag_page(const skb_frag_t *frag) + { + return frag->page; + } + + static inline void *skb_frag_address(const skb_frag_t *frag) + { + return page_address(skb_frag_page(frag)) + frag->page_offset; + } + + static inline unsigned int skb_frag_size(const skb_frag_t *frag) + { + return frag->size; + } +#if LINUX_VERSION_CODE < KERNEL_VERSION(3,1,0) + #define ndo_set_rx_mode ndo_set_multicast_list +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,39) + #define NETIF_F_RXCSUM (1 << 29) /* Receive checksumming offload */ +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,38) + #define MDIO_AN_EEE_ADV 60 /* EEE advertisement */ + #define MDIO_AN_EEE_ADV_100TX 0x0002 /* Advertise 100TX EEE cap */ + #define MDIO_AN_EEE_ADV_1000T 0x0004 /* Advertise 1000T EEE cap */ +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,37) + #define skb_checksum_none_assert(skb_ptr) (skb_ptr)->ip_summed = CHECKSUM_NONE + + 
static inline __be16 vlan_get_protocol(const struct sk_buff *skb) + { + __be16 protocol = 0; + + if (vlan_tx_tag_present(skb) || + skb->protocol != cpu_to_be16(ETH_P_8021Q)) + protocol = skb->protocol; + else { + __be16 proto, *protop; + protop = skb_header_pointer(skb, offsetof(struct vlan_ethhdr, + h_vlan_encapsulated_proto), + sizeof(proto), &proto); + if (likely(protop)) + protocol = *protop; + } + + return protocol; + } +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,36) + #define skb_tx_timestamp(skb) + + #define queue_delayed_work(long_wq, work, delay) schedule_delayed_work(work, delay) + + static inline void usleep_range(unsigned long min, unsigned long max) + { + unsigned long ms = min / 1000; + + if (ms) + mdelay(ms); + + udelay(min % 1000); + } + + #define work_busy(x) 0 +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,35) + static inline bool pci_dev_run_wake(struct pci_dev *dev) + { + return 1; + } +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,34) + #define netdev_mc_count(netdev) ((netdev)->mc_count) + #define netdev_mc_empty(netdev) (netdev_mc_count(netdev) == 0) + + #define netif_printk(priv, type, level, netdev, fmt, args...) \ + do { \ + if (netif_msg_##type(priv)) \ + printk(level "%s: " fmt,(netdev)->name , ##args); \ + } while (0) + + #define netif_emerg(priv, type, netdev, fmt, args...) \ + netif_printk(priv, type, KERN_EMERG, netdev, fmt, ##args) + #define netif_alert(priv, type, netdev, fmt, args...) \ + netif_printk(priv, type, KERN_ALERT, netdev, fmt, ##args) + #define netif_crit(priv, type, netdev, fmt, args...) \ + netif_printk(priv, type, KERN_CRIT, netdev, fmt, ##args) + #define netif_err(priv, type, netdev, fmt, args...) \ + netif_printk(priv, type, KERN_ERR, netdev, fmt, ##args) + #define netif_warn(priv, type, netdev, fmt, args...) \ + netif_printk(priv, type, KERN_WARNING, netdev, fmt, ##args) + #define netif_notice(priv, type, netdev, fmt, args...) 
\ + netif_printk(priv, type, KERN_NOTICE, netdev, fmt, ##args) + #define netif_info(priv, type, netdev, fmt, args...) \ + netif_printk(priv, type, KERN_INFO, (netdev), fmt, ##args) + + static inline int usb_enable_autosuspend(struct usb_device *udev) + { return 0; } + static inline int usb_disable_autosuspend(struct usb_device *udev) + { return 0; } +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,33) + #define get_sset_count get_stats_count + + static inline + struct sk_buff *netdev_alloc_skb_ip_align(struct net_device *dev, + unsigned int length) + { + struct sk_buff *skb = netdev_alloc_skb(dev, length + NET_IP_ALIGN); + + if (NET_IP_ALIGN && skb) + skb_reserve(skb, NET_IP_ALIGN); + return skb; + } +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,32) + #define pm_request_resume(para) + #define pm_runtime_set_suspended(para) + #define pm_schedule_suspend(para1, para2) + #define pm_runtime_get_sync(para) + #define pm_runtime_put_sync(para) + #define pm_runtime_put_noidle(para) + #define pm_runtime_idle(para) + #define pm_runtime_set_active(para) + #define pm_runtime_enable(para) + #define pm_runtime_disable(para) + typedef int netdev_tx_t; +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,31) + #define USB_SPEED_SUPER (USB_SPEED_VARIABLE + 1) + #define MDIO_MMD_AN 7 /* Auto-Negotiation */ +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,29) + #define napi_gro_receive(napi, skb) netif_receive_skb(skb) + #define vlan_gro_receive(napi, grp, vlan_tci, skb) \ + vlan_hwaccel_receive_skb(skb, grp, vlan_tci) + + static inline void usb_autopm_put_interface_async(struct usb_interface *intf) + { + struct usb_device *udev = interface_to_usbdev(intf); + int status = 0; + + if (intf->condition == USB_INTERFACE_UNBOUND) { + status = -ENODEV; + } else { + udev->last_busy = jiffies; + --intf->pm_usage_cnt; + if (udev->autosuspend_disabled || udev->autosuspend_delay < 0) + status = -EPERM; + } + } + + static inline int usb_autopm_get_interface_async(struct usb_interface *intf) + { + struct 
usb_device *udev = interface_to_usbdev(intf); + int status = 0; + + if (intf->condition == USB_INTERFACE_UNBOUND) + status = -ENODEV; + else if (udev->autoresume_disabled) + status = -EPERM; + else + ++intf->pm_usage_cnt; + return status; + } + + static inline int eth_change_mtu(struct net_device *dev, int new_mtu) + { + if (new_mtu < 68 || new_mtu > ETH_DATA_LEN) + return -EINVAL; + dev->mtu = new_mtu; + return 0; + } +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,28) + static inline void __skb_queue_splice(const struct sk_buff_head *list, + struct sk_buff *prev, + struct sk_buff *next) + { + struct sk_buff *first = list->next; + struct sk_buff *last = list->prev; + + first->prev = prev; + prev->next = first; + + last->next = next; + next->prev = last; + } + + static inline void skb_queue_splice(const struct sk_buff_head *list, + struct sk_buff_head *head) + { + if (!skb_queue_empty(list)) { + __skb_queue_splice(list, (struct sk_buff *) head, head->next); + head->qlen += list->qlen; + } + } + + static inline void __skb_queue_head_init(struct sk_buff_head *list) + { + list->prev = list->next = (struct sk_buff *)list; + list->qlen = 0; + } + + static inline void skb_queue_splice_init(struct sk_buff_head *list, + struct sk_buff_head *head) + { + if (!skb_queue_empty(list)) { + __skb_queue_splice(list, (struct sk_buff *) head, head->next); + head->qlen += list->qlen; + __skb_queue_head_init(list); + } + } +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,27) + #define PM_EVENT_AUTO 0x0400 + + static inline void __list_splice2(const struct list_head *list, + struct list_head *prev, + struct list_head *next) + { + struct list_head *first = list->next; + struct list_head *last = list->prev; + + first->prev = prev; + prev->next = first; + + last->next = next; + next->prev = last; + } + + static inline void list_splice_tail(struct list_head *list, + struct list_head *head) + { + if (!list_empty(list)) + __list_splice2(list, head->prev, head); + } +#if LINUX_VERSION_CODE < 
KERNEL_VERSION(2,6,24) + struct napi_struct { + struct list_head poll_list; + unsigned long state; + int weight; + int (*poll)(struct napi_struct *, int); + #ifdef CONFIG_NETPOLL + spinlock_t poll_lock; + int poll_owner; + struct net_device *dev; + struct list_head dev_list; + #endif + }; + + #define napi_enable(napi_ptr) netif_poll_enable(container_of(napi_ptr, struct r8152, napi)->netdev) + #define napi_disable(napi_ptr) netif_poll_disable(container_of(napi_ptr, struct r8152, napi)->netdev) + #define napi_schedule(napi_ptr) netif_rx_schedule(container_of(napi_ptr, struct r8152, napi)->netdev) + #define napi_complete(napi_ptr) netif_rx_complete(container_of(napi_ptr, struct r8152, napi)->netdev) + #define netif_napi_add(ndev, napi_ptr, function, weight_t) \ + ndev->poll = function; \ + ndev->weight = weight_t; + typedef unsigned long uintptr_t; + #define DMA_BIT_MASK(value) \ + (value < 64 ? ((1ULL << value) - 1) : 0xFFFFFFFFFFFFFFFFULL) +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,23) + #define NETIF_F_IPV6_CSUM 16 + #define cancel_delayed_work_sync cancel_delayed_work + + static inline int skb_cow_head(struct sk_buff *skb, unsigned int headroom) + { + int delta = 0; + + if (headroom > skb_headroom(skb)) + delta = headroom - skb_headroom(skb); + + if (delta || skb_header_cloned(skb)) + return pskb_expand_head(skb, ALIGN(delta, NET_SKB_PAD), + 0, GFP_ATOMIC); + return 0; + } +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,22) + #define ip_hdr(skb_ptr) (skb_ptr)->nh.iph + #define ipv6hdr(skb_ptr) (skb_ptr)->nh.ipv6h + + static inline void skb_copy_from_linear_data(const struct sk_buff *skb, + void *to, + const unsigned int len) + { + memcpy(to, skb->data, len); + } +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,21) + #define vlan_group_set_device(vlgrp, vid, value) \ + if (vlgrp) \ + (vlgrp)->vlan_devices[vid] = value; +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20) + #define delayed_work work_struct + #define INIT_DELAYED_WORK(a,b) INIT_WORK(a,b,tp) +#if 
LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19) + #define CHECKSUM_PARTIAL CHECKSUM_HW + + static inline void *kmemdup(const void *src, size_t len, gfp_t gfp) + { + void *p; + + p = kmalloc_track_caller(len, gfp); + if (p) + memcpy(p, src, len); + return p; + } +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,18) + #define skb_is_gso(skb_ptr) skb_shinfo(skb_ptr)->tso_size + #define netdev_alloc_skb(dev, len) dev_alloc_skb(len) + #define IRQF_SHARED SA_SHIRQ + + static inline struct sk_buff *skb_gso_segment(struct sk_buff *skb, int features) + { + return NULL; + } +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,16) + #ifndef __LINUX_MUTEX_H + #define mutex semaphore + #define mutex_lock down + #define mutex_unlock up + #define mutex_trylock(lock) (!down_trylock(lock)) + #define mutex_lock_interruptible down_interruptible + #define mutex_init init_MUTEX + #endif +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,14) + #define ADVERTISED_Pause (1 << 13) + #define ADVERTISED_Asym_Pause (1 << 14) +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,12) + #define skb_header_cloned(skb) skb_cloned(skb) +#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(2,6,12) */ +#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(2,6,14) */ +#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(2,6,16) */ +#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(2,6,18) */ +#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19) */ +#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20) */ +#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(2,6,21) */ +#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(2,6,22) */ +#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(2,6,23) */ +#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(2,6,24) */ + static inline void netif_napi_del(struct napi_struct *napi) + { + #ifdef CONFIG_NETPOLL + list_del(&napi->dev_list); + #endif + } +#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(2,6,27) */ +#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(2,6,28) */ +#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(2,6,29) */ +#endif /* LINUX_VERSION_CODE < 
KERNEL_VERSION(2,6,31) */ +#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(2,6,32) */ +#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(2,6,33) */ +#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(2,6,34) */ +#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(2,6,35) */ +#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(2,6,36) */ +#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(2,6,37) */ +#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(2,6,38) */ +#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(2,6,39) */ +#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(3,1,0) */ +#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(3,2,0) */ +#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(3,3,0) */ +#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(3,4,0) */ +#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(3,6,0) */ +#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(3,7,0) */ +#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(3,8,0) */ +#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(3,10,0) */ +#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(3,13,0) */ +#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(3,14,0) */ +#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(3,15,0) */ +#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(3,16,0) */ +#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(3,19,0) */ +#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(4,0,0) */ +#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(4,1,0) */ +#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(4,5,0) */ +#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(4,9,0) */ +#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(4,12,0) */ + +#ifndef FALSE + #define TRUE 1 + #define FALSE 0 +#endif + +enum rtl_cmd { + RTLTOOL_PLA_OCP_READ_DWORD = 0, + RTLTOOL_PLA_OCP_WRITE_DWORD, + RTLTOOL_USB_OCP_READ_DWORD, + RTLTOOL_USB_OCP_WRITE_DWORD, + RTLTOOL_PLA_OCP_READ, + RTLTOOL_PLA_OCP_WRITE, + RTLTOOL_USB_OCP_READ, + RTLTOOL_USB_OCP_WRITE, + RTLTOOL_USB_INFO, + RTL_ENABLE_USB_DIAG, + RTL_DISABLE_USB_DIAG, + + RTLTOOL_INVALID +}; + +struct usb_device_info { + __u16 idVendor; + __u16 idProduct; + __u16 bcdDevice; 
+ __u8 dev_addr[8]; + char devpath[16]; +}; + +struct rtltool_cmd { + __u32 cmd; + __u32 offset; + __u32 byteen; + __u32 data; + void *buf; + struct usb_device_info nic_info; + struct sockaddr ifru_addr; + struct sockaddr ifru_netmask; + struct sockaddr ifru_hwaddr; +}; + +#endif /* LINUX_COMPATIBILITY_H */ diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c index e30792380812..2b7789f19c61 100644 --- a/drivers/net/usb/r8152.c +++ b/drivers/net/usb/r8152.c @@ -1,10 +1,12 @@ /* - * Copyright (c) 2014 Realtek Semiconductor Corp. All rights reserved. + * Copyright (c) 2017 Realtek Semiconductor Corp. All rights reserved. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * version 2 as published by the Free Software Foundation. * + * This product is covered by one or more of the following patents: + * US6,570,884, US6,115,776, and US6,327,625. */ #include @@ -22,23 +24,21 @@ #include #include #include -#include -#include #include #include -#include -/* Information for net-next */ -#define NETNEXT_VERSION "09" +#include "compatibility.h" -/* Information for net */ -#define NET_VERSION "9" - -#define DRIVER_VERSION "v1." NETNEXT_VERSION "." 
NET_VERSION -#define DRIVER_AUTHOR "Realtek linux nic maintainers " +/* Version Information */ +#define DRIVER_VERSION "v2.12.0 (2019/04/29)" +#define DRIVER_AUTHOR "Realtek nic sw " #define DRIVER_DESC "Realtek RTL8152/RTL8153 Based USB Ethernet Adapters" #define MODULENAME "r8152" +#define PATENTS "This product is covered by one or more of the " \ + "following patents:\n" \ + "\t\tUS6,570,884, US6,115,776, and US6,327,625.\n" + #define R8152_PHY_ID 32 #define PLA_IDR 0xc000 @@ -297,6 +297,7 @@ /* PLA_PHY_PWR */ #define TX_10M_IDLE_EN 0x0080 #define PFM_PWM_SWITCH 0x0040 +#define TEST_IO_OFF BIT(4) /* PLA_MAC_PWR_CTRL */ #define D3_CLK_GATED_EN 0x00004000 @@ -440,13 +441,14 @@ #define UPS_FLAGS_EN_EEE BIT(20) #define UPS_FLAGS_EN_500M_EEE BIT(21) #define UPS_FLAGS_EN_EEE_CKDIV BIT(22) +#define UPS_FLAGS_EEE_PLLOFF_100 BIT(23) #define UPS_FLAGS_EEE_PLLOFF_GIGA BIT(24) #define UPS_FLAGS_EEE_CMOD_LV_EN BIT(25) #define UPS_FLAGS_EN_GREEN BIT(26) #define UPS_FLAGS_EN_FLOW_CTR BIT(27) enum spd_duplex { - NWAY_10M_HALF = 1, + NWAY_10M_HALF, NWAY_10M_FULL, NWAY_100M_HALF, NWAY_100M_FULL, @@ -455,6 +457,8 @@ enum spd_duplex { FORCE_10M_FULL, FORCE_100M_HALF, FORCE_100M_FULL, + FORCE_1000M_FULL, + NWAY_2500M_FULL, }; /* OCP_ALDPS_CONFIG */ @@ -553,12 +557,10 @@ enum spd_duplex { /* SRAM_IMPEDANCE */ #define RX_DRIVING_MASK 0x6000 -/* MAC PASSTHRU */ -#define AD_MASK 0xfee0 -#define EFUSE 0xcfdb -#define PASS_THRU_MASK 0x1 - enum rtl_register_content { + _2500bps = BIT(10), + _1250bps = BIT(9), + _500bps = BIT(8), _1000bps = 0x10, _100bps = 0x08, _10bps = 0x04, @@ -604,8 +606,10 @@ enum rtl8152_flags { RTL8152_LINK_CHG, SELECTIVE_SUSPEND, PHY_RESET, - SCHEDULE_NAPI, + SCHEDULE_TASKLET, GREEN_ETHERNET, + RECOVER_SPEED, + SUPPORT_2500FULL, }; /* Define these values to match your device */ @@ -613,9 +617,8 @@ enum rtl8152_flags { #define VENDOR_ID_MICROSOFT 0x045e #define VENDOR_ID_SAMSUNG 0x04e8 #define VENDOR_ID_LENOVO 0x17ef -#define VENDOR_ID_LINKSYS 0x13b1 -#define 
VENDOR_ID_NVIDIA 0x0955 #define VENDOR_ID_TPLINK 0x2357 +#define VENDOR_ID_NVIDIA 0x0955 #define MCU_TYPE_PLA 0x0100 #define MCU_TYPE_USB 0x0000 @@ -638,6 +641,7 @@ struct tally_counter { struct rx_desc { __le32 opts1; +#define RD_CRC BIT(15) #define RX_LEN_MASK 0x7fff __le32 opts2; @@ -661,6 +665,7 @@ struct tx_desc { __le32 opts1; #define TX_FS BIT(31) /* First segment of a packet */ #define TX_LS BIT(30) /* Final segment of a packet */ +#define LGSEND BIT(29) #define GTSENDV4 BIT(28) #define GTSENDV6 BIT(27) #define GTTCPHO_SHIFT 18 @@ -714,9 +719,16 @@ struct r8152 { struct delayed_work schedule, hw_phy_work; struct mii_if_info mii; struct mutex control; /* use for hw setting */ -#ifdef CONFIG_PM_SLEEP +#if LINUX_VERSION_CODE < KERNEL_VERSION(3,0,0) + struct vlan_group *vlgrp; +#endif +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,22) + struct net_device_stats stats; +#endif +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,23) && defined(CONFIG_PM_SLEEP) struct notifier_block pm_notifier; #endif + struct tasklet_struct tx_tl; struct rtl_ops { void (*init)(struct r8152 *); @@ -725,22 +737,46 @@ struct r8152 { void (*up)(struct r8152 *); void (*down)(struct r8152 *); void (*unload)(struct r8152 *); +#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,6,0) int (*eee_get)(struct r8152 *, struct ethtool_eee *); int (*eee_set)(struct r8152 *, struct ethtool_eee *); +#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(3,6,0) */ bool (*in_nway)(struct r8152 *); void (*hw_phy_cfg)(struct r8152 *); void (*autosuspend_en)(struct r8152 *tp, bool enable); } rtl_ops; + struct ups_info { + u32 _10m_ckdiv:1; + u32 _250m_ckdiv:1; + u32 aldps:1; + u32 lite_mode:2; + u32 speed_duplex:4; + u32 eee:1; + u32 eee_lite:1; + u32 eee_ckdiv:1; + u32 eee_plloff_100:1; + u32 eee_plloff_giga:1; + u32 eee_cmod_lv:1; + u32 green:1; + u32 flow_control:1; + u32 ctap_short_off:1; + } ups_info; + + bool eee_en; int intr_interval; u32 saved_wolopts; u32 msg_enable; u32 tx_qlen; u32 coalesce; + u32 advertising; + u32 
rx_buf_sz; u16 ocp_base; u16 speed; + u16 eee_adv; u8 *intr_buff; u8 version; + u8 rtk_enable_diag; u8 duplex; u8 autoneg; }; @@ -756,6 +792,11 @@ enum rtl_version { RTL_VER_07, RTL_VER_08, RTL_VER_09, + + RTL_TEST_01, + RTL_VER_10, + RTL_VER_11, + RTL_VER_MAX }; @@ -816,6 +857,14 @@ int set_registers(struct r8152 *tp, u16 value, u16 index, u16 size, void *data) return ret; } +static void rtl_set_unplug(struct r8152 *tp) +{ + if (tp->udev->state == USB_STATE_NOTATTACHED) { + set_bit(RTL8152_UNPLUG, &tp->flags); + smp_mb__after_atomic(); + } +} + static int generic_ocp_read(struct r8152 *tp, u16 index, u16 size, void *data, u16 type) { @@ -854,7 +903,7 @@ static int generic_ocp_read(struct r8152 *tp, u16 index, u16 size, } if (ret == -ENODEV) - set_bit(RTL8152_UNPLUG, &tp->flags); + rtl_set_unplug(tp); return ret; } @@ -924,7 +973,7 @@ static int generic_ocp_write(struct r8152 *tp, u16 index, u16 byteen, error1: if (ret == -ENODEV) - set_bit(RTL8152_UNPLUG, &tp->flags); + rtl_set_unplug(tp); return ret; } @@ -941,6 +990,12 @@ int pla_ocp_write(struct r8152 *tp, u16 index, u16 byteen, u16 size, void *data) return generic_ocp_write(tp, index, byteen, size, data, MCU_TYPE_PLA); } +static inline +int usb_ocp_read(struct r8152 *tp, u16 index, u16 size, void *data) +{ + return generic_ocp_read(tp, index, size, data, MCU_TYPE_USB); +} + static inline int usb_ocp_write(struct r8152 *tp, u16 index, u16 byteen, u16 size, void *data) { @@ -1094,7 +1149,7 @@ static u16 sram_read(struct r8152 *tp, u16 addr) static int read_mii_word(struct net_device *netdev, int phy_id, int reg) { struct r8152 *tp = netdev_priv(netdev); - int ret; + int ret, lock; if (test_bit(RTL8152_UNPLUG, &tp->flags)) return -ENODEV; @@ -1102,8 +1157,15 @@ static int read_mii_word(struct net_device *netdev, int phy_id, int reg) if (phy_id != R8152_PHY_ID) return -EINVAL; + lock = mutex_trylock(&tp->control); + ret = r8152_mdio_read(tp, reg); + if (lock) { + mutex_unlock(&tp->control); + netif_warn(tp, drv, 
netdev, "miss mutex for read_mii_word?\n"); + } + return ret; } @@ -1111,6 +1173,7 @@ static void write_mii_word(struct net_device *netdev, int phy_id, int reg, int val) { struct r8152 *tp = netdev_priv(netdev); + int lock; if (test_bit(RTL8152_UNPLUG, &tp->flags)) return; @@ -1118,18 +1181,32 @@ void write_mii_word(struct net_device *netdev, int phy_id, int reg, int val) if (phy_id != R8152_PHY_ID) return; + lock = mutex_trylock(&tp->control); + r8152_mdio_write(tp, reg, val); + + if (lock) { + mutex_unlock(&tp->control); + netif_warn(tp, drv, netdev, "miss mutex for write_mii_word?\n"); + } } static int r8152_submit_rx(struct r8152 *tp, struct rx_agg *agg, gfp_t mem_flags); +static int +rtl8152_set_speed(struct r8152 *tp, u8 autoneg, u32 speed, u8 duplex, + u32 advertising); + static int rtl8152_set_mac_address(struct net_device *netdev, void *p) { struct r8152 *tp = netdev_priv(netdev); struct sockaddr *addr = p; int ret = -EADDRNOTAVAIL; + if (unlikely(tp->rtk_enable_diag)) + return -EBUSY; + if (!is_valid_ether_addr(addr->sa_data)) goto out1; @@ -1152,81 +1229,16 @@ out1: return ret; } -/* Devices containing RTL8153-AD can support a persistent - * host system provided MAC address. 
- * Examples of this are Dell TB15 and Dell WD15 docks - */ -static int vendor_mac_passthru_addr_read(struct r8152 *tp, struct sockaddr *sa) -{ - acpi_status status; - struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL }; - union acpi_object *obj; - int ret = -EINVAL; - u32 ocp_data; - unsigned char buf[6]; - - /* test for -AD variant of RTL8153 */ - ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_MISC_0); - if ((ocp_data & AD_MASK) != 0x1000) - return -ENODEV; - - /* test for MAC address pass-through bit */ - ocp_data = ocp_read_byte(tp, MCU_TYPE_USB, EFUSE); - if ((ocp_data & PASS_THRU_MASK) != 1) - return -ENODEV; - - /* returns _AUXMAC_#AABBCCDDEEFF# */ - status = acpi_evaluate_object(NULL, "\\_SB.AMAC", NULL, &buffer); - obj = (union acpi_object *)buffer.pointer; - if (!ACPI_SUCCESS(status)) - return -ENODEV; - if (obj->type != ACPI_TYPE_BUFFER || obj->string.length != 0x17) { - netif_warn(tp, probe, tp->netdev, - "Invalid buffer for pass-thru MAC addr: (%d, %d)\n", - obj->type, obj->string.length); - goto amacout; - } - if (strncmp(obj->string.pointer, "_AUXMAC_#", 9) != 0 || - strncmp(obj->string.pointer + 0x15, "#", 1) != 0) { - netif_warn(tp, probe, tp->netdev, - "Invalid header when reading pass-thru MAC addr\n"); - goto amacout; - } - ret = hex2bin(buf, obj->string.pointer + 9, 6); - if (!(ret == 0 && is_valid_ether_addr(buf))) { - netif_warn(tp, probe, tp->netdev, - "Invalid MAC for pass-thru MAC addr: %d, %pM\n", - ret, buf); - ret = -EINVAL; - goto amacout; - } - memcpy(sa->sa_data, buf, 6); - ether_addr_copy(tp->netdev->dev_addr, sa->sa_data); - netif_info(tp, probe, tp->netdev, - "Using pass-thru MAC addr %pM\n", sa->sa_data); - -amacout: - kfree(obj); - return ret; -} - static int set_ethernet_addr(struct r8152 *tp) { struct net_device *dev = tp->netdev; struct sockaddr sa; int ret; - if (tp->version == RTL_VER_01) { + if (tp->version == RTL_VER_01) ret = pla_ocp_read(tp, PLA_IDR, 8, sa.sa_data); - } else { - /* if this is not an RTL8153-AD, 
no eFuse mac pass thru set, - * or system doesn't provide valid _SB.AMAC this will be - * be expected to non-zero - */ - ret = vendor_mac_passthru_addr_read(tp, &sa); - if (ret < 0) - ret = pla_ocp_read(tp, PLA_BACKUP, 8, sa.sa_data); - } + else + ret = pla_ocp_read(tp, PLA_BACKUP, 8, sa.sa_data); if (ret < 0) { netif_err(tp, probe, dev, "Get ether addr fail\n"); @@ -1248,12 +1260,24 @@ static int set_ethernet_addr(struct r8152 *tp) return ret; } +static inline struct net_device_stats *rtl8152_get_stats(struct net_device *dev) +{ +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,22) + struct rtl8152 *tp = netdev_priv(dev); + + return (struct net_device_stats *)&tp->stats; +#else + return &dev->stats; +#endif +} + static void read_bulk_callback(struct urb *urb) { struct net_device *netdev; int status = urb->status; struct rx_agg *agg; struct r8152 *tp; + unsigned long flags; agg = urb->context; if (!agg) @@ -1283,24 +1307,26 @@ static void read_bulk_callback(struct urb *urb) if (urb->actual_length < ETH_ZLEN) break; - spin_lock(&tp->rx_lock); + spin_lock_irqsave(&tp->rx_lock, flags); list_add_tail(&agg->list, &tp->rx_done); - spin_unlock(&tp->rx_lock); + spin_unlock_irqrestore(&tp->rx_lock, flags); napi_schedule(&tp->napi); return; case -ESHUTDOWN: - set_bit(RTL8152_UNPLUG, &tp->flags); + rtl_set_unplug(tp); netif_device_detach(tp->netdev); return; case -ENOENT: return; /* the urb is in unlink state */ case -ETIME: if (net_ratelimit()) - netdev_warn(netdev, "maybe reset is needed?\n"); + netif_warn(tp, rx_err, netdev, + "maybe reset is needed?\n"); break; default: if (net_ratelimit()) - netdev_warn(netdev, "Rx status %d\n", status); + netif_warn(tp, rx_err, netdev, + "Rx status %d\n", status); break; } @@ -1313,6 +1339,7 @@ static void write_bulk_callback(struct urb *urb) struct net_device *netdev; struct tx_agg *agg; struct r8152 *tp; + unsigned long flags; int status = urb->status; agg = urb->context; @@ -1324,19 +1351,20 @@ static void write_bulk_callback(struct urb 
*urb) return; netdev = tp->netdev; - stats = &netdev->stats; + stats = rtl8152_get_stats(netdev); if (status) { if (net_ratelimit()) - netdev_warn(netdev, "Tx status %d\n", status); + netif_warn(tp, tx_err, netdev, + "Tx status %d\n", status); stats->tx_errors += agg->skb_num; } else { stats->tx_packets += agg->skb_num; stats->tx_bytes += agg->skb_len; } - spin_lock(&tp->tx_lock); + spin_lock_irqsave(&tp->tx_lock, flags); list_add_tail(&agg->list, &tp->tx_free); - spin_unlock(&tp->tx_lock); + spin_unlock_irqrestore(&tp->tx_lock, flags); usb_autopm_put_interface_async(tp->intf); @@ -1350,7 +1378,7 @@ static void write_bulk_callback(struct urb *urb) return; if (!skb_queue_empty(&tp->tx_queue)) - napi_schedule(&tp->napi); + tasklet_schedule(&tp->tx_tl); } static void intr_callback(struct urb *urb) @@ -1407,7 +1435,7 @@ static void intr_callback(struct urb *urb) resubmit: res = usb_submit_urb(urb, GFP_ATOMIC); if (res == -ENODEV) { - set_bit(RTL8152_UNPLUG, &tp->flags); + rtl_set_unplug(tp); netif_device_detach(tp->netdev); } else if (res) { netif_err(tp, intr, tp->netdev, @@ -1474,13 +1502,13 @@ static int alloc_all_mem(struct r8152 *tp) skb_queue_head_init(&tp->rx_queue); for (i = 0; i < RTL8152_MAX_RX; i++) { - buf = kmalloc_node(agg_buf_sz, GFP_KERNEL, node); + buf = kmalloc_node(tp->rx_buf_sz, GFP_KERNEL, node); if (!buf) goto err1; if (buf != rx_agg_align(buf)) { kfree(buf); - buf = kmalloc_node(agg_buf_sz + RX_ALIGN, GFP_KERNEL, + buf = kmalloc_node(tp->rx_buf_sz + RX_ALIGN, GFP_KERNEL, node); if (!buf) goto err1; @@ -1605,7 +1633,7 @@ static void r8152_csum_workaround(struct r8152 *tp, struct sk_buff *skb, struct net_device_stats *stats; drop: - stats = &tp->netdev->stats; + stats = rtl8152_get_stats(tp->netdev); stats->tx_dropped++; dev_kfree_skb(skb); } @@ -1644,15 +1672,62 @@ static inline void rtl_tx_vlan_tag(struct tx_desc *desc, struct sk_buff *skb) } } +#if LINUX_VERSION_CODE < KERNEL_VERSION(3,0,0) + +static inline bool +rtl_rx_vlan_tag(struct r8152 
*tp, struct rx_desc *desc, struct sk_buff *skb) +{ + u32 opts2 = le32_to_cpu(desc->opts2); + + if (tp->vlgrp && (opts2 & RX_VLAN_TAG)) { + vlan_gro_receive(&tp->napi, tp->vlgrp, swab16(opts2 & 0xffff), + skb); + return true; + } + + return false; +} + +static inline void +rtl_vlan_put_tag(struct r8152 *tp, struct rx_desc *desc, struct sk_buff *skb) +{ + u32 opts2 = le32_to_cpu(desc->opts2); + + if (tp->vlgrp && (opts2 & RX_VLAN_TAG)) + __vlan_hwaccel_put_tag(skb, swab16(opts2 & 0xffff)); +} + +static inline __u16 +rtl_vlan_get_tag(struct sk_buff *skb) +{ + __u16 tag; + +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,27) + __vlan_hwaccel_get_tag(skb, &tag); +#else + tag = skb->vlan_tci; +#endif + + return tag; +} + +#else + static inline void rtl_rx_vlan_tag(struct rx_desc *desc, struct sk_buff *skb) { u32 opts2 = le32_to_cpu(desc->opts2); if (opts2 & RX_VLAN_TAG) +#if LINUX_VERSION_CODE < KERNEL_VERSION(3,10,0) + __vlan_hwaccel_put_tag(skb, swab16(opts2 & 0xffff)); +#else __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), swab16(opts2 & 0xffff)); +#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(3,10,0) */ } +#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(3,0,0) */ + static int r8152_tx_csum(struct r8152 *tp, struct tx_desc *desc, struct sk_buff *skb, u32 len, u32 transport_offset) { @@ -1847,11 +1922,9 @@ static u8 r8152_rx_csum(struct r8152 *tp, struct rx_desc *rx_desc) if (opts2 & RD_IPV4_CS) { if (opts3 & IPF) checksum = CHECKSUM_NONE; - else if ((opts2 & RD_UDP_CS) && (opts3 & UDPF)) - checksum = CHECKSUM_NONE; - else if ((opts2 & RD_TCP_CS) && (opts3 & TCPF)) - checksum = CHECKSUM_NONE; - else + else if ((opts2 & RD_UDP_CS) && !(opts3 & UDPF)) + checksum = CHECKSUM_UNNECESSARY; + else if ((opts2 & RD_TCP_CS) && !(opts3 & TCPF)) checksum = CHECKSUM_UNNECESSARY; } else if (opts2 & RD_IPV6_CS) { if ((opts2 & RD_UDP_CS) && !(opts3 & UDPF)) @@ -1875,15 +1948,34 @@ static int rx_bottom(struct r8152 *tp, int budget) while (work_done < budget) { struct sk_buff *skb = 
__skb_dequeue(&tp->rx_queue); struct net_device *netdev = tp->netdev; - struct net_device_stats *stats = &netdev->stats; + struct net_device_stats *stats; unsigned int pkt_len; +#if LINUX_VERSION_CODE < KERNEL_VERSION(3,0,0) + u16 vlan_tci; +#endif if (!skb) break; pkt_len = skb->len; + stats = rtl8152_get_stats(netdev); +#if LINUX_VERSION_CODE < KERNEL_VERSION(3,0,0) + vlan_tci = rtl_vlan_get_tag(skb); + + if (vlan_tci) + vlan_gro_receive(napi, tp->vlgrp, vlan_tci, + skb); + else + napi_gro_receive(napi, skb); +#else napi_gro_receive(napi, skb); +#endif + work_done++; + +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,29) + netdev->last_rx = jiffies; +#endif stats->rx_packets++; stats->rx_bytes += pkt_len; } @@ -1917,7 +2009,7 @@ static int rx_bottom(struct r8152 *tp, int budget) while (urb->actual_length > len_used) { struct net_device *netdev = tp->netdev; - struct net_device_stats *stats = &netdev->stats; + struct net_device_stats *stats; unsigned int pkt_len; struct sk_buff *skb; @@ -1933,6 +2025,8 @@ static int rx_bottom(struct r8152 *tp, int budget) if (urb->actual_length < len_used) break; + stats = rtl8152_get_stats(netdev); + pkt_len -= ETH_FCS_LEN; rx_data += sizeof(struct rx_desc); @@ -1945,7 +2039,26 @@ static int rx_bottom(struct r8152 *tp, int budget) skb->ip_summed = r8152_rx_csum(tp, rx_desc); memcpy(skb->data, rx_data, pkt_len); skb_put(skb, pkt_len); +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,22) + skb->dev = netdev; +#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(2,6,22) */ skb->protocol = eth_type_trans(skb, netdev); + +#if LINUX_VERSION_CODE < KERNEL_VERSION(3,0,0) + if (work_done < budget) { + if (!rtl_rx_vlan_tag(tp, rx_desc, skb)) + napi_gro_receive(napi, skb); +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,29) + netdev->last_rx = jiffies; +#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(2,6,29) */ + work_done++; + stats->rx_packets++; + stats->rx_bytes += pkt_len; + } else { + rtl_vlan_put_tag(tp, rx_desc, skb); + __skb_queue_tail(&tp->rx_queue, 
skb); + } +#else rtl_rx_vlan_tag(rx_desc, skb); if (work_done < budget) { napi_gro_receive(napi, skb); @@ -1955,6 +2068,7 @@ static int rx_bottom(struct r8152 *tp, int budget) } else { __skb_queue_tail(&tp->rx_queue, skb); } +#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(3,0,0) */ find_next_rx: rx_data = rx_agg_align(rx_data + pkt_len + ETH_FCS_LEN); @@ -2001,12 +2115,13 @@ static void tx_bottom(struct r8152 *tp) struct net_device *netdev = tp->netdev; if (res == -ENODEV) { - set_bit(RTL8152_UNPLUG, &tp->flags); + rtl_set_unplug(tp); netif_device_detach(netdev); } else { - struct net_device_stats *stats = &netdev->stats; + struct net_device_stats *stats; unsigned long flags; + stats = rtl8152_get_stats(netdev); netif_warn(tp, tx_err, netdev, "failed tx_urb %d\n", res); stats->tx_dropped += agg->skb_num; @@ -2019,8 +2134,12 @@ static void tx_bottom(struct r8152 *tp) } while (res == 0); } -static void bottom_half(struct r8152 *tp) +static void bottom_half(unsigned long data) { + struct r8152 *tp; + + tp = (struct r8152 *)data; + if (test_bit(RTL8152_UNPLUG, &tp->flags)) return; @@ -2032,33 +2151,62 @@ static void bottom_half(struct r8152 *tp) if (!netif_carrier_ok(tp->netdev)) return; - clear_bit(SCHEDULE_NAPI, &tp->flags); + clear_bit(SCHEDULE_TASKLET, &tp->flags); tx_bottom(tp); } -static int r8152_poll(struct napi_struct *napi, int budget) +static inline int __r8152_poll(struct r8152 *tp, int budget) { - struct r8152 *tp = container_of(napi, struct r8152, napi); + struct napi_struct *napi = &tp->napi; int work_done; work_done = rx_bottom(tp, budget); - bottom_half(tp); if (work_done < budget) { +#if LINUX_VERSION_CODE < KERNEL_VERSION(4,10,0) + napi_complete_done(napi, work_done); +#else if (!napi_complete_done(napi, work_done)) goto out; +#endif if (!list_empty(&tp->rx_done)) napi_schedule(napi); - else if (!skb_queue_empty(&tp->tx_queue) && - !list_empty(&tp->tx_free)) - napi_schedule(napi); } +#if LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0) out: +#endif return 
work_done; } +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,24) + +static int r8152_poll(struct net_device *dev, int *budget) +{ + struct r8152 *tp = netdev_priv(dev); + int quota = min(dev->quota, *budget); + int work_done; + + work_done = __r8152_poll(tp, quota); + + *budget -= work_done; + dev->quota -= work_done; + + return (work_done >= quota); +} + +#else + +static int r8152_poll(struct napi_struct *napi, int budget) +{ + struct r8152 *tp = container_of(napi, struct r8152, napi); + + return __r8152_poll(tp, budget); +} + +#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(2,6,24) */ + static int r8152_submit_rx(struct r8152 *tp, struct rx_agg *agg, gfp_t mem_flags) { @@ -2070,12 +2218,12 @@ int r8152_submit_rx(struct r8152 *tp, struct rx_agg *agg, gfp_t mem_flags) return 0; usb_fill_bulk_urb(agg->urb, tp->udev, usb_rcvbulkpipe(tp->udev, 1), - agg->head, agg_buf_sz, + agg->head, tp->rx_buf_sz, (usb_complete_t)read_bulk_callback, agg); ret = usb_submit_urb(agg->urb, mem_flags); if (ret == -ENODEV) { - set_bit(RTL8152_UNPLUG, &tp->flags); + rtl_set_unplug(tp); netif_device_detach(tp->netdev); } else if (ret) { struct urb *urb = agg->urb; @@ -2097,7 +2245,7 @@ int r8152_submit_rx(struct r8152 *tp, struct rx_agg *agg, gfp_t mem_flags) static void rtl_drop_queued_tx(struct r8152 *tp) { - struct net_device_stats *stats = &tp->netdev->stats; + struct net_device_stats *stats = rtl8152_get_stats(tp->netdev); struct sk_buff_head skb_head, *tx_queue = &tp->tx_queue; struct sk_buff *skb; @@ -2118,29 +2266,40 @@ static void rtl_drop_queued_tx(struct r8152 *tp) static void rtl8152_tx_timeout(struct net_device *netdev) { struct r8152 *tp = netdev_priv(netdev); +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,29) + int i; +#endif netif_warn(tp, tx_err, netdev, "Tx timeout\n"); +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,29) + for (i = 0; i < RTL8152_MAX_TX; i++) + usb_unlink_urb(tp->tx_info[i].urb); +#else usb_queue_reset_device(tp->intf); +#endif } static void 
rtl8152_set_rx_mode(struct net_device *netdev) -{ - struct r8152 *tp = netdev_priv(netdev); - - if (netif_carrier_ok(netdev)) { - set_bit(RTL8152_SET_RX_MODE, &tp->flags); - schedule_delayed_work(&tp->schedule, 0); - } -} - -static void _rtl8152_set_rx_mode(struct net_device *netdev) { struct r8152 *tp = netdev_priv(netdev); u32 mc_filter[2]; /* Multicast hash filter */ __le32 tmp[2]; u32 ocp_data; + if (in_atomic()) { + if (netif_carrier_ok(netdev)) { + set_bit(RTL8152_SET_RX_MODE, &tp->flags); + schedule_delayed_work(&tp->schedule, 0); + } + return; + } + + clear_bit(RTL8152_SET_RX_MODE, &tp->flags); + + if (!netif_carrier_ok(netdev)) + return; + netif_stop_queue(netdev); ocp_data = ocp_read_dword(tp, MCU_TYPE_PLA, PLA_RCR); ocp_data &= ~RCR_ACPT_ALL; @@ -2159,6 +2318,21 @@ static void _rtl8152_set_rx_mode(struct net_device *netdev) mc_filter[1] = 0xffffffff; mc_filter[0] = 0xffffffff; } else { +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,35) + struct dev_mc_list *mclist; + unsigned int i; + + mc_filter[1] = mc_filter[0] = 0; + for (i = 0, mclist = netdev->mc_list; + mclist && i < netdev->mc_count; + i++, mclist = mclist->next) { + int bit_nr; + + bit_nr = ether_crc(ETH_ALEN, mclist->dmi_addr) >> 26; + mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31); + ocp_data |= RCR_AM; + } +#else struct netdev_hw_addr *ha; mc_filter[1] = 0; @@ -2169,6 +2343,7 @@ static void _rtl8152_set_rx_mode(struct net_device *netdev) mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31); ocp_data |= RCR_AM; } +#endif } tmp[0] = __cpu_to_le32(swab32(mc_filter[1])); @@ -2179,6 +2354,7 @@ static void _rtl8152_set_rx_mode(struct net_device *netdev) netif_wake_queue(netdev); } +#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,18,4) static netdev_features_t rtl8152_features_check(struct sk_buff *skb, struct net_device *dev, netdev_features_t features) @@ -2194,23 +2370,51 @@ rtl8152_features_check(struct sk_buff *skb, struct net_device *dev, return features; } +#endif /* LINUX_VERSION_CODE >= 
KERNEL_VERSION(3,18,4) */ static netdev_tx_t rtl8152_start_xmit(struct sk_buff *skb, struct net_device *netdev) { struct r8152 *tp = netdev_priv(netdev); +#if LINUX_VERSION_CODE < KERNEL_VERSION(3,18,4) + if (unlikely((skb->len + sizeof(struct tx_desc)) > agg_buf_sz)) { + netdev_features_t features = netdev->features; + struct sk_buff *segs, *nskb; + + features &= ~(NETIF_F_TSO | NETIF_F_TSO6); + segs = skb_gso_segment(skb, features); + if (IS_ERR(segs) || !segs) + goto free_skb; + + do { + nskb = segs; + segs = segs->next; + nskb->next = NULL; + rtl8152_start_xmit(nskb, netdev); + } while (segs); + +free_skb: + dev_kfree_skb_any(skb); + + return NETDEV_TX_OK; + } +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,31) + netdev->trans_start = jiffies +#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(2,6,31) */ +#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(3,18,4) */ + skb_tx_timestamp(skb); skb_queue_tail(&tp->tx_queue, skb); if (!list_empty(&tp->tx_free)) { if (test_bit(SELECTIVE_SUSPEND, &tp->flags)) { - set_bit(SCHEDULE_NAPI, &tp->flags); + set_bit(SCHEDULE_TASKLET, &tp->flags); schedule_delayed_work(&tp->schedule, 0); } else { usb_mark_last_busy(tp->udev); - napi_schedule(&tp->napi); + tasklet_schedule(&tp->tx_tl); } } else if (skb_queue_len(&tp->tx_queue) > tp->tx_qlen) { netif_stop_queue(netdev); @@ -2251,15 +2455,15 @@ static void set_tx_qlen(struct r8152 *tp) sizeof(struct tx_desc)); } -static inline u8 rtl8152_get_speed(struct r8152 *tp) +static inline u16 rtl8152_get_speed(struct r8152 *tp) { - return ocp_read_byte(tp, MCU_TYPE_PLA, PLA_PHYSTATUS); + return ocp_read_word(tp, MCU_TYPE_PLA, PLA_PHYSTATUS); } static void rtl_set_eee_plus(struct r8152 *tp) { u32 ocp_data; - u8 speed; + u16 speed; speed = rtl8152_get_speed(tp); if (speed & _10bps) { @@ -2285,6 +2489,30 @@ static void rxdy_gated_en(struct r8152 *tp, bool enable) ocp_write_word(tp, MCU_TYPE_PLA, PLA_MISC_1, ocp_data); } +#if defined(RTL8152_S5_WOL) && defined(CONFIG_PM) +static int rtl_s5_wol(struct 
r8152 *tp) +{ + struct usb_device *udev = tp->udev; + + if (!tp->saved_wolopts) + return 0; + + /* usb_enable_remote_wakeup */ + if (udev->speed < USB_SPEED_SUPER) + return usb_control_msg(udev, usb_sndctrlpipe(udev, 0), + USB_REQ_SET_FEATURE, USB_RECIP_DEVICE, + USB_DEVICE_REMOTE_WAKEUP, 0, NULL, 0, + USB_CTRL_SET_TIMEOUT); + else + return usb_control_msg(udev, usb_sndctrlpipe(udev, 0), + USB_REQ_SET_FEATURE, USB_RECIP_INTERFACE, + USB_INTRF_FUNC_SUSPEND, + USB_INTRF_FUNC_SUSPEND_RW | + USB_INTRF_FUNC_SUSPEND_LP, + NULL, 0, USB_CTRL_SET_TIMEOUT); +} +#endif + static int rtl_start_rx(struct r8152 *tp) { int i, ret = 0; @@ -2332,6 +2560,12 @@ static int rtl_stop_rx(struct r8152 *tp) return 0; } +static inline void r8153b_rx_agg_chg_indicate(struct r8152 *tp) +{ + ocp_write_byte(tp, MCU_TYPE_USB, USB_UPT_RXDMA_OWN, + OWN_UPDATE | OWN_CLEAR); +} + static int rtl_enable(struct r8152 *tp) { u32 ocp_data; @@ -2342,6 +2576,16 @@ static int rtl_enable(struct r8152 *tp) ocp_data |= CR_RE | CR_TE; ocp_write_byte(tp, MCU_TYPE_PLA, PLA_CR, ocp_data); + switch (tp->version) { + case RTL_VER_08: + case RTL_VER_09: + case RTL_TEST_01: + r8153b_rx_agg_chg_indicate(tp); + break; + default: + break; + } + rxdy_gated_en(tp, false); return 0; @@ -2358,12 +2602,6 @@ static int rtl8152_enable(struct r8152 *tp) return rtl_enable(tp); } -static inline void r8153b_rx_agg_chg_indicate(struct r8152 *tp) -{ - ocp_write_byte(tp, MCU_TYPE_USB, USB_UPT_RXDMA_OWN, - OWN_UPDATE | OWN_CLEAR); -} - static void r8153_set_rx_early_timeout(struct r8152 *tp) { u32 ocp_data = tp->coalesce / 8; @@ -2380,10 +2618,19 @@ static void r8153_set_rx_early_timeout(struct r8152 *tp) case RTL_VER_08: case RTL_VER_09: /* The RTL8153B uses USB_RX_EXTRA_AGGR_TMR for rx timeout - * primarily. For USB_RX_EARLY_TIMEOUT, we fix it to 128ns. + * primarily. For USB_RX_EARLY_TIMEOUT, we fix it to 1264ns. 
*/ ocp_write_word(tp, MCU_TYPE_USB, USB_RX_EARLY_TIMEOUT, - 128 / 8); + 1264 / 8); + ocp_write_word(tp, MCU_TYPE_USB, USB_RX_EXTRA_AGGR_TMR, + ocp_data); + break; + + case RTL_TEST_01: + case RTL_VER_10: + case RTL_VER_11: + ocp_write_word(tp, MCU_TYPE_USB, USB_RX_EARLY_TIMEOUT, + 640 / 8); ocp_write_word(tp, MCU_TYPE_USB, USB_RX_EXTRA_AGGR_TMR, ocp_data); r8153b_rx_agg_chg_indicate(tp); @@ -2396,7 +2643,7 @@ static void r8153_set_rx_early_timeout(struct r8152 *tp) static void r8153_set_rx_early_size(struct r8152 *tp) { - u32 ocp_data = agg_buf_sz - rx_reserved_size(tp->netdev->mtu); + u32 ocp_data = tp->rx_buf_sz - rx_reserved_size(tp->netdev->mtu); switch (tp->version) { case RTL_VER_03: @@ -2408,6 +2655,9 @@ static void r8153_set_rx_early_size(struct r8152 *tp) break; case RTL_VER_08: case RTL_VER_09: + case RTL_TEST_01: + case RTL_VER_10: + case RTL_VER_11: ocp_write_word(tp, MCU_TYPE_USB, USB_RX_EARLY_SIZE, ocp_data / 8); r8153b_rx_agg_chg_indicate(tp); @@ -2420,6 +2670,9 @@ static void r8153_set_rx_early_size(struct r8152 *tp) static int rtl8153_enable(struct r8152 *tp) { + u32 ocp_data; + u16 speed; + if (test_bit(RTL8152_UNPLUG, &tp->flags)) return -ENODEV; @@ -2428,6 +2681,34 @@ static int rtl8153_enable(struct r8152 *tp) r8153_set_rx_early_timeout(tp); r8153_set_rx_early_size(tp); + ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_TCR1); + ocp_data &= ~(BIT(3) | BIT(9) | BIT(8)); + speed = rtl8152_get_speed(tp); + if ((speed & (_10bps | _100bps)) && !(speed & FULL_DUP)) { + ocp_data |= BIT(9); + ocp_write_word(tp, MCU_TYPE_PLA, PLA_TCR1, ocp_data); + + ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL4); + ocp_data &= ~BIT(8); + ocp_write_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL4, ocp_data); + } else { + ocp_data |= BIT(9) | BIT(8); + ocp_write_word(tp, MCU_TYPE_PLA, PLA_TCR1, ocp_data); + + ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL4); + ocp_data |= BIT(8); + ocp_write_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL4, ocp_data); + } + + 
if (tp->version == RTL_VER_09) { + ocp_data = ocp_read_word(tp, MCU_TYPE_USB, 0xd4e8); + ocp_data &= ~BIT(1); + ocp_write_word(tp, MCU_TYPE_USB, 0xd4e8, ocp_data); + usleep_range(1000, 2000); + ocp_data |= BIT(1); + ocp_write_word(tp, MCU_TYPE_USB, 0xd4e8, ocp_data); + } + return rtl_enable(tp); } @@ -2467,7 +2748,51 @@ static void rtl_disable(struct r8152 *tp) rtl_stop_rx(tp); - rtl8152_nic_reset(tp); + switch (tp->version) { + case RTL_VER_01: + case RTL_VER_02: + case RTL_VER_03: + case RTL_VER_04: + case RTL_VER_05: + case RTL_VER_06: + case RTL_VER_07: + case RTL_VER_08: + case RTL_VER_09: + rtl8152_nic_reset(tp); + break; + + case RTL_TEST_01: + case RTL_VER_10: + case RTL_VER_11: + ocp_data = ocp_read_byte(tp, MCU_TYPE_PLA, PLA_CR); + ocp_data &= ~CR_TE; + ocp_write_byte(tp, MCU_TYPE_PLA, PLA_CR, ocp_data); + + ocp_data = ocp_read_word(tp, MCU_TYPE_USB, 0xd4b0); + ocp_data &= ~BIT(0); + ocp_write_word(tp, MCU_TYPE_USB, 0xd4b0, ocp_data); + + ocp_data = ocp_read_word(tp, MCU_TYPE_USB, 0xd406); + ocp_data |= BIT(3); + ocp_write_word(tp, MCU_TYPE_USB, 0xd406, ocp_data); + + ocp_data = ocp_read_byte(tp, MCU_TYPE_PLA, PLA_CR); + ocp_data &= ~CR_RE; + ocp_write_byte(tp, MCU_TYPE_PLA, PLA_CR, ocp_data); + + ocp_data = ocp_read_word(tp, MCU_TYPE_USB, 0xd4b0); + ocp_data |= BIT(0); + ocp_write_word(tp, MCU_TYPE_USB, 0xd4b0, ocp_data); + + ocp_data = ocp_read_word(tp, MCU_TYPE_USB, 0xd406); + ocp_data &= ~BIT(3); + ocp_write_word(tp, MCU_TYPE_USB, 0xd406, ocp_data); + break; + + default: + ocp_write_byte(tp, MCU_TYPE_PLA, PLA_CR, 0); + break; + } } static void r8152_power_cut_en(struct r8152 *tp, bool enable) @@ -2490,14 +2815,75 @@ static void rtl_rx_vlan_en(struct r8152 *tp, bool enable) { u32 ocp_data; - ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_CPCR); - if (enable) - ocp_data |= CPCR_RX_VLAN; - else - ocp_data &= ~CPCR_RX_VLAN; - ocp_write_word(tp, MCU_TYPE_PLA, PLA_CPCR, ocp_data); + switch (tp->version) { + case RTL_VER_01: + case RTL_VER_02: + case 
RTL_VER_03: + case RTL_VER_04: + case RTL_VER_05: + case RTL_VER_06: + case RTL_VER_07: + case RTL_VER_08: + case RTL_VER_09: + ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_CPCR); + if (enable) + ocp_data |= CPCR_RX_VLAN; + else + ocp_data &= ~CPCR_RX_VLAN; + ocp_write_word(tp, MCU_TYPE_PLA, PLA_CPCR, ocp_data); + break; + + case RTL_TEST_01: + case RTL_VER_10: + case RTL_VER_11: + default: + ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, 0xc012); + if (enable) + ocp_data |= BIT(7) | BIT(6); + else + ocp_data &= ~(BIT(7) | BIT(6)); + ocp_write_word(tp, MCU_TYPE_PLA, PLA_CPCR, ocp_data); + break; + } } +#if LINUX_VERSION_CODE < KERNEL_VERSION(3,0,0) + +static void +rtl8152_vlan_rx_register(struct net_device *dev, struct vlan_group *grp) +{ + struct r8152 *tp = netdev_priv(dev); + + if (unlikely(tp->rtk_enable_diag)) + return; + + if (usb_autopm_get_interface(tp->intf) < 0) + return; + + mutex_lock(&tp->control); + + tp->vlgrp = grp; + if (tp->vlgrp) + rtl_rx_vlan_en(tp, true); + else + rtl_rx_vlan_en(tp, false); + + mutex_unlock(&tp->control); +} + +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,22) + +static void rtl8152_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid) +{ + struct r8152 *tp = netdev_priv(dev); + + vlan_group_set_device(tp->vlgrp, vid, NULL); +} + +#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(2,6,22) */ + +#else + static int rtl8152_set_features(struct net_device *dev, netdev_features_t features) { @@ -2505,6 +2891,9 @@ static int rtl8152_set_features(struct net_device *dev, struct r8152 *tp = netdev_priv(dev); int ret; + if (unlikely(tp->rtk_enable_diag)) + return -EBUSY; + ret = usb_autopm_get_interface(tp->intf); if (ret < 0) goto out; @@ -2526,6 +2915,8 @@ out: return ret; } +#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(3,0,0) */ + #define WAKE_ANY (WAKE_PHY | WAKE_MAGIC | WAKE_UCAST | WAKE_BCAST | WAKE_MCAST) static u32 __rtl_get_wol(struct r8152 *tp) @@ -2611,6 +3002,26 @@ static void r8153_mac_clk_spd(struct r8152 *tp, bool enable) } 
} +static void r8156_mac_clk_spd(struct r8152 *tp, bool enable) +{ + u32 ocp_data; + + /* MAC clock speed down */ + if (enable) { + ocp_write_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL, + 0x0403); + + ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL2); + ocp_data &= ~0xff; + ocp_data |= MAC_CLK_SPDWN_EN | 0x03; + ocp_write_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL2, ocp_data); + } else { + ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL2); + ocp_data &= ~MAC_CLK_SPDWN_EN; + ocp_write_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL2, ocp_data); + } +} + static void r8153_u1u2en(struct r8152 *tp, bool enable) { u8 u1u2[8]; @@ -2648,14 +3059,158 @@ static void r8153_u2p3en(struct r8152 *tp, bool enable) ocp_write_word(tp, MCU_TYPE_USB, USB_U2P3_CTRL, ocp_data); } -static void r8153b_ups_flags_w1w0(struct r8152 *tp, u32 set, u32 clear) +static void r8153b_ups_flags(struct r8152 *tp) { - u32 ocp_data; + u32 ups_flags = 0; - ocp_data = ocp_read_dword(tp, MCU_TYPE_USB, USB_UPS_FLAGS); - ocp_data &= ~clear; - ocp_data |= set; - ocp_write_dword(tp, MCU_TYPE_USB, USB_UPS_FLAGS, ocp_data); + if (tp->ups_info.green) + ups_flags |= UPS_FLAGS_EN_GREEN; + + if (tp->ups_info.aldps) + ups_flags |= UPS_FLAGS_EN_ALDPS; + + if (tp->ups_info.eee) + ups_flags |= UPS_FLAGS_EN_EEE; + + if (tp->ups_info.flow_control) + ups_flags |= UPS_FLAGS_EN_FLOW_CTR; + + if (tp->ups_info.eee_ckdiv) + ups_flags |= UPS_FLAGS_EN_EEE_CKDIV; + + if (tp->ups_info.eee_cmod_lv) + ups_flags |= UPS_FLAGS_EEE_CMOD_LV_EN; + + if (tp->ups_info._10m_ckdiv) + ups_flags |= UPS_FLAGS_EN_10M_CKDIV; + + if (tp->ups_info.eee_plloff_100) + ups_flags |= UPS_FLAGS_EEE_PLLOFF_100; + + if (tp->ups_info.eee_plloff_giga) + ups_flags |= UPS_FLAGS_EEE_PLLOFF_GIGA; + + if (tp->ups_info._250m_ckdiv) + ups_flags |= UPS_FLAGS_250M_CKDIV; + + if (tp->ups_info.ctap_short_off) + ups_flags |= UPS_FLAGS_CTAP_SHORT_DIS; + + switch (tp->ups_info.speed_duplex) { + case NWAY_10M_HALF: + ups_flags |= 1 << 16; + break; + case 
NWAY_10M_FULL: + ups_flags |= 2 << 16; + break; + case NWAY_100M_HALF: + ups_flags |= 3 << 16; + break; + case NWAY_100M_FULL: + ups_flags |= 4 << 16; + break; + case NWAY_1000M_FULL: + ups_flags |= 5 << 16; + break; + case FORCE_10M_HALF: + ups_flags |= 6 << 16; + break; + case FORCE_10M_FULL: + ups_flags |= 7 << 16; + break; + case FORCE_100M_HALF: + ups_flags |= 8 << 16; + break; + case FORCE_100M_FULL: + ups_flags |= 9 << 16; + break; + default: + break; + } + + ocp_write_dword(tp, MCU_TYPE_USB, USB_UPS_FLAGS, ups_flags); +} + +static void r8156_ups_flags(struct r8152 *tp) +{ + u32 ups_flags = 0; + + if (tp->ups_info.green) + ups_flags |= UPS_FLAGS_EN_GREEN; + + if (tp->ups_info.aldps) + ups_flags |= UPS_FLAGS_EN_ALDPS; + + if (tp->ups_info.eee) + ups_flags |= UPS_FLAGS_EN_EEE; + + if (tp->ups_info.flow_control) + ups_flags |= UPS_FLAGS_EN_FLOW_CTR; + + if (tp->ups_info.eee_ckdiv) + ups_flags |= UPS_FLAGS_EN_EEE_CKDIV; + + if (tp->ups_info._10m_ckdiv) + ups_flags |= UPS_FLAGS_EN_10M_CKDIV; + + if (tp->ups_info.eee_plloff_100) + ups_flags |= UPS_FLAGS_EEE_PLLOFF_100; + + if (tp->ups_info.eee_plloff_giga) + ups_flags |= UPS_FLAGS_EEE_PLLOFF_GIGA; + + if (tp->ups_info._250m_ckdiv) + ups_flags |= UPS_FLAGS_250M_CKDIV; + + switch (tp->ups_info.speed_duplex) { + case FORCE_10M_HALF: + ups_flags |= 0 << 16; + break; + case FORCE_10M_FULL: + ups_flags |= 1 << 16; + break; + case FORCE_100M_HALF: + ups_flags |= 2 << 16; + break; + case FORCE_100M_FULL: + ups_flags |= 3 << 16; + break; + case NWAY_10M_HALF: + ups_flags |= 4 << 16; + break; + case NWAY_10M_FULL: + ups_flags |= 5 << 16; + break; + case NWAY_100M_HALF: + ups_flags |= 6 << 16; + break; + case NWAY_100M_FULL: + ups_flags |= 7 << 16; + break; + case NWAY_1000M_FULL: + ups_flags |= 8 << 16; + break; + case NWAY_2500M_FULL: + ups_flags |= 9 << 16; + break; + default: + break; + } + + switch (tp->ups_info.lite_mode) { + case 0: + ups_flags |= 1 << 5; + break; + case 1: + ups_flags |= 0 << 5; + break; + case 2: + 
default: + ups_flags |= 2 << 5; + break; + } + + ocp_write_dword(tp, MCU_TYPE_USB, USB_UPS_FLAGS, ups_flags); } static void r8153b_green_en(struct r8152 *tp, bool enable) @@ -2676,7 +3231,7 @@ static void r8153b_green_en(struct r8152 *tp, bool enable) data |= GREEN_ETH_EN; sram_write(tp, SRAM_GREEN_CFG, data); - r8153b_ups_flags_w1w0(tp, UPS_FLAGS_EN_GREEN, 0); + tp->ups_info.green = enable; } static u16 r8153_phy_status(struct r8152 *tp, u16 desired) @@ -2708,6 +3263,8 @@ static void r8153b_ups_en(struct r8152 *tp, bool enable) u32 ocp_data = ocp_read_byte(tp, MCU_TYPE_USB, USB_POWER_CUT); if (enable) { + r8153b_ups_flags(tp); + ocp_data |= UPS_EN | USP_PREWAKE | PHASE2_EN; ocp_write_byte(tp, MCU_TYPE_USB, USB_POWER_CUT, ocp_data); @@ -2715,7 +3272,9 @@ static void r8153b_ups_en(struct r8152 *tp, bool enable) ocp_data |= BIT(0); ocp_write_byte(tp, MCU_TYPE_USB, 0xcfff, ocp_data); } else { + bool pcut_enter; u16 data; + int i; ocp_data &= ~(UPS_EN | USP_PREWAKE); ocp_write_byte(tp, MCU_TYPE_USB, USB_POWER_CUT, ocp_data); @@ -2725,20 +3284,26 @@ static void r8153b_ups_en(struct r8152 *tp, bool enable) ocp_write_byte(tp, MCU_TYPE_USB, 0xcfff, ocp_data); ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_MISC_0); + pcut_enter = !!(ocp_data & PCUT_STATUS); ocp_data &= ~PCUT_STATUS; ocp_write_word(tp, MCU_TYPE_USB, USB_MISC_0, ocp_data); + for (i = 0; pcut_enter && i < 500; i++) { + if (ocp_read_word(tp, MCU_TYPE_PLA, PLA_BOOT_CTRL) & + AUTOLOAD_DONE) + break; + msleep(20); + } + data = r8153_phy_status(tp, 0); switch (data) { case PHY_STAT_PWRDN: case PHY_STAT_EXT_INIT: - r8153b_green_en(tp, - test_bit(GREEN_ETHERNET, &tp->flags)); + tp->rtl_ops.hw_phy_cfg(tp); data = r8152_mdio_read(tp, MII_BMCR); data &= ~BMCR_PDOWN; - data |= BMCR_RESET; r8152_mdio_write(tp, MII_BMCR, data); data = r8153_phy_status(tp, PHY_STAT_LAN_ON); @@ -2747,11 +3312,48 @@ static void r8153b_ups_en(struct r8152 *tp, bool enable) if (data != PHY_STAT_LAN_ON) netif_warn(tp, link, tp->netdev, "PHY not 
ready"); + + if (!pcut_enter) + break; + + rtl8152_set_speed(tp, tp->autoneg, tp->speed, + tp->duplex, tp->advertising); break; } } } +static void r8156_ups_en(struct r8152 *tp, bool enable) +{ + u32 ocp_data = ocp_read_byte(tp, MCU_TYPE_USB, USB_POWER_CUT); + + if (enable) { + r8156_ups_flags(tp); + + ocp_data |= UPS_EN | USP_PREWAKE | PHASE2_EN; + ocp_write_byte(tp, MCU_TYPE_USB, USB_POWER_CUT, ocp_data); + + ocp_data = ocp_read_byte(tp, MCU_TYPE_USB, 0xcfff); + ocp_data |= BIT(0); + ocp_write_byte(tp, MCU_TYPE_USB, 0xcfff, ocp_data); + } else { + ocp_data &= ~(UPS_EN | USP_PREWAKE); + ocp_write_byte(tp, MCU_TYPE_USB, USB_POWER_CUT, ocp_data); + + ocp_data = ocp_read_byte(tp, MCU_TYPE_USB, 0xcfff); + ocp_data &= ~BIT(0); + ocp_write_byte(tp, MCU_TYPE_USB, 0xcfff, ocp_data); + +// ocp_data = ocp_read_word(tp, MCU_TYPE_USB, 0xd32a); +// ocp_data &= ~(BIT(8) | BIT(9)); +// ocp_write_word(tp, MCU_TYPE_USB, 0xd32a, ocp_data); + + ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_MISC_0); + if (ocp_data & PCUT_STATUS) + tp->rtl_ops.hw_phy_cfg(tp); + } +} + static void r8153_power_cut_en(struct r8152 *tp, bool enable) { u32 ocp_data; @@ -2784,20 +3386,24 @@ static void r8153b_power_cut_en(struct r8152 *tp, bool enable) ocp_write_word(tp, MCU_TYPE_USB, USB_MISC_0, ocp_data); } -static void r8153b_queue_wake(struct r8152 *tp, bool enable) +static void r8153_queue_wake(struct r8152 *tp, bool enable) { u32 ocp_data; - ocp_data = ocp_read_byte(tp, MCU_TYPE_PLA, 0xd38a); + ocp_data = ocp_read_byte(tp, MCU_TYPE_PLA, 0xd38c); if (enable) ocp_data |= BIT(0); else ocp_data &= ~BIT(0); + ocp_write_byte(tp, MCU_TYPE_PLA, 0xd38c, ocp_data); + + ocp_data = ocp_read_byte(tp, MCU_TYPE_PLA, 0xd38a); + ocp_data &= ~BIT(0); ocp_write_byte(tp, MCU_TYPE_PLA, 0xd38a, ocp_data); - ocp_data = ocp_read_byte(tp, MCU_TYPE_PLA, 0xd38c); - ocp_data &= ~BIT(0); - ocp_write_byte(tp, MCU_TYPE_PLA, 0xd38c, ocp_data); + ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, 0xd398); + ocp_data &= ~BIT(8); + 
ocp_write_word(tp, MCU_TYPE_PLA, 0xd398, ocp_data); } static bool rtl_can_wakeup(struct r8152 *tp) @@ -2839,11 +3445,15 @@ static void rtl_runtime_suspend_enable(struct r8152 *tp, bool enable) static void rtl8153_runtime_enable(struct r8152 *tp, bool enable) { if (enable) { + if (tp->version == RTL_VER_06) + r8153_queue_wake(tp, true); r8153_u1u2en(tp, false); r8153_u2p3en(tp, false); r8153_mac_clk_spd(tp, true); rtl_runtime_suspend_enable(tp, true); } else { + if (tp->version == RTL_VER_06) + r8153_queue_wake(tp, false); rtl_runtime_suspend_enable(tp, false); r8153_mac_clk_spd(tp, false); @@ -2865,20 +3475,56 @@ static void rtl8153_runtime_enable(struct r8152 *tp, bool enable) static void rtl8153b_runtime_enable(struct r8152 *tp, bool enable) { if (enable) { - r8153b_queue_wake(tp, true); + r8153_queue_wake(tp, true); r8153b_u1u2en(tp, false); r8153_u2p3en(tp, false); rtl_runtime_suspend_enable(tp, true); r8153b_ups_en(tp, true); } else { r8153b_ups_en(tp, false); - r8153b_queue_wake(tp, false); + r8153_queue_wake(tp, false); + rtl_runtime_suspend_enable(tp, false); +// r8153_u2p3en(tp, true); + r8153b_u1u2en(tp, true); + } +} + +static void rtl8156_runtime_enable(struct r8152 *tp, bool enable) +{ + if (enable) { + r8153_queue_wake(tp, true); + r8153b_u1u2en(tp, false); + r8153_u2p3en(tp, false); + rtl_runtime_suspend_enable(tp, true); +// if (tp->version != RTL_VER_10 || +// tp->udev->speed == USB_SPEED_HIGH) +// r8156_ups_en(tp, true); + } else { + r8156_ups_en(tp, false); + r8153_queue_wake(tp, false); rtl_runtime_suspend_enable(tp, false); r8153_u2p3en(tp, true); r8153b_u1u2en(tp, true); } } +static int rtl_nway_restart(struct r8152 *tp) +{ + int r = -EINVAL; + int bmcr; + + /* if autoneg is off, it's an error */ + bmcr = r8152_mdio_read(tp, MII_BMCR); + + if (bmcr & BMCR_ANENABLE) { + bmcr |= BMCR_ANRESTART; + r8152_mdio_write(tp, MII_BMCR, bmcr); + r = 0; + } + + return r; +} + static void r8153_teredo_off(struct r8152 *tp) { u32 ocp_data; @@ -2899,6 +3545,9 
@@ static void r8153_teredo_off(struct r8152 *tp) case RTL_VER_08: case RTL_VER_09: + case RTL_TEST_01: + case RTL_VER_10: + case RTL_VER_11: /* The bit 0 ~ 7 are relative with teredo settings. They are * W1C (write 1 to clear), so set all 1 to disable it. */ @@ -2925,6 +3574,661 @@ static void rtl_reset_bmu(struct r8152 *tp) ocp_write_byte(tp, MCU_TYPE_USB, USB_BMU_RESET, ocp_data); } +static void rtl_clear_bp(struct r8152 *tp) +{ + ocp_write_dword(tp, MCU_TYPE_PLA, PLA_BP_0, 0); + ocp_write_dword(tp, MCU_TYPE_PLA, PLA_BP_2, 0); + ocp_write_dword(tp, MCU_TYPE_PLA, PLA_BP_4, 0); + ocp_write_dword(tp, MCU_TYPE_PLA, PLA_BP_6, 0); + ocp_write_dword(tp, MCU_TYPE_USB, USB_BP_0, 0); + ocp_write_dword(tp, MCU_TYPE_USB, USB_BP_2, 0); + ocp_write_dword(tp, MCU_TYPE_USB, USB_BP_4, 0); + ocp_write_dword(tp, MCU_TYPE_USB, USB_BP_6, 0); + usleep_range(3000, 6000); + ocp_write_word(tp, MCU_TYPE_PLA, PLA_BP_BA, 0); + ocp_write_word(tp, MCU_TYPE_USB, USB_BP_BA, 0); +} + +static void r8153_clear_bp(struct r8152 *tp) +{ + ocp_write_byte(tp, MCU_TYPE_PLA, PLA_BP_EN, 0); + ocp_write_byte(tp, MCU_TYPE_USB, USB_BP_EN, 0); + rtl_clear_bp(tp); +} + +static void r8153b_clear_bp(struct r8152 *tp, u16 type) +{ + if (type == MCU_TYPE_PLA) + ocp_write_byte(tp, MCU_TYPE_PLA, PLA_BP_EN, 0); + else + ocp_write_byte(tp, MCU_TYPE_USB, USB_BP2_EN, 0); + + ocp_write_word(tp, type, PLA_BP_0, 0); + ocp_write_word(tp, type, PLA_BP_1, 0); + ocp_write_word(tp, type, PLA_BP_2, 0); + ocp_write_word(tp, type, PLA_BP_3, 0); + ocp_write_word(tp, type, PLA_BP_4, 0); + ocp_write_word(tp, type, PLA_BP_5, 0); + ocp_write_word(tp, type, PLA_BP_6, 0); + ocp_write_word(tp, type, PLA_BP_7, 0); + + if (type == MCU_TYPE_USB) { + ocp_write_word(tp, MCU_TYPE_USB, USB_BP_8, 0); + ocp_write_word(tp, MCU_TYPE_USB, USB_BP_9, 0); + ocp_write_word(tp, MCU_TYPE_USB, USB_BP_10, 0); + ocp_write_word(tp, MCU_TYPE_USB, USB_BP_11, 0); + ocp_write_word(tp, MCU_TYPE_USB, USB_BP_12, 0); + ocp_write_word(tp, MCU_TYPE_USB, USB_BP_13, 0); 
+ ocp_write_word(tp, MCU_TYPE_USB, USB_BP_14, 0); + ocp_write_word(tp, MCU_TYPE_USB, USB_BP_15, 0); + } + usleep_range(3000, 6000); + ocp_write_word(tp, type, PLA_BP_BA, 0); +} + +static void patch4(struct r8152 *tp) +{ + u8 data; + + data = ocp_read_byte(tp, MCU_TYPE_USB, 0xd429); + data |= 0x80; + ocp_write_byte(tp, MCU_TYPE_USB, 0xd429, data); + ocp_write_word(tp, MCU_TYPE_USB, 0xc0ce, 0x0210); + data = ocp_read_byte(tp, MCU_TYPE_USB, 0xd429); + data &= ~0x80; + ocp_write_byte(tp, MCU_TYPE_USB, 0xd429, data); +} + +static void r8152b_firmware(struct r8152 *tp) +{ + if (tp->version == RTL_VER_01) { + int i; + static u8 pla_patch_a[] = { + 0x08, 0xe0, 0x40, 0xe0, + 0x78, 0xe0, 0x85, 0xe0, + 0x5d, 0xe1, 0xa1, 0xe1, + 0xa3, 0xe1, 0xab, 0xe1, + 0x31, 0xc3, 0x60, 0x72, + 0xa0, 0x49, 0x10, 0xf0, + 0xa4, 0x49, 0x0e, 0xf0, + 0x2c, 0xc3, 0x62, 0x72, + 0x26, 0x70, 0x80, 0x49, + 0x05, 0xf0, 0x2f, 0x48, + 0x62, 0x9a, 0x24, 0x70, + 0x60, 0x98, 0x24, 0xc3, + 0x60, 0x99, 0x23, 0xc3, + 0x00, 0xbb, 0x2c, 0x75, + 0xdc, 0x21, 0xbc, 0x25, + 0x04, 0x13, 0x0a, 0xf0, + 0x03, 0x13, 0x08, 0xf0, + 0x02, 0x13, 0x06, 0xf0, + 0x01, 0x13, 0x04, 0xf0, + 0x08, 0x13, 0x02, 0xf0, + 0x03, 0xe0, 0xd4, 0x49, + 0x04, 0xf1, 0x14, 0xc2, + 0x12, 0xc3, 0x00, 0xbb, + 0x12, 0xc3, 0x60, 0x75, + 0xd0, 0x49, 0x05, 0xf1, + 0x50, 0x48, 0x60, 0x9d, + 0x09, 0xc6, 0x00, 0xbe, + 0xd0, 0x48, 0x60, 0x9d, + 0xf3, 0xe7, 0xc2, 0xc0, + 0x38, 0xd2, 0xc6, 0xd2, + 0x84, 0x17, 0xa2, 0x13, + 0x0c, 0x17, 0xbc, 0xc0, + 0xa2, 0xd1, 0x33, 0xc5, + 0xa0, 0x74, 0xc0, 0x49, + 0x1f, 0xf0, 0x30, 0xc5, + 0xa0, 0x73, 0x00, 0x13, + 0x04, 0xf1, 0xa2, 0x73, + 0x00, 0x13, 0x14, 0xf0, + 0x28, 0xc5, 0xa0, 0x74, + 0xc8, 0x49, 0x1b, 0xf1, + 0x26, 0xc5, 0xa0, 0x76, + 0xa2, 0x74, 0x01, 0x06, + 0x20, 0x37, 0xa0, 0x9e, + 0xa2, 0x9c, 0x1e, 0xc5, + 0xa2, 0x73, 0x23, 0x40, + 0x10, 0xf8, 0x04, 0xf3, + 0xa0, 0x73, 0x33, 0x40, + 0x0c, 0xf8, 0x15, 0xc5, + 0xa0, 0x74, 0x41, 0x48, + 0xa0, 0x9c, 0x14, 0xc5, + 0xa0, 0x76, 0x62, 0x48, + 0xe0, 0x48, 0xa0, 0x9e, 
+ 0x10, 0xc6, 0x00, 0xbe, + 0x0a, 0xc5, 0xa0, 0x74, + 0x48, 0x48, 0xa0, 0x9c, + 0x0b, 0xc5, 0x20, 0x1e, + 0xa0, 0x9e, 0xe5, 0x48, + 0xa0, 0x9e, 0xf0, 0xe7, + 0xbc, 0xc0, 0xc8, 0xd2, + 0xcc, 0xd2, 0x28, 0xe4, + 0x22, 0x02, 0xf0, 0xc0, + 0x0b, 0xc0, 0x00, 0x71, + 0x0a, 0xc0, 0x00, 0x72, + 0xa0, 0x49, 0x04, 0xf0, + 0xa4, 0x49, 0x02, 0xf0, + 0x93, 0x48, 0x04, 0xc0, + 0x00, 0xb8, 0x00, 0xe4, + 0xc2, 0xc0, 0x8c, 0x09, + 0x14, 0xc2, 0x40, 0x73, + 0xba, 0x48, 0x40, 0x9b, + 0x11, 0xc2, 0x40, 0x73, + 0xb0, 0x49, 0x17, 0xf0, + 0xbf, 0x49, 0x03, 0xf1, + 0x09, 0xc5, 0x00, 0xbd, + 0xb1, 0x49, 0x11, 0xf0, + 0xb1, 0x48, 0x40, 0x9b, + 0x02, 0xc2, 0x00, 0xba, + 0x82, 0x18, 0x00, 0xa0, + 0x1e, 0xfc, 0xbc, 0xc0, + 0xf0, 0xc0, 0xde, 0xe8, + 0x00, 0x80, 0x00, 0x60, + 0x2c, 0x75, 0xd4, 0x49, + 0x12, 0xf1, 0x29, 0xe0, + 0xf8, 0xc2, 0x46, 0x71, + 0xf7, 0xc2, 0x40, 0x73, + 0xbe, 0x49, 0x03, 0xf1, + 0xf5, 0xc7, 0x02, 0xe0, + 0xf2, 0xc7, 0x4f, 0x30, + 0x26, 0x62, 0xa1, 0x49, + 0xf0, 0xf1, 0x22, 0x72, + 0xa0, 0x49, 0xed, 0xf1, + 0x25, 0x25, 0x18, 0x1f, + 0x97, 0x30, 0x91, 0x30, + 0x36, 0x9a, 0x2c, 0x75, + 0x32, 0xc3, 0x60, 0x73, + 0xb1, 0x49, 0x0d, 0xf1, + 0xdc, 0x21, 0xbc, 0x25, + 0x27, 0xc6, 0xc0, 0x77, + 0x04, 0x13, 0x18, 0xf0, + 0x03, 0x13, 0x19, 0xf0, + 0x02, 0x13, 0x1a, 0xf0, + 0x01, 0x13, 0x1b, 0xf0, + 0xd4, 0x49, 0x03, 0xf1, + 0x1c, 0xc5, 0x00, 0xbd, + 0xcd, 0xc6, 0xc6, 0x67, + 0x2e, 0x75, 0xd7, 0x22, + 0xdd, 0x26, 0x05, 0x15, + 0x1a, 0xf0, 0x14, 0xc6, + 0x00, 0xbe, 0x13, 0xc5, + 0x00, 0xbd, 0x12, 0xc5, + 0x00, 0xbd, 0xf1, 0x49, + 0xfb, 0xf1, 0xef, 0xe7, + 0xf4, 0x49, 0xfa, 0xf1, + 0xec, 0xe7, 0xf3, 0x49, + 0xf7, 0xf1, 0xe9, 0xe7, + 0xf2, 0x49, 0xf4, 0xf1, + 0xe6, 0xe7, 0xb6, 0xc0, + 0x6a, 0x14, 0xac, 0x13, + 0xd6, 0x13, 0xfa, 0x14, + 0xa0, 0xd1, 0x00, 0x00, + 0xc0, 0x75, 0xd0, 0x49, + 0x46, 0xf0, 0x26, 0x72, + 0xa7, 0x49, 0x43, 0xf0, + 0x22, 0x72, 0x25, 0x25, + 0x20, 0x1f, 0x97, 0x30, + 0x91, 0x30, 0x40, 0x73, + 0xf3, 0xc4, 0x1c, 0x40, + 0x04, 0xf0, 0xd7, 0x49, + 0x05, 0xf1, 0x37, 
0xe0, + 0x53, 0x48, 0xc0, 0x9d, + 0x08, 0x02, 0x40, 0x66, + 0x64, 0x27, 0x06, 0x16, + 0x30, 0xf1, 0x46, 0x63, + 0x3b, 0x13, 0x2d, 0xf1, + 0x34, 0x9b, 0x18, 0x1b, + 0x93, 0x30, 0x2b, 0xc3, + 0x10, 0x1c, 0x2b, 0xe8, + 0x01, 0x14, 0x25, 0xf1, + 0x00, 0x1d, 0x26, 0x1a, + 0x8a, 0x30, 0x22, 0x73, + 0xb5, 0x25, 0x0e, 0x0b, + 0x00, 0x1c, 0x2c, 0xe8, + 0x1f, 0xc7, 0x27, 0x40, + 0x1a, 0xf1, 0x38, 0xe8, + 0x32, 0x1f, 0x8f, 0x30, + 0x08, 0x1b, 0x24, 0xe8, + 0x36, 0x72, 0x46, 0x77, + 0x00, 0x17, 0x0d, 0xf0, + 0x13, 0xc3, 0x1f, 0x40, + 0x03, 0xf1, 0x00, 0x1f, + 0x46, 0x9f, 0x44, 0x77, + 0x9f, 0x44, 0x5f, 0x44, + 0x17, 0xe8, 0x0a, 0xc7, + 0x27, 0x40, 0x05, 0xf1, + 0x02, 0xc3, 0x00, 0xbb, + 0x50, 0x1a, 0x06, 0x1a, + 0xff, 0xc7, 0x00, 0xbf, + 0xb8, 0xcd, 0xff, 0xff, + 0x02, 0x0c, 0x54, 0xa5, + 0xdc, 0xa5, 0x2f, 0x40, + 0x05, 0xf1, 0x00, 0x14, + 0xfa, 0xf1, 0x01, 0x1c, + 0x02, 0xe0, 0x00, 0x1c, + 0x80, 0xff, 0xb0, 0x49, + 0x04, 0xf0, 0x01, 0x0b, + 0xd3, 0xa1, 0x03, 0xe0, + 0x02, 0x0b, 0xd3, 0xa5, + 0x27, 0x31, 0x20, 0x37, + 0x02, 0x0b, 0xd3, 0xa5, + 0x27, 0x31, 0x20, 0x37, + 0x00, 0x13, 0xfb, 0xf1, + 0x80, 0xff, 0x22, 0x73, + 0xb5, 0x25, 0x18, 0x1e, + 0xde, 0x30, 0xd9, 0x30, + 0x64, 0x72, 0x11, 0x1e, + 0x68, 0x23, 0x16, 0x31, + 0x80, 0xff, 0xd4, 0x49, + 0x28, 0xf0, 0x02, 0xb4, + 0x2a, 0xc4, 0x00, 0x1d, + 0x2e, 0xe8, 0xe0, 0x73, + 0xb9, 0x21, 0xbd, 0x25, + 0x04, 0x13, 0x02, 0xf0, + 0x1a, 0xe0, 0x22, 0xc4, + 0x23, 0xc3, 0x2f, 0xe8, + 0x23, 0xc3, 0x2d, 0xe8, + 0x00, 0x1d, 0x21, 0xe8, + 0xe2, 0x73, 0xbb, 0x49, + 0xfc, 0xf0, 0xe0, 0x73, + 0xb7, 0x48, 0x03, 0xb4, + 0x81, 0x1d, 0x19, 0xe8, + 0x40, 0x1a, 0x84, 0x1d, + 0x16, 0xe8, 0x12, 0xc3, + 0x1e, 0xe8, 0x03, 0xb0, + 0x81, 0x1d, 0x11, 0xe8, + 0x0e, 0xc3, 0x19, 0xe8, + 0x02, 0xb0, 0x06, 0xc7, + 0x04, 0x1e, 0xe0, 0x9e, + 0x02, 0xc6, 0x00, 0xbe, + 0x22, 0x02, 0x20, 0xe4, + 0x04, 0xb8, 0x34, 0xb0, + 0x00, 0x02, 0x00, 0x03, + 0x00, 0x0e, 0x00, 0x0c, + 0x09, 0xc7, 0xe0, 0x9b, + 0xe2, 0x9a, 0xe4, 0x9c, + 0xe6, 0x8d, 0xe6, 0x76, + 0xef, 0x49, 
0xfe, 0xf1, + 0x80, 0xff, 0x08, 0xea, + 0x82, 0x1d, 0xf5, 0xef, + 0x00, 0x1a, 0x88, 0x1d, + 0xf2, 0xef, 0xed, 0xc2, + 0xf0, 0xef, 0x80, 0xff, + 0x02, 0xc6, 0x00, 0xbe, + 0x46, 0x06, 0x08, 0xc2, + 0x40, 0x73, 0x3a, 0x48, + 0x40, 0x9b, 0x06, 0xff, + 0x02, 0xc6, 0x00, 0xbe, + 0x86, 0x17, 0x1e, 0xfc, + 0x36, 0xf0, 0x08, 0x1c, + 0xea, 0x8c, 0xe3, 0x64, + 0xc7, 0x49, 0x25, 0xf1, + 0xe0, 0x75, 0xff, 0x1b, + 0xeb, 0x47, 0xff, 0x1b, + 0x6b, 0x47, 0xe0, 0x9d, + 0x15, 0xc3, 0x60, 0x75, + 0xd8, 0x49, 0x04, 0xf0, + 0x81, 0x1d, 0xe2, 0x8d, + 0x05, 0xe0, 0xe2, 0x63, + 0x81, 0x1d, 0xdd, 0x47, + 0xe2, 0x8b, 0x0b, 0xc3, + 0x00, 0x1d, 0x61, 0x8d, + 0x3c, 0x03, 0x60, 0x75, + 0xd8, 0x49, 0x06, 0xf1, + 0xdf, 0x48, 0x61, 0x95, + 0x16, 0xe0, 0x4e, 0xe8, + 0x12, 0xe8, 0x21, 0xc5, + 0xa0, 0x73, 0xb0, 0x49, + 0x03, 0xf0, 0x31, 0x48, + 0xa0, 0x9b, 0x0d, 0xe0, + 0xc0, 0x49, 0x0b, 0xf1, + 0xe2, 0x63, 0x7e, 0x1d, + 0xdd, 0x46, 0xe2, 0x8b, + 0xe0, 0x75, 0x83, 0x1b, + 0xeb, 0x46, 0xfe, 0x1b, + 0x6b, 0x46, 0xe0, 0x9d, + 0xe4, 0x49, 0x11, 0xf0, + 0x10, 0x1d, 0xea, 0x8d, + 0xe3, 0x64, 0xc6, 0x49, + 0x09, 0xf1, 0x07, 0xc5, + 0xa0, 0x73, 0xb1, 0x48, + 0xa0, 0x9b, 0x02, 0xc5, + 0x00, 0xbd, 0xe6, 0x04, + 0xa0, 0xd1, 0x02, 0xc5, + 0x00, 0xbd, 0xfe, 0x04, + 0x02, 0xc5, 0x00, 0xbd, + 0x30, 0x05, 0x00, 0x00 }; + static u16 ram_code1[] = { + 0x9700, 0x7fe0, 0x4c00, 0x4007, + 0x4400, 0x4800, 0x7c1f, 0x4c00, + 0x5310, 0x6000, 0x7c07, 0x6800, + 0x673e, 0x0000, 0x0000, 0x571f, + 0x5ffb, 0xaa05, 0x5b58, 0x7d80, + 0x6100, 0x3019, 0x5b64, 0x7d80, + 0x6080, 0xa6f8, 0xdcdb, 0x0015, + 0xb915, 0xb511, 0xd16b, 0x000f, + 0xb40f, 0xd06b, 0x000d, 0xb206, + 0x7c01, 0x5800, 0x7c04, 0x5c00, + 0x3011, 0x7c01, 0x5801, 0x7c04, + 0x5c04, 0x3019, 0x30a5, 0x3127, + 0x31d5, 0x7fe0, 0x4c60, 0x7c07, + 0x6803, 0x7d00, 0x6900, 0x65a0, + 0x0000, 0x0000, 0xaf03, 0x6015, + 0x303e, 0x6017, 0x57e0, 0x580c, + 0x588c, 0x7fdd, 0x5fa2, 0x4827, + 0x7c1f, 0x4c00, 0x7c1f, 0x4c10, + 0x8400, 0x7c30, 0x6020, 0x48bf, + 0x7c1f, 0x4c00, 0x7c1f, 0x4c01, + 
0x7c07, 0x6803, 0xb806, 0x7c08, + 0x6800, 0x0000, 0x0000, 0x305c, + 0x7c08, 0x6808, 0x0000, 0x0000, + 0xae06, 0x7c02, 0x5c02, 0x0000, + 0x0000, 0x3067, 0x8e05, 0x7c02, + 0x5c00, 0x0000, 0x0000, 0xad06, + 0x7c20, 0x5c20, 0x0000, 0x0000, + 0x3072, 0x8d05, 0x7c20, 0x5c00, + 0x0000, 0x0000, 0xa008, 0x7c07, + 0x6800, 0xb8db, 0x7c07, 0x6803, + 0xd9b3, 0x00d7, 0x7fe0, 0x4c80, + 0x7c08, 0x6800, 0x0000, 0x0000, + 0x7c23, 0x5c23, 0x481d, 0x7c1f, + 0x4c00, 0x7c1f, 0x4c02, 0x5310, + 0x81ff, 0x30f5, 0x7fe0, 0x4d00, + 0x4832, 0x7c1f, 0x4c00, 0x7c1f, + 0x4c10, 0x7c08, 0x6000, 0xa49e, + 0x7c07, 0x6800, 0xb89b, 0x7c07, + 0x6803, 0xd9b3, 0x00f9, 0x7fe0, + 0x4d20, 0x7e00, 0x6200, 0x3001, + 0x7fe0, 0x4dc0, 0xd09d, 0x0002, + 0xb4fe, 0x7fe0, 0x4d80, 0x7c04, + 0x6004, 0x7c07, 0x6802, 0x6728, + 0x0000, 0x0000, 0x7c08, 0x6000, + 0x486c, 0x7c1f, 0x4c00, 0x7c1f, + 0x4c01, 0x9503, 0x7e00, 0x6200, + 0x571f, 0x5fbb, 0xaa05, 0x5b58, + 0x7d80, 0x6100, 0x30c2, 0x5b64, + 0x7d80, 0x6080, 0xcdab, 0x0063, + 0xcd8d, 0x0061, 0xd96b, 0x005f, + 0xd0a0, 0x00d7, 0xcba0, 0x0003, + 0x80ec, 0x30cf, 0x30dc, 0x7fe0, + 0x4ce0, 0x4832, 0x7c1f, 0x4c00, + 0x7c1f, 0x4c08, 0x7c08, 0x6008, + 0x8300, 0xb902, 0x30a5, 0x308a, + 0x7fe0, 0x4da0, 0x65a8, 0x0000, + 0x0000, 0x56a0, 0x590c, 0x7ffd, + 0x5fa2, 0xae06, 0x7c02, 0x5c02, + 0x0000, 0x0000, 0x30f0, 0x8e05, + 0x7c02, 0x5c00, 0x0000, 0x0000, + 0xcba4, 0x0004, 0xcd8d, 0x0002, + 0x80f1, 0x7fe0, 0x4ca0, 0x7c08, + 0x6408, 0x0000, 0x0000, 0x7d00, + 0x6800, 0xb603, 0x7c10, 0x6010, + 0x7d1f, 0x551f, 0x5fb3, 0xaa07, + 0x7c80, 0x5800, 0x5b58, 0x7d80, + 0x6100, 0x310f, 0x7c80, 0x5800, + 0x5b64, 0x7d80, 0x6080, 0x4827, + 0x7c1f, 0x4c00, 0x7c1f, 0x4c10, + 0x8400, 0x7c10, 0x6000, 0x7fe0, + 0x4cc0, 0x5fbb, 0x4824, 0x7c1f, + 0x4c00, 0x7c1f, 0x4c04, 0x8200, + 0x7ce0, 0x5400, 0x6728, 0x0000, + 0x0000, 0x30cf, 0x3001, 0x7fe0, + 0x4e00, 0x4007, 0x4400, 0x5310, + 0x7c07, 0x6800, 0x673e, 0x0000, + 0x0000, 0x570f, 0x5fff, 0xaa05, + 0x585b, 0x7d80, 0x6100, 0x313b, + 0x5867, 0x7d80, 0x6080, 
0x9403, + 0x7e00, 0x6200, 0xcda3, 0x00e7, + 0xcd85, 0x00e5, 0xd96b, 0x00e3, + 0x96e3, 0x7c07, 0x6800, 0x673e, + 0x0000, 0x0000, 0x7fe0, 0x4e20, + 0x96db, 0x8b04, 0x7c08, 0x5008, + 0xab03, 0x7c08, 0x5000, 0x7c07, + 0x6801, 0x677e, 0x0000, 0x0000, + 0xdb7c, 0x00ec, 0x0000, 0x7fe1, + 0x4f40, 0x4837, 0x4418, 0x41c7, + 0x7fe0, 0x4e40, 0x7c40, 0x5400, + 0x7c1f, 0x4c01, 0x7c1f, 0x4c01, + 0x8fbf, 0xd2a0, 0x004b, 0x9204, + 0xa042, 0x3168, 0x3127, 0x7fe1, + 0x4f60, 0x489c, 0x4628, 0x7fe0, + 0x4e60, 0x7e28, 0x4628, 0x7c40, + 0x5400, 0x7c01, 0x5800, 0x7c04, + 0x5c00, 0x41e8, 0x7c1f, 0x4c01, + 0x7c1f, 0x4c01, 0x8fa5, 0xb241, + 0xa02a, 0x3182, 0x7fe0, 0x4ea0, + 0x7c02, 0x4402, 0x4448, 0x4894, + 0x7c1f, 0x4c01, 0x7c1f, 0x4c03, + 0x4824, 0x7c1f, 0x4c07, 0x41ef, + 0x41ff, 0x4891, 0x7c1f, 0x4c07, + 0x7c1f, 0x4c17, 0x8400, 0x8ef8, + 0x41c7, 0x8f8a, 0x92d5, 0xa10f, + 0xd480, 0x0008, 0xd580, 0x00b8, + 0xa202, 0x319d, 0x7c04, 0x4404, + 0x319d, 0xd484, 0x00f3, 0xd484, + 0x00f1, 0x3127, 0x7fe0, 0x4ee0, + 0x7c40, 0x5400, 0x4488, 0x41cf, + 0x3127, 0x7fe0, 0x4ec0, 0x48f3, + 0x7c1f, 0x4c01, 0x7c1f, 0x4c09, + 0x4508, 0x41c7, 0x8fb0, 0xd218, + 0x00ae, 0xd2a4, 0x009e, 0x31be, + 0x7fe0, 0x4e80, 0x4832, 0x7c1f, + 0x4c01, 0x7c1f, 0x4c11, 0x4428, + 0x7c40, 0x5440, 0x7c01, 0x5801, + 0x7c04, 0x5c04, 0x41e8, 0xa4b3, + 0x31d3, 0x7fe0, 0x4f20, 0x7c07, + 0x6800, 0x673e, 0x0000, 0x0000, + 0x570f, 0x5fff, 0xaa04, 0x585b, + 0x6100, 0x31e4, 0x5867, 0x6080, + 0xbcf1, 0x3001 }; + + patch4(tp); + rtl_clear_bp(tp); + + generic_ocp_write(tp, 0xf800, 0x3f, sizeof(pla_patch_a), + pla_patch_a, MCU_TYPE_PLA); + + ocp_write_word(tp, MCU_TYPE_PLA, 0xfc26, 0x8000); + + ocp_write_word(tp, MCU_TYPE_PLA, 0xfc28, 0x170b); + ocp_write_word(tp, MCU_TYPE_PLA, 0xfc2a, 0x01e1); + ocp_write_word(tp, MCU_TYPE_PLA, 0xfc2c, 0x0989); + ocp_write_word(tp, MCU_TYPE_PLA, 0xfc2e, 0x1349); + ocp_write_word(tp, MCU_TYPE_PLA, 0xfc30, 0x01b7); + ocp_write_word(tp, MCU_TYPE_PLA, 0xfc32, 0x061d); + + ocp_write_word(tp, MCU_TYPE_PLA, 0xe422, 
0x0020); + ocp_write_word(tp, MCU_TYPE_PLA, 0xe420, 0x0018); + ocp_write_word(tp, MCU_TYPE_PLA, 0xfc34, 0x1785); + ocp_write_word(tp, MCU_TYPE_PLA, 0xfc36, 0x047b); + + ocp_write_word(tp, MCU_TYPE_PLA, PLA_OCP_GPHY_BASE, 0x2000); + ocp_write_word(tp, MCU_TYPE_PLA, 0xb092, 0x7070); + ocp_write_word(tp, MCU_TYPE_PLA, 0xb098, 0x0600); + for (i = 0; i < ARRAY_SIZE(ram_code1); i++) + ocp_write_word(tp, MCU_TYPE_PLA, 0xb09a, ram_code1[i]); + ocp_write_word(tp, MCU_TYPE_PLA, 0xb098, 0x0200); + ocp_write_word(tp, MCU_TYPE_PLA, 0xb092, 0x7030); + } else if (tp->version == RTL_VER_02) { + static u8 pla_patch_a2[] = { + 0x08, 0xe0, 0x1a, 0xe0, + 0xf2, 0xe0, 0xfa, 0xe0, + 0x32, 0xe1, 0x34, 0xe1, + 0x36, 0xe1, 0x38, 0xe1, + 0x2c, 0x75, 0xdc, 0x21, + 0xbc, 0x25, 0x04, 0x13, + 0x0b, 0xf0, 0x03, 0x13, + 0x09, 0xf0, 0x02, 0x13, + 0x07, 0xf0, 0x01, 0x13, + 0x05, 0xf0, 0x08, 0x13, + 0x03, 0xf0, 0x04, 0xc3, + 0x00, 0xbb, 0x03, 0xc3, + 0x00, 0xbb, 0xd2, 0x17, + 0xbc, 0x17, 0x14, 0xc2, + 0x40, 0x73, 0xba, 0x48, + 0x40, 0x9b, 0x11, 0xc2, + 0x40, 0x73, 0xb0, 0x49, + 0x17, 0xf0, 0xbf, 0x49, + 0x03, 0xf1, 0x09, 0xc5, + 0x00, 0xbd, 0xb1, 0x49, + 0x11, 0xf0, 0xb1, 0x48, + 0x40, 0x9b, 0x02, 0xc2, + 0x00, 0xba, 0x4e, 0x19, + 0x00, 0xa0, 0x1e, 0xfc, + 0xbc, 0xc0, 0xf0, 0xc0, + 0xde, 0xe8, 0x00, 0x80, + 0x00, 0x60, 0x2c, 0x75, + 0xd4, 0x49, 0x12, 0xf1, + 0x29, 0xe0, 0xf8, 0xc2, + 0x46, 0x71, 0xf7, 0xc2, + 0x40, 0x73, 0xbe, 0x49, + 0x03, 0xf1, 0xf5, 0xc7, + 0x02, 0xe0, 0xf2, 0xc7, + 0x4f, 0x30, 0x26, 0x62, + 0xa1, 0x49, 0xf0, 0xf1, + 0x22, 0x72, 0xa0, 0x49, + 0xed, 0xf1, 0x25, 0x25, + 0x18, 0x1f, 0x97, 0x30, + 0x91, 0x30, 0x36, 0x9a, + 0x2c, 0x75, 0x32, 0xc3, + 0x60, 0x73, 0xb1, 0x49, + 0x0d, 0xf1, 0xdc, 0x21, + 0xbc, 0x25, 0x27, 0xc6, + 0xc0, 0x77, 0x04, 0x13, + 0x18, 0xf0, 0x03, 0x13, + 0x19, 0xf0, 0x02, 0x13, + 0x1a, 0xf0, 0x01, 0x13, + 0x1b, 0xf0, 0xd4, 0x49, + 0x03, 0xf1, 0x1c, 0xc5, + 0x00, 0xbd, 0xcd, 0xc6, + 0xc6, 0x67, 0x2e, 0x75, + 0xd7, 0x22, 0xdd, 0x26, + 0x05, 0x15, 0x1a, 0xf0, + 
0x14, 0xc6, 0x00, 0xbe, + 0x13, 0xc5, 0x00, 0xbd, + 0x12, 0xc5, 0x00, 0xbd, + 0xf1, 0x49, 0xfb, 0xf1, + 0xef, 0xe7, 0xf4, 0x49, + 0xfa, 0xf1, 0xec, 0xe7, + 0xf3, 0x49, 0xf7, 0xf1, + 0xe9, 0xe7, 0xf2, 0x49, + 0xf4, 0xf1, 0xe6, 0xe7, + 0xb6, 0xc0, 0xf6, 0x14, + 0x36, 0x14, 0x62, 0x14, + 0x86, 0x15, 0xa0, 0xd1, + 0x00, 0x00, 0xc0, 0x75, + 0xd0, 0x49, 0x46, 0xf0, + 0x26, 0x72, 0xa7, 0x49, + 0x43, 0xf0, 0x22, 0x72, + 0x25, 0x25, 0x20, 0x1f, + 0x97, 0x30, 0x91, 0x30, + 0x40, 0x73, 0xf3, 0xc4, + 0x1c, 0x40, 0x04, 0xf0, + 0xd7, 0x49, 0x05, 0xf1, + 0x37, 0xe0, 0x53, 0x48, + 0xc0, 0x9d, 0x08, 0x02, + 0x40, 0x66, 0x64, 0x27, + 0x06, 0x16, 0x30, 0xf1, + 0x46, 0x63, 0x3b, 0x13, + 0x2d, 0xf1, 0x34, 0x9b, + 0x18, 0x1b, 0x93, 0x30, + 0x2b, 0xc3, 0x10, 0x1c, + 0x2b, 0xe8, 0x01, 0x14, + 0x25, 0xf1, 0x00, 0x1d, + 0x26, 0x1a, 0x8a, 0x30, + 0x22, 0x73, 0xb5, 0x25, + 0x0e, 0x0b, 0x00, 0x1c, + 0x2c, 0xe8, 0x1f, 0xc7, + 0x27, 0x40, 0x1a, 0xf1, + 0x38, 0xe8, 0x32, 0x1f, + 0x8f, 0x30, 0x08, 0x1b, + 0x24, 0xe8, 0x36, 0x72, + 0x46, 0x77, 0x00, 0x17, + 0x0d, 0xf0, 0x13, 0xc3, + 0x1f, 0x40, 0x03, 0xf1, + 0x00, 0x1f, 0x46, 0x9f, + 0x44, 0x77, 0x9f, 0x44, + 0x5f, 0x44, 0x17, 0xe8, + 0x0a, 0xc7, 0x27, 0x40, + 0x05, 0xf1, 0x02, 0xc3, + 0x00, 0xbb, 0x1c, 0x1b, + 0xd2, 0x1a, 0xff, 0xc7, + 0x00, 0xbf, 0xb8, 0xcd, + 0xff, 0xff, 0x02, 0x0c, + 0x54, 0xa5, 0xdc, 0xa5, + 0x2f, 0x40, 0x05, 0xf1, + 0x00, 0x14, 0xfa, 0xf1, + 0x01, 0x1c, 0x02, 0xe0, + 0x00, 0x1c, 0x80, 0xff, + 0xb0, 0x49, 0x04, 0xf0, + 0x01, 0x0b, 0xd3, 0xa1, + 0x03, 0xe0, 0x02, 0x0b, + 0xd3, 0xa5, 0x27, 0x31, + 0x20, 0x37, 0x02, 0x0b, + 0xd3, 0xa5, 0x27, 0x31, + 0x20, 0x37, 0x00, 0x13, + 0xfb, 0xf1, 0x80, 0xff, + 0x22, 0x73, 0xb5, 0x25, + 0x18, 0x1e, 0xde, 0x30, + 0xd9, 0x30, 0x64, 0x72, + 0x11, 0x1e, 0x68, 0x23, + 0x16, 0x31, 0x80, 0xff, + 0x08, 0xc2, 0x40, 0x73, + 0x3a, 0x48, 0x40, 0x9b, + 0x06, 0xff, 0x02, 0xc6, + 0x00, 0xbe, 0x4e, 0x18, + 0x1e, 0xfc, 0x33, 0xc5, + 0xa0, 0x74, 0xc0, 0x49, + 0x1f, 0xf0, 0x30, 0xc5, + 0xa0, 0x73, 0x00, 0x13, 
+ 0x04, 0xf1, 0xa2, 0x73, + 0x00, 0x13, 0x14, 0xf0, + 0x28, 0xc5, 0xa0, 0x74, + 0xc8, 0x49, 0x1b, 0xf1, + 0x26, 0xc5, 0xa0, 0x76, + 0xa2, 0x74, 0x01, 0x06, + 0x20, 0x37, 0xa0, 0x9e, + 0xa2, 0x9c, 0x1e, 0xc5, + 0xa2, 0x73, 0x23, 0x40, + 0x10, 0xf8, 0x04, 0xf3, + 0xa0, 0x73, 0x33, 0x40, + 0x0c, 0xf8, 0x15, 0xc5, + 0xa0, 0x74, 0x41, 0x48, + 0xa0, 0x9c, 0x14, 0xc5, + 0xa0, 0x76, 0x62, 0x48, + 0xe0, 0x48, 0xa0, 0x9e, + 0x10, 0xc6, 0x00, 0xbe, + 0x0a, 0xc5, 0xa0, 0x74, + 0x48, 0x48, 0xa0, 0x9c, + 0x0b, 0xc5, 0x20, 0x1e, + 0xa0, 0x9e, 0xe5, 0x48, + 0xa0, 0x9e, 0xf0, 0xe7, + 0xbc, 0xc0, 0xc8, 0xd2, + 0xcc, 0xd2, 0x28, 0xe4, + 0x22, 0x02, 0xf0, 0xc0, + 0x02, 0xc6, 0x00, 0xbe, + 0x00, 0x00, 0x02, 0xc6, + 0x00, 0xbe, 0x00, 0x00, + 0x02, 0xc6, 0x00, 0xbe, + 0x00, 0x00, 0x02, 0xc6, + 0x00, 0xbe, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00 }; + + rtl_clear_bp(tp); + + generic_ocp_write(tp, 0xf800, 0xff, sizeof(pla_patch_a2), + pla_patch_a2, MCU_TYPE_PLA); + + ocp_write_word(tp, MCU_TYPE_PLA, 0xfc26, 0x8000); + + ocp_write_word(tp, MCU_TYPE_PLA, 0xfc28, 0x17a5); + ocp_write_word(tp, MCU_TYPE_PLA, 0xfc2a, 0x13ad); + ocp_write_word(tp, MCU_TYPE_PLA, 0xfc2c, 0x184d); + ocp_write_word(tp, MCU_TYPE_PLA, 0xfc2e, 0x01e1); + } +} + static void r8152_aldps_en(struct r8152 *tp, bool enable) { if (enable) { @@ -2944,6 +4248,7 @@ static inline void r8152_mmd_indirect(struct r8152 *tp, u16 dev, u16 reg) ocp_reg_write(tp, OCP_EEE_AR, FUN_DATA | dev); } +#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,6,0) static u16 r8152_mmd_read(struct r8152 *tp, u16 dev, u16 reg) { u16 data; @@ -2954,6 +4259,7 @@ static u16 r8152_mmd_read(struct r8152 *tp, u16 dev, u16 reg) return data; } +#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(3,6,0) */ static void r8152_mmd_write(struct r8152 *tp, u16 dev, u16 reg, u16 data) { @@ -2996,7 +4302,7 @@ static void r8152_eee_en(struct r8152 *tp, bool enable) static void r8152b_enable_eee(struct r8152 *tp) { r8152_eee_en(tp, true); - r8152_mmd_write(tp, MDIO_MMD_AN, 
MDIO_AN_EEE_ADV, MDIO_EEE_100TX); + r8152_mmd_write(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, tp->eee_adv); } static void r8152b_enable_fc(struct r8152 *tp) @@ -3006,6 +4312,8 @@ static void r8152b_enable_fc(struct r8152 *tp) anar = r8152_mdio_read(tp, MII_ADVERTISE); anar |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM; r8152_mdio_write(tp, MII_ADVERTISE, anar); + + tp->ups_info.flow_control = true; } static void rtl8152_disable(struct r8152 *tp) @@ -3017,6 +4325,8 @@ static void rtl8152_disable(struct r8152 *tp) static void r8152b_hw_phy_cfg(struct r8152 *tp) { + r8152b_firmware(tp); + r8152b_enable_eee(tp); r8152_aldps_en(tp, true); r8152b_enable_fc(tp); @@ -3155,19 +4465,22 @@ static void r8152b_enter_oob(struct r8152 *tp) static int r8153_patch_request(struct r8152 *tp, bool request) { - u16 data; + u16 data, check; int i; data = ocp_reg_read(tp, OCP_PHY_PATCH_CMD); - if (request) + if (request) { data |= PATCH_REQUEST; - else + check = 0; + } else { data &= ~PATCH_REQUEST; + check = PATCH_READY; + } ocp_reg_write(tp, OCP_PHY_PATCH_CMD, data); - for (i = 0; request && i < 5000; i++) { + for (i = 0; i < 5000; i++) { usleep_range(1000, 2000); - if (ocp_reg_read(tp, OCP_PHY_PATCH_STAT) & PATCH_READY) + if ((ocp_reg_read(tp, OCP_PHY_PATCH_STAT) & PATCH_READY) ^ check) break; } @@ -3180,6 +4493,2777 @@ static int r8153_patch_request(struct r8152 *tp, bool request) } } +static int r8153_pre_ram_code(struct r8152 *tp, u16 key_addr, u16 patch_key) +{ + if (r8153_patch_request(tp, true)) + return -ETIME; + + sram_write(tp, key_addr, patch_key); + sram_write(tp, 0xb82e, 0x0001); + + return 0; +} + +static int r8153_post_ram_code(struct r8152 *tp, u16 key_addr) +{ + u16 data; + + sram_write(tp, 0x0000, 0x0000); + + data = ocp_reg_read(tp, 0xb82e); + data &= ~0x0001; + ocp_reg_write(tp, 0xb82e, data); + + sram_write(tp, key_addr, 0x0000); + + r8153_patch_request(tp, false); + + ocp_write_word(tp, MCU_TYPE_PLA, PLA_OCP_GPHY_BASE, tp->ocp_base); + + return 0; +} + +static int 
r8156_lock_mian(struct r8152 *tp, bool lock) +{ + u16 data; + int i; + + data = ocp_reg_read(tp, 0xa46a); + if (lock) + data |= BIT(1); + else + data &= ~BIT(1); + ocp_reg_write(tp, 0xa46a, data); + + if (lock) { + for (i = 0; i < 100; i++) { + usleep_range(1000, 2000); + data = ocp_reg_read(tp, 0xa730) & 0xff; + if (data == 1) + break; + } + } else { + for (i = 0; i < 100; i++) { + usleep_range(1000, 2000); + data = ocp_reg_read(tp, 0xa730) & 0xff; + if (data != 1) + break; + } + } + + if (i == 100) + return -ETIME; + else + return 0; +} + +static void r8153_wdt1_end(struct r8152 *tp) +{ + int i; + + for (i = 0; i < 104; i++) { + if (!(ocp_read_byte(tp, MCU_TYPE_USB, 0xe404) & 1)) + break; + usleep_range(1000, 2000); + } +} + +static void r8153_firmware(struct r8152 *tp) +{ + if (tp->version == RTL_VER_03) { + r8153_clear_bp(tp); + + r8153_pre_ram_code(tp, 0x8146, 0x7000); + sram_write(tp, 0xb820, 0x0290); + sram_write(tp, 0xa012, 0x0000); + sram_write(tp, 0xa014, 0x2c04); + ocp_write_word(tp, MCU_TYPE_PLA, 0xb438, 0x2c18); + ocp_write_word(tp, MCU_TYPE_PLA, 0xb438, 0x2c45); + ocp_write_word(tp, MCU_TYPE_PLA, 0xb438, 0x2c45); + ocp_write_word(tp, MCU_TYPE_PLA, 0xb438, 0xd502); + ocp_write_word(tp, MCU_TYPE_PLA, 0xb438, 0x8301); + ocp_write_word(tp, MCU_TYPE_PLA, 0xb438, 0x8306); + ocp_write_word(tp, MCU_TYPE_PLA, 0xb438, 0xd500); + ocp_write_word(tp, MCU_TYPE_PLA, 0xb438, 0x8208); + ocp_write_word(tp, MCU_TYPE_PLA, 0xb438, 0xd501); + ocp_write_word(tp, MCU_TYPE_PLA, 0xb438, 0xe018); + ocp_write_word(tp, MCU_TYPE_PLA, 0xb438, 0x0308); + ocp_write_word(tp, MCU_TYPE_PLA, 0xb438, 0x60f2); + ocp_write_word(tp, MCU_TYPE_PLA, 0xb438, 0x8404); + ocp_write_word(tp, MCU_TYPE_PLA, 0xb438, 0x607d); + ocp_write_word(tp, MCU_TYPE_PLA, 0xb438, 0xc117); + ocp_write_word(tp, MCU_TYPE_PLA, 0xb438, 0x2c16); + ocp_write_word(tp, MCU_TYPE_PLA, 0xb438, 0xc116); + ocp_write_word(tp, MCU_TYPE_PLA, 0xb438, 0x2c16); + ocp_write_word(tp, MCU_TYPE_PLA, 0xb438, 0x607d); + ocp_write_word(tp, 
MCU_TYPE_PLA, 0xb438, 0xc117); + ocp_write_word(tp, MCU_TYPE_PLA, 0xb438, 0xa404); + ocp_write_word(tp, MCU_TYPE_PLA, 0xb438, 0xd500); + ocp_write_word(tp, MCU_TYPE_PLA, 0xb438, 0x0800); + ocp_write_word(tp, MCU_TYPE_PLA, 0xb438, 0xd501); + ocp_write_word(tp, MCU_TYPE_PLA, 0xb438, 0x62d2); + ocp_write_word(tp, MCU_TYPE_PLA, 0xb438, 0x615d); + ocp_write_word(tp, MCU_TYPE_PLA, 0xb438, 0xc115); + ocp_write_word(tp, MCU_TYPE_PLA, 0xb438, 0xa404); + ocp_write_word(tp, MCU_TYPE_PLA, 0xb438, 0xc307); + ocp_write_word(tp, MCU_TYPE_PLA, 0xb438, 0xd502); + ocp_write_word(tp, MCU_TYPE_PLA, 0xb438, 0x8301); + ocp_write_word(tp, MCU_TYPE_PLA, 0xb438, 0x8306); + ocp_write_word(tp, MCU_TYPE_PLA, 0xb438, 0xd500); + ocp_write_word(tp, MCU_TYPE_PLA, 0xb438, 0x8208); + ocp_write_word(tp, MCU_TYPE_PLA, 0xb438, 0x2c42); + ocp_write_word(tp, MCU_TYPE_PLA, 0xb438, 0xc114); + ocp_write_word(tp, MCU_TYPE_PLA, 0xb438, 0x8404); + ocp_write_word(tp, MCU_TYPE_PLA, 0xb438, 0xc317); + ocp_write_word(tp, MCU_TYPE_PLA, 0xb438, 0xd701); + ocp_write_word(tp, MCU_TYPE_PLA, 0xb438, 0x435d); + ocp_write_word(tp, MCU_TYPE_PLA, 0xb438, 0xd500); + ocp_write_word(tp, MCU_TYPE_PLA, 0xb438, 0xa208); + ocp_write_word(tp, MCU_TYPE_PLA, 0xb438, 0xd502); + ocp_write_word(tp, MCU_TYPE_PLA, 0xb438, 0xa306); + ocp_write_word(tp, MCU_TYPE_PLA, 0xb438, 0xa301); + ocp_write_word(tp, MCU_TYPE_PLA, 0xb438, 0x2c42); + ocp_write_word(tp, MCU_TYPE_PLA, 0xb438, 0x8404); + ocp_write_word(tp, MCU_TYPE_PLA, 0xb438, 0x613d); + ocp_write_word(tp, MCU_TYPE_PLA, 0xb438, 0xc115); + ocp_write_word(tp, MCU_TYPE_PLA, 0xb438, 0xc307); + ocp_write_word(tp, MCU_TYPE_PLA, 0xb438, 0xd502); + ocp_write_word(tp, MCU_TYPE_PLA, 0xb438, 0x8301); + ocp_write_word(tp, MCU_TYPE_PLA, 0xb438, 0x8306); + ocp_write_word(tp, MCU_TYPE_PLA, 0xb438, 0xd500); + ocp_write_word(tp, MCU_TYPE_PLA, 0xb438, 0x8208); + ocp_write_word(tp, MCU_TYPE_PLA, 0xb438, 0x2c42); + ocp_write_word(tp, MCU_TYPE_PLA, 0xb438, 0xc114); + ocp_write_word(tp, MCU_TYPE_PLA, 0xb438, 
0xc317); + ocp_write_word(tp, MCU_TYPE_PLA, 0xb438, 0xd701); + ocp_write_word(tp, MCU_TYPE_PLA, 0xb438, 0x40dd); + ocp_write_word(tp, MCU_TYPE_PLA, 0xb438, 0xd500); + ocp_write_word(tp, MCU_TYPE_PLA, 0xb438, 0xa208); + ocp_write_word(tp, MCU_TYPE_PLA, 0xb438, 0xd502); + ocp_write_word(tp, MCU_TYPE_PLA, 0xb438, 0xa306); + ocp_write_word(tp, MCU_TYPE_PLA, 0xb438, 0xa301); + ocp_write_word(tp, MCU_TYPE_PLA, 0xb438, 0xd500); + ocp_write_word(tp, MCU_TYPE_PLA, 0xb438, 0xd702); + ocp_write_word(tp, MCU_TYPE_PLA, 0xb438, 0x0800); + sram_write(tp, 0xa01a, 0x0000); + sram_write(tp, 0xa006, 0x0fff); + sram_write(tp, 0xa004, 0x0fff); + sram_write(tp, 0xa002, 0x05a3); + sram_write(tp, 0xa000, 0x3591); + sram_write(tp, 0xb820, 0x0210); + r8153_post_ram_code(tp, 0x8146); + } else if (tp->version == RTL_VER_04) { + static u8 usb_patch_b[] = { + 0x08, 0xe0, 0x0f, 0xe0, + 0x18, 0xe0, 0x24, 0xe0, + 0x26, 0xe0, 0x3a, 0xe0, + 0x84, 0xe0, 0x9c, 0xe0, + 0xc2, 0x49, 0x04, 0xf0, + 0x02, 0xc0, 0x00, 0xb8, + 0x14, 0x18, 0x02, 0xc0, + 0x00, 0xb8, 0x2e, 0x18, + 0x06, 0x89, 0x08, 0xc0, + 0x0c, 0x61, 0x92, 0x48, + 0x93, 0x48, 0x0c, 0x89, + 0x02, 0xc0, 0x00, 0xb8, + 0x08, 0x05, 0x40, 0xb4, + 0x16, 0x89, 0x6d, 0xc0, + 0x00, 0x61, 0x95, 0x49, + 0x06, 0xf0, 0xfa, 0xc0, + 0x0c, 0x61, 0x92, 0x48, + 0x93, 0x48, 0x0c, 0x89, + 0x02, 0xc0, 0x00, 0xb8, + 0xe2, 0x04, 0x02, 0xc2, + 0x00, 0xba, 0xec, 0x11, + 0x60, 0x60, 0x85, 0x49, + 0x0d, 0xf1, 0x11, 0xc6, + 0xd2, 0x61, 0x91, 0x49, + 0xfd, 0xf0, 0x74, 0x60, + 0x04, 0x48, 0x74, 0x88, + 0x08, 0xc6, 0x08, 0xc0, + 0xc4, 0x98, 0x01, 0x18, + 0xc0, 0x88, 0x02, 0xc0, + 0x00, 0xb8, 0x6e, 0x12, + 0x04, 0xe4, 0x0d, 0x00, + 0x00, 0xd4, 0xd1, 0x49, + 0x3c, 0xf1, 0xd2, 0x49, + 0x16, 0xf1, 0xd3, 0x49, + 0x18, 0xf1, 0xd4, 0x49, + 0x19, 0xf1, 0xd5, 0x49, + 0x1a, 0xf1, 0xd6, 0x49, + 0x1b, 0xf1, 0xd7, 0x49, + 0x1c, 0xf1, 0xd8, 0x49, + 0x1d, 0xf1, 0xd9, 0x49, + 0x20, 0xf1, 0xda, 0x49, + 0x23, 0xf1, 0xdb, 0x49, + 0x24, 0xf1, 0x02, 0xc4, + 0x00, 0xbc, 0x20, 0x04, + 0xe5, 0x8e, 
0x02, 0xc4, + 0x00, 0xbc, 0x14, 0x02, + 0x02, 0xc4, 0x00, 0xbc, + 0x16, 0x02, 0x02, 0xc4, + 0x00, 0xbc, 0x18, 0x02, + 0x02, 0xc4, 0x00, 0xbc, + 0x1a, 0x02, 0x02, 0xc4, + 0x00, 0xbc, 0x1c, 0x02, + 0x02, 0xc4, 0x00, 0xbc, + 0x94, 0x02, 0x10, 0xc7, + 0xe0, 0x8e, 0x02, 0xc4, + 0x00, 0xbc, 0x8a, 0x02, + 0x0b, 0xc7, 0xe4, 0x8e, + 0x02, 0xc4, 0x00, 0xbc, + 0x88, 0x02, 0x02, 0xc4, + 0x00, 0xbc, 0x6e, 0x02, + 0x02, 0xc4, 0x00, 0xbc, + 0x5a, 0x02, 0x30, 0xe4, + 0x0c, 0xc3, 0x60, 0x64, + 0xc5, 0x49, 0x04, 0xf1, + 0x74, 0x64, 0xc4, 0x48, + 0x74, 0x8c, 0x06, 0xc3, + 0x64, 0x8e, 0x02, 0xc4, + 0x00, 0xbc, 0x20, 0x04, + 0x00, 0xd8, 0x00, 0xe4, + 0xb2, 0xc0, 0x00, 0x61, + 0x90, 0x49, 0x09, 0xf1, + 0x8b, 0xc6, 0xca, 0x61, + 0x94, 0x49, 0x0e, 0xf1, + 0xf6, 0xc6, 0xda, 0x60, + 0x81, 0x49, 0x0a, 0xf0, + 0x65, 0x60, 0x03, 0x48, + 0x65, 0x88, 0xef, 0xc6, + 0xdc, 0x60, 0x80, 0x48, + 0xdc, 0x88, 0x05, 0xc6, + 0x00, 0xbe, 0x02, 0xc6, + 0x00, 0xbe, 0x36, 0x13, + 0x4c, 0x17, 0x99, 0xc4, + 0x80, 0x65, 0xd0, 0x49, + 0x04, 0xf1, 0xfa, 0x75, + 0x04, 0xc4, 0x00, 0xbc, + 0x03, 0xc4, 0x00, 0xbc, + 0x9a, 0x00, 0xee, 0x01 }; + static u8 pla_patch_b[] = { + 0x08, 0xe0, 0xea, 0xe0, + 0xf2, 0xe0, 0x04, 0xe1, + 0x09, 0xe1, 0x0e, 0xe1, + 0x46, 0xe1, 0xf7, 0xe1, + 0x14, 0xc2, 0x40, 0x73, + 0xba, 0x48, 0x40, 0x9b, + 0x11, 0xc2, 0x40, 0x73, + 0xb0, 0x49, 0x17, 0xf0, + 0xbf, 0x49, 0x03, 0xf1, + 0x09, 0xc5, 0x00, 0xbd, + 0xb1, 0x49, 0x11, 0xf0, + 0xb1, 0x48, 0x40, 0x9b, + 0x02, 0xc2, 0x00, 0xba, + 0x1a, 0x17, 0x00, 0xe0, + 0x1e, 0xfc, 0xbc, 0xc0, + 0xf0, 0xc0, 0xde, 0xe8, + 0x00, 0x80, 0x00, 0x20, + 0x2c, 0x75, 0xd4, 0x49, + 0x12, 0xf1, 0x32, 0xe0, + 0xf8, 0xc2, 0x46, 0x71, + 0xf7, 0xc2, 0x40, 0x73, + 0xbe, 0x49, 0x03, 0xf1, + 0xf5, 0xc7, 0x02, 0xe0, + 0xf2, 0xc7, 0x4f, 0x30, + 0x26, 0x62, 0xa1, 0x49, + 0xf0, 0xf1, 0x22, 0x72, + 0xa0, 0x49, 0xed, 0xf1, + 0x25, 0x25, 0x18, 0x1f, + 0x97, 0x30, 0x91, 0x30, + 0x36, 0x9a, 0x2c, 0x75, + 0x3c, 0xc3, 0x60, 0x73, + 0xb1, 0x49, 0x0d, 0xf1, + 0xdc, 0x21, 0xbc, 0x25, + 
0x30, 0xc6, 0xc0, 0x77, + 0x04, 0x13, 0x21, 0xf0, + 0x03, 0x13, 0x22, 0xf0, + 0x02, 0x13, 0x23, 0xf0, + 0x01, 0x13, 0x24, 0xf0, + 0x08, 0x13, 0x08, 0xf1, + 0x2e, 0x73, 0xba, 0x21, + 0xbd, 0x25, 0x05, 0x13, + 0x03, 0xf1, 0x24, 0xc5, + 0x00, 0xbd, 0xd4, 0x49, + 0x03, 0xf1, 0x1c, 0xc5, + 0x00, 0xbd, 0xc4, 0xc6, + 0xc6, 0x67, 0x2e, 0x75, + 0xd7, 0x22, 0xdd, 0x26, + 0x05, 0x15, 0x1b, 0xf0, + 0x14, 0xc6, 0x00, 0xbe, + 0x13, 0xc5, 0x00, 0xbd, + 0x12, 0xc5, 0x00, 0xbd, + 0xf1, 0x49, 0xfb, 0xf1, + 0xef, 0xe7, 0xf4, 0x49, + 0xfa, 0xf1, 0xec, 0xe7, + 0xf3, 0x49, 0xf7, 0xf1, + 0xe9, 0xe7, 0xf2, 0x49, + 0xf4, 0xf1, 0xe6, 0xe7, + 0xb6, 0xc0, 0x9e, 0x12, + 0xde, 0x11, 0x0a, 0x12, + 0x3c, 0x13, 0x00, 0xa0, + 0xa0, 0xd1, 0x00, 0x00, + 0xc0, 0x75, 0xd0, 0x49, + 0x46, 0xf0, 0x26, 0x72, + 0xa7, 0x49, 0x43, 0xf0, + 0x22, 0x72, 0x25, 0x25, + 0x20, 0x1f, 0x97, 0x30, + 0x91, 0x30, 0x40, 0x73, + 0xf3, 0xc4, 0x1c, 0x40, + 0x04, 0xf0, 0xd7, 0x49, + 0x05, 0xf1, 0x37, 0xe0, + 0x53, 0x48, 0xc0, 0x9d, + 0x08, 0x02, 0x40, 0x66, + 0x64, 0x27, 0x06, 0x16, + 0x30, 0xf1, 0x46, 0x63, + 0x3b, 0x13, 0x2d, 0xf1, + 0x34, 0x9b, 0x18, 0x1b, + 0x93, 0x30, 0x2b, 0xc3, + 0x10, 0x1c, 0x2b, 0xe8, + 0x01, 0x14, 0x25, 0xf1, + 0x00, 0x1d, 0x26, 0x1a, + 0x8a, 0x30, 0x22, 0x73, + 0xb5, 0x25, 0x0e, 0x0b, + 0x00, 0x1c, 0x2c, 0xe8, + 0x1f, 0xc7, 0x27, 0x40, + 0x1a, 0xf1, 0x38, 0xe8, + 0x32, 0x1f, 0x8f, 0x30, + 0x08, 0x1b, 0x24, 0xe8, + 0x36, 0x72, 0x46, 0x77, + 0x00, 0x17, 0x0d, 0xf0, + 0x13, 0xc3, 0x1f, 0x40, + 0x03, 0xf1, 0x00, 0x1f, + 0x46, 0x9f, 0x44, 0x77, + 0x9f, 0x44, 0x5f, 0x44, + 0x17, 0xe8, 0x0a, 0xc7, + 0x27, 0x40, 0x05, 0xf1, + 0x02, 0xc3, 0x00, 0xbb, + 0xfa, 0x18, 0xb0, 0x18, + 0xff, 0xc7, 0x00, 0xbf, + 0xb8, 0xcd, 0xff, 0xff, + 0x02, 0x0c, 0x54, 0xa5, + 0xdc, 0xa5, 0x2f, 0x40, + 0x05, 0xf1, 0x00, 0x14, + 0xfa, 0xf1, 0x01, 0x1c, + 0x02, 0xe0, 0x00, 0x1c, + 0x80, 0xff, 0xb0, 0x49, + 0x04, 0xf0, 0x01, 0x0b, + 0xd3, 0xa1, 0x03, 0xe0, + 0x02, 0x0b, 0xd3, 0xa5, + 0x27, 0x31, 0x20, 0x37, + 0x02, 0x0b, 0xd3, 0xa5, 
+ 0x27, 0x31, 0x20, 0x37, + 0x00, 0x13, 0xfb, 0xf1, + 0x80, 0xff, 0x22, 0x73, + 0xb5, 0x25, 0x18, 0x1e, + 0xde, 0x30, 0xd9, 0x30, + 0x64, 0x72, 0x11, 0x1e, + 0x68, 0x23, 0x16, 0x31, + 0x80, 0xff, 0x08, 0xc2, + 0x40, 0x73, 0x3a, 0x48, + 0x40, 0x9b, 0x06, 0xff, + 0x02, 0xc6, 0x00, 0xbe, + 0x08, 0x16, 0x1e, 0xfc, + 0x2c, 0x75, 0xdc, 0x21, + 0xbc, 0x25, 0x04, 0x13, + 0x0b, 0xf0, 0x03, 0x13, + 0x09, 0xf0, 0x02, 0x13, + 0x07, 0xf0, 0x01, 0x13, + 0x05, 0xf0, 0x08, 0x13, + 0x03, 0xf0, 0x04, 0xc3, + 0x00, 0xbb, 0x03, 0xc3, + 0x00, 0xbb, 0x8c, 0x15, + 0x76, 0x15, 0xa0, 0x64, + 0x40, 0x48, 0xa0, 0x8c, + 0x02, 0xc4, 0x00, 0xbc, + 0x82, 0x00, 0xa0, 0x62, + 0x21, 0x48, 0xa0, 0x8a, + 0x02, 0xc2, 0x00, 0xba, + 0x40, 0x03, 0x33, 0xc5, + 0xa0, 0x74, 0xc0, 0x49, + 0x1f, 0xf0, 0x30, 0xc5, + 0xa0, 0x73, 0x00, 0x13, + 0x04, 0xf1, 0xa2, 0x73, + 0x00, 0x13, 0x14, 0xf0, + 0x28, 0xc5, 0xa0, 0x74, + 0xc8, 0x49, 0x1b, 0xf1, + 0x26, 0xc5, 0xa0, 0x76, + 0xa2, 0x74, 0x01, 0x06, + 0x20, 0x37, 0xa0, 0x9e, + 0xa2, 0x9c, 0x1e, 0xc5, + 0xa2, 0x73, 0x23, 0x40, + 0x10, 0xf8, 0x04, 0xf3, + 0xa0, 0x73, 0x33, 0x40, + 0x0c, 0xf8, 0x15, 0xc5, + 0xa0, 0x74, 0x41, 0x48, + 0xa0, 0x9c, 0x14, 0xc5, + 0xa0, 0x76, 0x62, 0x48, + 0xe0, 0x48, 0xa0, 0x9e, + 0x10, 0xc6, 0x00, 0xbe, + 0x0a, 0xc5, 0xa0, 0x74, + 0x48, 0x48, 0xa0, 0x9c, + 0x0b, 0xc5, 0x20, 0x1e, + 0xa0, 0x9e, 0xe5, 0x48, + 0xa0, 0x9e, 0xf0, 0xe7, + 0xbc, 0xc0, 0xc8, 0xd2, + 0xcc, 0xd2, 0x28, 0xe4, + 0xe6, 0x01, 0xf0, 0xc0, + 0x18, 0x89, 0x00, 0x1d, + 0x3c, 0xc3, 0x64, 0x71, + 0x3c, 0xc0, 0x02, 0x99, + 0x00, 0x61, 0x67, 0x11, + 0x3c, 0xf1, 0x69, 0x33, + 0x35, 0xc0, 0x28, 0x40, + 0xf6, 0xf1, 0x34, 0xc0, + 0x00, 0x19, 0x81, 0x1b, + 0x91, 0xe8, 0x31, 0xc0, + 0x04, 0x1a, 0x84, 0x1b, + 0x8d, 0xe8, 0x82, 0xe8, + 0xa3, 0x49, 0xfe, 0xf0, + 0x2b, 0xc0, 0x7e, 0xe8, + 0xa1, 0x48, 0x28, 0xc0, + 0x84, 0x1b, 0x84, 0xe8, + 0x00, 0x1d, 0x69, 0x33, + 0x00, 0x1e, 0x01, 0x06, + 0xff, 0x18, 0x30, 0x40, + 0xfd, 0xf1, 0x19, 0xc0, + 0x00, 0x76, 0x2e, 0x40, + 0xf7, 0xf1, 0x21, 
0x48, + 0x19, 0xc0, 0x84, 0x1b, + 0x75, 0xe8, 0x10, 0xc0, + 0x69, 0xe8, 0xa1, 0x49, + 0xfd, 0xf0, 0x11, 0xc0, + 0x00, 0x1a, 0x84, 0x1b, + 0x6d, 0xe8, 0x62, 0xe8, + 0xa5, 0x49, 0xfe, 0xf0, + 0x09, 0xc0, 0x01, 0x19, + 0x81, 0x1b, 0x66, 0xe8, + 0x54, 0xe0, 0x10, 0xd4, + 0x88, 0xd3, 0xb8, 0x0b, + 0x50, 0xe8, 0x20, 0xb4, + 0x10, 0xd8, 0x84, 0xd4, + 0xfd, 0xc0, 0x52, 0xe8, + 0x48, 0x33, 0xf9, 0xc0, + 0x00, 0x61, 0x9c, 0x20, + 0x9c, 0x24, 0xd0, 0x49, + 0x04, 0xf0, 0x04, 0x11, + 0x02, 0xf1, 0x03, 0xe0, + 0x00, 0x11, 0x06, 0xf1, + 0x5c, 0xc0, 0x00, 0x61, + 0x92, 0x48, 0x00, 0x89, + 0x3a, 0xe0, 0x06, 0x11, + 0x06, 0xf1, 0x55, 0xc0, + 0x00, 0x61, 0x11, 0x48, + 0x00, 0x89, 0x33, 0xe0, + 0x05, 0x11, 0x08, 0xf1, + 0x4e, 0xc0, 0x00, 0x61, + 0x91, 0x49, 0x04, 0xf0, + 0x91, 0x48, 0x00, 0x89, + 0x11, 0xe0, 0xd9, 0xc0, + 0x00, 0x61, 0x98, 0x20, + 0x98, 0x24, 0x25, 0x11, + 0x24, 0xf1, 0x44, 0xc0, + 0x29, 0xe8, 0x95, 0x49, + 0x20, 0xf0, 0xcf, 0xc0, + 0x00, 0x61, 0x98, 0x20, + 0x98, 0x24, 0x25, 0x11, + 0x1a, 0xf1, 0x37, 0xc0, + 0x00, 0x61, 0x92, 0x49, + 0x16, 0xf1, 0x12, 0x48, + 0x00, 0x89, 0x2f, 0xc0, + 0x00, 0x19, 0x00, 0x89, + 0x2d, 0xc0, 0x01, 0x89, + 0x2d, 0xc0, 0x04, 0x19, + 0x81, 0x1b, 0x1c, 0xe8, + 0x2a, 0xc0, 0x14, 0x19, + 0x81, 0x1b, 0x18, 0xe8, + 0x21, 0xc0, 0x0c, 0xe8, + 0x1f, 0xc0, 0x12, 0x48, + 0x81, 0x1b, 0x12, 0xe8, + 0xae, 0xc3, 0x66, 0x71, + 0xae, 0xc0, 0x02, 0x99, + 0x02, 0xc0, 0x00, 0xb8, + 0x96, 0x07, 0x13, 0xc4, + 0x84, 0x98, 0x00, 0x1b, + 0x86, 0x8b, 0x86, 0x73, + 0xbf, 0x49, 0xfe, 0xf1, + 0x80, 0x71, 0x82, 0x72, + 0x80, 0xff, 0x09, 0xc4, + 0x84, 0x98, 0x80, 0x99, + 0x82, 0x9a, 0x86, 0x8b, + 0x86, 0x73, 0xbf, 0x49, + 0xfe, 0xf1, 0x80, 0xff, + 0x08, 0xea, 0x30, 0xd4, + 0x10, 0xc0, 0x12, 0xe8, + 0x8a, 0xd3, 0x28, 0xe4, + 0x2c, 0xe4, 0x00, 0xd8, + 0x00, 0x00, 0x00, 0x00 }; + + r8153_pre_ram_code(tp, 0x8146, 0x7001); + sram_write(tp, 0xb820, 0x0290); + sram_write(tp, 0xa012, 0x0000); + sram_write(tp, 0xa014, 0x2c04); + ocp_write_word(tp, MCU_TYPE_PLA, 0xb438, 0x2c07); 
+ ocp_write_word(tp, MCU_TYPE_PLA, 0xb438, 0x2c0a); + ocp_write_word(tp, MCU_TYPE_PLA, 0xb438, 0x2c0d); + ocp_write_word(tp, MCU_TYPE_PLA, 0xb438, 0xa240); + ocp_write_word(tp, MCU_TYPE_PLA, 0xb438, 0xa104); + ocp_write_word(tp, MCU_TYPE_PLA, 0xb438, 0x292d); + ocp_write_word(tp, MCU_TYPE_PLA, 0xb438, 0x8620); + ocp_write_word(tp, MCU_TYPE_PLA, 0xb438, 0xa480); + ocp_write_word(tp, MCU_TYPE_PLA, 0xb438, 0x2a2c); + ocp_write_word(tp, MCU_TYPE_PLA, 0xb438, 0x8480); + ocp_write_word(tp, MCU_TYPE_PLA, 0xb438, 0xa101); + ocp_write_word(tp, MCU_TYPE_PLA, 0xb438, 0x2a36); + ocp_write_word(tp, MCU_TYPE_PLA, 0xb438, 0xd056); + ocp_write_word(tp, MCU_TYPE_PLA, 0xb438, 0x2223); + sram_write(tp, 0xa01a, 0x0000); + sram_write(tp, 0xa006, 0x0222); + sram_write(tp, 0xa004, 0x0a35); + sram_write(tp, 0xa002, 0x0a2b); + sram_write(tp, 0xa000, 0xf92c); + sram_write(tp, 0xb820, 0x0210); + r8153_post_ram_code(tp, 0x8146); + + r8153_wdt1_end(tp); + r8153_clear_bp(tp); + + ocp_write_word(tp, MCU_TYPE_USB, USB_BP_EN, 0x0000); + generic_ocp_write(tp, 0xf800, 0xff, sizeof(usb_patch_b), + usb_patch_b, MCU_TYPE_USB); + ocp_write_word(tp, MCU_TYPE_USB, 0xfc26, 0xa000); + ocp_write_word(tp, MCU_TYPE_USB, 0xfc28, 0x180c); + ocp_write_word(tp, MCU_TYPE_USB, 0xfc2a, 0x0506); + ocp_write_word(tp, MCU_TYPE_USB, 0xfc2c, 0x04E0); + ocp_write_word(tp, MCU_TYPE_USB, 0xfc2e, 0x11E4); + ocp_write_word(tp, MCU_TYPE_USB, 0xfc30, 0x125C); + ocp_write_word(tp, MCU_TYPE_USB, 0xfc32, 0x0232); + ocp_write_word(tp, MCU_TYPE_USB, 0xfc34, 0x131E); + ocp_write_word(tp, MCU_TYPE_USB, 0xfc36, 0x0098); + ocp_write_word(tp, MCU_TYPE_USB, USB_BP_EN, 0x00FF); + + if (!(ocp_read_word(tp, MCU_TYPE_PLA, 0xd38e) & BIT(0))) { + ocp_write_word(tp, MCU_TYPE_PLA, 0xd38c, 0x0082); + ocp_write_word(tp, MCU_TYPE_PLA, 0xd38e, 0x0082); + } + + ocp_write_word(tp, MCU_TYPE_PLA, PLA_BP_EN, 0x0000); + generic_ocp_write(tp, 0xf800, 0xff, sizeof(pla_patch_b), + pla_patch_b, MCU_TYPE_PLA); + ocp_write_word(tp, MCU_TYPE_PLA, 0xfc26, 0x8000); 
+ ocp_write_word(tp, MCU_TYPE_PLA, 0xfc28, 0x1154); + ocp_write_word(tp, MCU_TYPE_PLA, 0xfc2a, 0x1606); + ocp_write_word(tp, MCU_TYPE_PLA, 0xfc2c, 0x155a); + ocp_write_word(tp, MCU_TYPE_PLA, 0xfc2e, 0x0080); + ocp_write_word(tp, MCU_TYPE_PLA, 0xfc30, 0x033c); + ocp_write_word(tp, MCU_TYPE_PLA, 0xfc32, 0x01a0); + ocp_write_word(tp, MCU_TYPE_PLA, 0xfc34, 0x0794); + ocp_write_word(tp, MCU_TYPE_PLA, 0xfc36, 0x0000); + ocp_write_word(tp, MCU_TYPE_PLA, PLA_BP_EN, 0x007f); + + ocp_write_word(tp, MCU_TYPE_PLA, 0xd388, 0x08ca); + } else if (tp->version == RTL_VER_05) { + u32 ocp_data; + static u8 usb_patch_c[] = { + 0x08, 0xe0, 0x0a, 0xe0, + 0x14, 0xe0, 0x58, 0xe0, + 0x64, 0xe0, 0x79, 0xe0, + 0xa8, 0xe0, 0xb3, 0xe0, + 0x02, 0xc5, 0x00, 0xbd, + 0x38, 0x3b, 0xdb, 0x49, + 0x04, 0xf1, 0x06, 0xc3, + 0x00, 0xbb, 0x5a, 0x02, + 0x05, 0xc4, 0x03, 0xc3, + 0x00, 0xbb, 0xa4, 0x04, + 0x7e, 0x02, 0x30, 0xd4, + 0x65, 0xc6, 0x66, 0x61, + 0x92, 0x49, 0x12, 0xf1, + 0x3e, 0xc0, 0x02, 0x61, + 0x97, 0x49, 0x05, 0xf0, + 0x3c, 0xc0, 0x00, 0x61, + 0x90, 0x49, 0x0a, 0xf1, + 0xca, 0x63, 0xb0, 0x49, + 0x09, 0xf1, 0xb1, 0x49, + 0x05, 0xf0, 0x32, 0xc0, + 0x00, 0x71, 0x9e, 0x49, + 0x03, 0xf1, 0xb0, 0x48, + 0x05, 0xe0, 0x30, 0x48, + 0xda, 0x61, 0x10, 0x48, + 0xda, 0x89, 0x4a, 0xc6, + 0xc0, 0x60, 0x85, 0x49, + 0x03, 0xf0, 0x31, 0x48, + 0x04, 0xe0, 0xb1, 0x48, + 0xb2, 0x48, 0x0f, 0xe0, + 0x30, 0x18, 0x1b, 0xc1, + 0x0f, 0xe8, 0x1a, 0xc6, + 0xc7, 0x65, 0xd0, 0x49, + 0x05, 0xf0, 0x32, 0x48, + 0x02, 0xc2, 0x00, 0xba, + 0x3e, 0x16, 0x02, 0xc2, + 0x00, 0xba, 0x48, 0x16, + 0x02, 0xc2, 0x00, 0xba, + 0x4a, 0x16, 0x02, 0xb4, + 0x09, 0xc2, 0x40, 0x99, + 0x0e, 0x48, 0x42, 0x98, + 0x42, 0x70, 0x8e, 0x49, + 0xfe, 0xf1, 0x02, 0xb0, + 0x80, 0xff, 0xc0, 0xd4, + 0xe4, 0x40, 0x20, 0xd4, + 0xca, 0xcf, 0x00, 0xcf, + 0x3c, 0xe4, 0x0c, 0xc0, + 0x00, 0x63, 0xb5, 0x49, + 0x09, 0xc0, 0x30, 0x18, + 0x06, 0xc1, 0xea, 0xef, + 0xf5, 0xc7, 0x02, 0xc0, + 0x00, 0xb8, 0xd0, 0x10, + 0xe4, 0x4b, 0x00, 0xd8, + 0x14, 0xc3, 0x60, 0x61, + 0x90, 
0x49, 0x06, 0xf0, + 0x11, 0xc3, 0x70, 0x61, + 0x12, 0x48, 0x70, 0x89, + 0x08, 0xe0, 0x0a, 0xc6, + 0xd4, 0x61, 0x93, 0x48, + 0xd4, 0x89, 0x02, 0xc1, + 0x00, 0xb9, 0x72, 0x17, + 0x02, 0xc1, 0x00, 0xb9, + 0x9c, 0x15, 0x00, 0xd8, + 0xef, 0xcf, 0x20, 0xd4, + 0x2b, 0xc5, 0xa0, 0x77, + 0x00, 0x1c, 0xa0, 0x9c, + 0x28, 0xc5, 0xa0, 0x64, + 0xc0, 0x48, 0xc1, 0x48, + 0xc2, 0x48, 0xa0, 0x8c, + 0xb1, 0x64, 0xc0, 0x48, + 0xb1, 0x8c, 0x20, 0xc5, + 0xa0, 0x64, 0x40, 0x48, + 0x41, 0x48, 0xc2, 0x48, + 0xa0, 0x8c, 0x19, 0xc5, + 0xa4, 0x64, 0x44, 0x48, + 0xa4, 0x8c, 0xb1, 0x64, + 0x40, 0x48, 0xb1, 0x8c, + 0x14, 0xc4, 0x80, 0x73, + 0x13, 0xc4, 0x82, 0x9b, + 0x11, 0x1b, 0x80, 0x9b, + 0x0c, 0xc5, 0xa0, 0x64, + 0x40, 0x48, 0x41, 0x48, + 0x42, 0x48, 0xa0, 0x8c, + 0x05, 0xc5, 0xa0, 0x9f, + 0x02, 0xc5, 0x00, 0xbd, + 0x6c, 0x3a, 0x1e, 0xfc, + 0x10, 0xd8, 0x86, 0xd4, + 0xf8, 0xcb, 0x20, 0xe4, + 0x0a, 0xc0, 0x16, 0x61, + 0x91, 0x48, 0x16, 0x89, + 0x07, 0xc0, 0x11, 0x19, + 0x0c, 0x89, 0x02, 0xc1, + 0x00, 0xb9, 0x02, 0x06, + 0x00, 0xd4, 0x40, 0xb4, + 0xfe, 0xc0, 0x16, 0x61, + 0x91, 0x48, 0x16, 0x89, + 0xfb, 0xc0, 0x11, 0x19, + 0x0c, 0x89, 0x02, 0xc1, + 0x00, 0xb9, 0xd2, 0x05 }; + static u8 pla_patch_c[] = { + 0x5d, 0xe0, 0x07, 0xe0, + 0x0f, 0xe0, 0x5a, 0xe0, + 0x59, 0xe0, 0x1f, 0xe0, + 0x57, 0xe0, 0x3e, 0xe1, + 0x08, 0xc2, 0x40, 0x73, + 0x3a, 0x48, 0x40, 0x9b, + 0x06, 0xff, 0x02, 0xc6, + 0x00, 0xbe, 0xcc, 0x17, + 0x1e, 0xfc, 0x2c, 0x75, + 0xdc, 0x21, 0xbc, 0x25, + 0x04, 0x13, 0x0b, 0xf0, + 0x03, 0x13, 0x09, 0xf0, + 0x02, 0x13, 0x07, 0xf0, + 0x01, 0x13, 0x05, 0xf0, + 0x08, 0x13, 0x03, 0xf0, + 0x04, 0xc3, 0x00, 0xbb, + 0x03, 0xc3, 0x00, 0xbb, + 0x50, 0x17, 0x3a, 0x17, + 0x33, 0xc5, 0xa0, 0x74, + 0xc0, 0x49, 0x1f, 0xf0, + 0x30, 0xc5, 0xa0, 0x73, + 0x00, 0x13, 0x04, 0xf1, + 0xa2, 0x73, 0x00, 0x13, + 0x14, 0xf0, 0x28, 0xc5, + 0xa0, 0x74, 0xc8, 0x49, + 0x1b, 0xf1, 0x26, 0xc5, + 0xa0, 0x76, 0xa2, 0x74, + 0x01, 0x06, 0x20, 0x37, + 0xa0, 0x9e, 0xa2, 0x9c, + 0x1e, 0xc5, 0xa2, 0x73, + 0x23, 0x40, 0x10, 0xf8, 
+ 0x04, 0xf3, 0xa0, 0x73, + 0x33, 0x40, 0x0c, 0xf8, + 0x15, 0xc5, 0xa0, 0x74, + 0x41, 0x48, 0xa0, 0x9c, + 0x14, 0xc5, 0xa0, 0x76, + 0x62, 0x48, 0xe0, 0x48, + 0xa0, 0x9e, 0x10, 0xc6, + 0x00, 0xbe, 0x0a, 0xc5, + 0xa0, 0x74, 0x48, 0x48, + 0xa0, 0x9c, 0x0b, 0xc5, + 0x20, 0x1e, 0xa0, 0x9e, + 0xe5, 0x48, 0xa0, 0x9e, + 0xf0, 0xe7, 0xbc, 0xc0, + 0xc8, 0xd2, 0xcc, 0xd2, + 0x28, 0xe4, 0xfa, 0x01, + 0xf0, 0xc0, 0x18, 0x89, + 0x74, 0xc0, 0xcd, 0xe8, + 0x80, 0x76, 0x00, 0x1d, + 0x6e, 0xc3, 0x66, 0x62, + 0xa0, 0x49, 0x06, 0xf0, + 0x64, 0xc0, 0x02, 0x71, + 0x60, 0x99, 0x62, 0xc1, + 0x03, 0xe0, 0x5f, 0xc0, + 0x60, 0xc1, 0x02, 0x99, + 0x00, 0x61, 0x0f, 0x1b, + 0x59, 0x41, 0x03, 0x13, + 0x18, 0xf1, 0xe4, 0x49, + 0x20, 0xf1, 0xe5, 0x49, + 0x1e, 0xf0, 0x59, 0xc6, + 0xd0, 0x73, 0xb7, 0x49, + 0x08, 0xf0, 0x01, 0x0b, + 0x80, 0x13, 0x03, 0xf0, + 0xd0, 0x8b, 0x03, 0xe0, + 0x3f, 0x48, 0xd0, 0x9b, + 0x51, 0xc0, 0x10, 0x1a, + 0x84, 0x1b, 0xb1, 0xe8, + 0x4b, 0xc2, 0x40, 0x63, + 0x30, 0x48, 0x0a, 0xe0, + 0xe5, 0x49, 0x09, 0xf0, + 0x47, 0xc0, 0x00, 0x1a, + 0x84, 0x1b, 0xa7, 0xe8, + 0x41, 0xc2, 0x40, 0x63, + 0xb0, 0x48, 0x40, 0x8b, + 0x67, 0x11, 0x3f, 0xf1, + 0x69, 0x33, 0x32, 0xc0, + 0x28, 0x40, 0xd2, 0xf1, + 0x33, 0xc0, 0x00, 0x19, + 0x81, 0x1b, 0x99, 0xe8, + 0x30, 0xc0, 0x04, 0x1a, + 0x84, 0x1b, 0x95, 0xe8, + 0x8a, 0xe8, 0xa3, 0x49, + 0xfe, 0xf0, 0x2a, 0xc0, + 0x86, 0xe8, 0xa1, 0x48, + 0x84, 0x1b, 0x8d, 0xe8, + 0x00, 0x1d, 0x69, 0x33, + 0x00, 0x1e, 0x01, 0x06, + 0xff, 0x18, 0x30, 0x40, + 0xfd, 0xf1, 0x1f, 0xc0, + 0x00, 0x76, 0x2e, 0x40, + 0xf7, 0xf1, 0x21, 0x48, + 0x19, 0xc0, 0x84, 0x1b, + 0x7e, 0xe8, 0x74, 0x08, + 0x72, 0xe8, 0xa1, 0x49, + 0xfd, 0xf0, 0x11, 0xc0, + 0x00, 0x1a, 0x84, 0x1b, + 0x76, 0xe8, 0x6b, 0xe8, + 0xa5, 0x49, 0xfe, 0xf0, + 0x09, 0xc0, 0x01, 0x19, + 0x81, 0x1b, 0x6f, 0xe8, + 0x5a, 0xe0, 0xb8, 0x0b, + 0x50, 0xe8, 0x83, 0x00, + 0x82, 0x00, 0x20, 0xb4, + 0x10, 0xd8, 0x84, 0xd4, + 0x88, 0xd3, 0x10, 0xe0, + 0x00, 0xd8, 0x24, 0xd4, + 0xf9, 0xc0, 0x57, 0xe8, + 0x48, 0x33, 0xf3, 
0xc0, + 0x00, 0x61, 0x6a, 0xc0, + 0x47, 0x11, 0x03, 0xf0, + 0x57, 0x11, 0x05, 0xf1, + 0x00, 0x61, 0x17, 0x48, + 0x00, 0x89, 0x41, 0xe0, + 0x9c, 0x20, 0x9c, 0x24, + 0xd0, 0x49, 0x09, 0xf0, + 0x04, 0x11, 0x07, 0xf1, + 0x00, 0x61, 0x97, 0x49, + 0x38, 0xf0, 0x97, 0x48, + 0x00, 0x89, 0x2b, 0xe0, + 0x00, 0x11, 0x05, 0xf1, + 0x00, 0x61, 0x92, 0x48, + 0x00, 0x89, 0x2f, 0xe0, + 0x06, 0x11, 0x05, 0xf1, + 0x00, 0x61, 0x11, 0x48, + 0x00, 0x89, 0x29, 0xe0, + 0x05, 0x11, 0x0f, 0xf1, + 0x00, 0x61, 0x93, 0x49, + 0x1a, 0xf1, 0x91, 0x49, + 0x0a, 0xf0, 0x91, 0x48, + 0x00, 0x89, 0x0f, 0xe0, + 0xc6, 0xc0, 0x00, 0x61, + 0x98, 0x20, 0x98, 0x24, + 0x25, 0x11, 0x80, 0xff, + 0xfa, 0xef, 0x17, 0xf1, + 0x38, 0xc0, 0x1f, 0xe8, + 0x95, 0x49, 0x13, 0xf0, + 0xf4, 0xef, 0x11, 0xf1, + 0x31, 0xc0, 0x00, 0x61, + 0x92, 0x49, 0x0d, 0xf1, + 0x12, 0x48, 0x00, 0x89, + 0x29, 0xc0, 0x00, 0x19, + 0x00, 0x89, 0x27, 0xc0, + 0x01, 0x89, 0x23, 0xc0, + 0x0e, 0xe8, 0x12, 0x48, + 0x81, 0x1b, 0x15, 0xe8, + 0xae, 0xc3, 0x66, 0x62, + 0xa0, 0x49, 0x04, 0xf0, + 0x64, 0x71, 0xa3, 0xc0, + 0x02, 0x99, 0x02, 0xc0, + 0x00, 0xb8, 0xd6, 0x07, + 0x13, 0xc4, 0x84, 0x98, + 0x00, 0x1b, 0x86, 0x8b, + 0x86, 0x73, 0xbf, 0x49, + 0xfe, 0xf1, 0x80, 0x71, + 0x82, 0x72, 0x80, 0xff, + 0x09, 0xc4, 0x84, 0x98, + 0x80, 0x99, 0x82, 0x9a, + 0x86, 0x8b, 0x86, 0x73, + 0xbf, 0x49, 0xfe, 0xf1, + 0x80, 0xff, 0x08, 0xea, + 0x30, 0xd4, 0x10, 0xc0, + 0x12, 0xe8, 0x8a, 0xd3, + 0x00, 0xd8, 0x02, 0xc6, + 0x00, 0xbe, 0xe0, 0x08 }; + + ocp_data = ocp_read_word(tp, MCU_TYPE_USB, 0xcfca); + ocp_data &= ~0x4000; + ocp_write_word(tp, MCU_TYPE_USB, 0xcfca, ocp_data); + + r8153_pre_ram_code(tp, 0x8146, 0x7001); + sram_write(tp, 0xb820, 0x0290); + sram_write(tp, 0xa012, 0x0000); + sram_write(tp, 0xa014, 0x2c04); + ocp_write_word(tp, MCU_TYPE_PLA, 0xb438, 0x2c07); + ocp_write_word(tp, MCU_TYPE_PLA, 0xb438, 0x2c0a); + ocp_write_word(tp, MCU_TYPE_PLA, 0xb438, 0x2c0d); + ocp_write_word(tp, MCU_TYPE_PLA, 0xb438, 0xa240); + ocp_write_word(tp, MCU_TYPE_PLA, 0xb438, 
0xa104); + ocp_write_word(tp, MCU_TYPE_PLA, 0xb438, 0x292d); + ocp_write_word(tp, MCU_TYPE_PLA, 0xb438, 0x8620); + ocp_write_word(tp, MCU_TYPE_PLA, 0xb438, 0xa480); + ocp_write_word(tp, MCU_TYPE_PLA, 0xb438, 0x2a2c); + ocp_write_word(tp, MCU_TYPE_PLA, 0xb438, 0x8480); + ocp_write_word(tp, MCU_TYPE_PLA, 0xb438, 0xa101); + ocp_write_word(tp, MCU_TYPE_PLA, 0xb438, 0x2a36); + ocp_write_word(tp, MCU_TYPE_PLA, 0xb438, 0xd056); + ocp_write_word(tp, MCU_TYPE_PLA, 0xb438, 0x2223); + sram_write(tp, 0xa01a, 0x0000); + sram_write(tp, 0xa006, 0x0222); + sram_write(tp, 0xa004, 0x0a35); + sram_write(tp, 0xa002, 0x0a2b); + sram_write(tp, 0xa000, 0xf92c); + sram_write(tp, 0xb820, 0x0210); + r8153_post_ram_code(tp, 0x8146); + + r8153_wdt1_end(tp); + r8153_clear_bp(tp); + + ocp_write_word(tp, MCU_TYPE_USB, USB_BP_EN, 0x0000); + generic_ocp_write(tp, 0xf800, 0xff, sizeof(usb_patch_c), + usb_patch_c, MCU_TYPE_USB); + ocp_write_word(tp, MCU_TYPE_USB, 0xfc26, 0xa000); + ocp_write_word(tp, MCU_TYPE_USB, 0xfc28, 0x3b34); + ocp_write_word(tp, MCU_TYPE_USB, 0xfc2a, 0x027c); + ocp_write_word(tp, MCU_TYPE_USB, 0xfc2c, 0x15de); + ocp_write_word(tp, MCU_TYPE_USB, 0xfc2e, 0x10ce); + if (ocp_read_byte(tp, MCU_TYPE_USB, 0xcfef) & 1) + ocp_write_word(tp, MCU_TYPE_USB, 0xfc30, 0x1578); + else + ocp_write_word(tp, MCU_TYPE_USB, 0xfc30, 0x1adc); + ocp_write_word(tp, MCU_TYPE_USB, 0xfc32, 0x3a28); + ocp_write_word(tp, MCU_TYPE_USB, 0xfc34, 0x05f8); + ocp_write_word(tp, MCU_TYPE_USB, 0xfc36, 0x05c8); + ocp_write_word(tp, MCU_TYPE_USB, USB_BP_EN, 0x00ff); + + ocp_write_word(tp, MCU_TYPE_PLA, PLA_BP_EN, 0x0000); + generic_ocp_write(tp, 0xf800, 0xff, sizeof(pla_patch_c), + pla_patch_c, MCU_TYPE_PLA); + ocp_write_word(tp, MCU_TYPE_PLA, 0xfc26, 0x8000); + ocp_write_word(tp, MCU_TYPE_PLA, 0xfc28, 0x1306); + ocp_write_word(tp, MCU_TYPE_PLA, 0xfc2a, 0x17ca); + ocp_write_word(tp, MCU_TYPE_PLA, 0xfc2c, 0x171e); + ocp_write_word(tp, MCU_TYPE_PLA, 0xfc2e, 0x0000); + ocp_write_word(tp, MCU_TYPE_PLA, 0xfc30, 0x0000); 
+ ocp_write_word(tp, MCU_TYPE_PLA, 0xfc32, 0x01b4); + ocp_write_word(tp, MCU_TYPE_PLA, 0xfc34, 0x07d4); + ocp_write_word(tp, MCU_TYPE_PLA, 0xfc36, 0x0894); + ocp_write_word(tp, MCU_TYPE_PLA, PLA_BP_EN, 0x00e6); + + ocp_write_word(tp, MCU_TYPE_PLA, 0xd388, 0x08ca); + ocp_write_word(tp, MCU_TYPE_PLA, 0xd398, 0x0084); + + ocp_data = ocp_read_word(tp, MCU_TYPE_USB, 0xcfca); + ocp_data |= 0x4000; + ocp_write_word(tp, MCU_TYPE_USB, 0xcfca, ocp_data); + + ocp_data = ocp_read_byte(tp, MCU_TYPE_USB, USB_USB2PHY); + ocp_data |= USB2PHY_L1 | USB2PHY_SUSPEND; + ocp_write_byte(tp, MCU_TYPE_USB, USB_USB2PHY, ocp_data); + } else if (tp->version == RTL_VER_06) { + u32 ocp_data; + static u8 usb_patch_d[] = { + 0x08, 0xe0, 0x0e, 0xe0, + 0x11, 0xe0, 0x24, 0xe0, + 0x30, 0xe0, 0x38, 0xe0, + 0x3a, 0xe0, 0x3c, 0xe0, + 0x1e, 0xc3, 0x70, 0x61, + 0x12, 0x48, 0x70, 0x89, + 0x02, 0xc3, 0x00, 0xbb, + 0x02, 0x17, 0x31, 0x19, + 0x02, 0xc3, 0x00, 0xbb, + 0x44, 0x14, 0x30, 0x18, + 0x11, 0xc1, 0x05, 0xe8, + 0x10, 0xc6, 0x02, 0xc2, + 0x00, 0xba, 0x94, 0x17, + 0x02, 0xb4, 0x09, 0xc2, + 0x40, 0x99, 0x0e, 0x48, + 0x42, 0x98, 0x42, 0x70, + 0x8e, 0x49, 0xfe, 0xf1, + 0x02, 0xb0, 0x80, 0xff, + 0xc0, 0xd4, 0xe4, 0x40, + 0x20, 0xd4, 0x0c, 0xc0, + 0x00, 0x63, 0xb5, 0x49, + 0x0c, 0xc0, 0x30, 0x18, + 0x06, 0xc1, 0xed, 0xef, + 0xf8, 0xc7, 0x02, 0xc0, + 0x00, 0xb8, 0x38, 0x12, + 0xe4, 0x4b, 0x00, 0xd8, + 0x0c, 0x61, 0x95, 0x48, + 0x96, 0x48, 0x92, 0x48, + 0x93, 0x48, 0x0c, 0x89, + 0x02, 0xc0, 0x00, 0xb8, + 0x0e, 0x06, 0x02, 0xc5, + 0x00, 0xbd, 0x00, 0x00, + 0x02, 0xc1, 0x00, 0xb9, + 0x00, 0x00, 0x02, 0xc1, + 0x00, 0xb9, 0x00, 0x00 }; + static u8 pla_patch_d[] = { + 0x03, 0xe0, 0x16, 0xe0, + 0x30, 0xe0, 0x12, 0xc2, + 0x40, 0x73, 0xb0, 0x49, + 0x08, 0xf0, 0xb8, 0x49, + 0x06, 0xf0, 0xb8, 0x48, + 0x40, 0x9b, 0x0b, 0xc2, + 0x40, 0x76, 0x05, 0xe0, + 0x02, 0x61, 0x02, 0xc3, + 0x00, 0xbb, 0x54, 0x08, + 0x02, 0xc3, 0x00, 0xbb, + 0x64, 0x08, 0x98, 0xd3, + 0x1e, 0xfc, 0xfe, 0xc0, + 0x02, 0x62, 0xa0, 0x48, + 0x02, 0x8a, 
0x00, 0x72, + 0xa0, 0x49, 0x11, 0xf0, + 0x13, 0xc1, 0x20, 0x62, + 0x2e, 0x21, 0x2f, 0x25, + 0x00, 0x71, 0x9f, 0x24, + 0x0a, 0x40, 0x09, 0xf0, + 0x00, 0x71, 0x18, 0x48, + 0xa0, 0x49, 0x03, 0xf1, + 0x9f, 0x48, 0x02, 0xe0, + 0x1f, 0x48, 0x00, 0x99, + 0x02, 0xc2, 0x00, 0xba, + 0xac, 0x0c, 0x08, 0xe9, + 0x36, 0xc0, 0x00, 0x61, + 0x9c, 0x20, 0x9c, 0x24, + 0x33, 0xc0, 0x07, 0x11, + 0x05, 0xf1, 0x00, 0x61, + 0x17, 0x48, 0x00, 0x89, + 0x0d, 0xe0, 0x04, 0x11, + 0x0b, 0xf1, 0x00, 0x61, + 0x97, 0x49, 0x08, 0xf0, + 0x97, 0x48, 0x00, 0x89, + 0x23, 0xc0, 0x0e, 0xe8, + 0x12, 0x48, 0x81, 0x1b, + 0x15, 0xe8, 0x1f, 0xc0, + 0x00, 0x61, 0x67, 0x11, + 0x04, 0xf0, 0x02, 0xc0, + 0x00, 0xb8, 0x42, 0x09, + 0x02, 0xc0, 0x00, 0xb8, + 0x90, 0x08, 0x13, 0xc4, + 0x84, 0x98, 0x00, 0x1b, + 0x86, 0x8b, 0x86, 0x73, + 0xbf, 0x49, 0xfe, 0xf1, + 0x80, 0x71, 0x82, 0x72, + 0x80, 0xff, 0x09, 0xc4, + 0x84, 0x98, 0x80, 0x99, + 0x82, 0x9a, 0x86, 0x8b, + 0x86, 0x73, 0xbf, 0x49, + 0xfe, 0xf1, 0x80, 0xff, + 0x08, 0xea, 0x30, 0xd4, + 0x50, 0xe8, 0x8a, 0xd3 }; + + r8153_pre_ram_code(tp, 0x8146, 0x7002); + sram_write(tp, 0xb820, 0x0290); + sram_write(tp, 0xa012, 0x0000); + sram_write(tp, 0xa014, 0x2c04); + ocp_write_word(tp, MCU_TYPE_PLA, 0xb438, 0x2c07); + ocp_write_word(tp, MCU_TYPE_PLA, 0xb438, 0x2c07); + ocp_write_word(tp, MCU_TYPE_PLA, 0xb438, 0x2c07); + ocp_write_word(tp, MCU_TYPE_PLA, 0xb438, 0xa240); + ocp_write_word(tp, MCU_TYPE_PLA, 0xb438, 0xa104); + ocp_write_word(tp, MCU_TYPE_PLA, 0xb438, 0x2944); + sram_write(tp, 0xa01a, 0x0000); + sram_write(tp, 0xa006, 0x0fff); + sram_write(tp, 0xa004, 0x0fff); + sram_write(tp, 0xa002, 0x0fff); + sram_write(tp, 0xa000, 0x1943); + sram_write(tp, 0xb820, 0x0210); + r8153_post_ram_code(tp, 0x8146); + + r8153_clear_bp(tp); + + ocp_write_word(tp, MCU_TYPE_USB, USB_BP_EN, 0x0000); + generic_ocp_write(tp, 0xf800, 0xff, sizeof(usb_patch_d), + usb_patch_d, MCU_TYPE_USB); + ocp_write_word(tp, MCU_TYPE_USB, 0xfc26, 0xa000); + ocp_write_word(tp, MCU_TYPE_USB, 0xfc28, 0x16de); 
+ ocp_write_word(tp, MCU_TYPE_USB, 0xfc2a, 0x1442); + ocp_write_word(tp, MCU_TYPE_USB, 0xfc2c, 0x1792); + ocp_write_word(tp, MCU_TYPE_USB, 0xfc2e, 0x1236); + ocp_write_word(tp, MCU_TYPE_USB, 0xfc30, 0x0606); + ocp_write_word(tp, MCU_TYPE_USB, 0xfc32, 0x0000); + ocp_write_word(tp, MCU_TYPE_USB, 0xfc34, 0x0000); + ocp_write_word(tp, MCU_TYPE_USB, 0xfc36, 0x0000); + if (ocp_read_byte(tp, MCU_TYPE_USB, 0xcfef) & 1) + ocp_write_word(tp, MCU_TYPE_USB, USB_BP_EN, 0x001b); + else + ocp_write_word(tp, MCU_TYPE_USB, USB_BP_EN, 0x001a); + + ocp_write_word(tp, MCU_TYPE_PLA, PLA_BP_EN, 0x0000); + generic_ocp_write(tp, 0xf800, 0xff, sizeof(pla_patch_d), + pla_patch_d, MCU_TYPE_PLA); + ocp_write_word(tp, MCU_TYPE_PLA, 0xfc26, 0x8000); + ocp_write_word(tp, MCU_TYPE_PLA, 0xfc28, 0x0852); + ocp_write_word(tp, MCU_TYPE_PLA, 0xfc2a, 0x0c92); + ocp_write_word(tp, MCU_TYPE_PLA, 0xfc2c, 0x088c); + ocp_write_word(tp, MCU_TYPE_PLA, 0xfc2e, 0x0000); + ocp_write_word(tp, MCU_TYPE_PLA, 0xfc30, 0x0000); + ocp_write_word(tp, MCU_TYPE_PLA, 0xfc32, 0x0000); + ocp_write_word(tp, MCU_TYPE_PLA, 0xfc34, 0x0000); + ocp_write_word(tp, MCU_TYPE_PLA, 0xfc36, 0x0000); + ocp_write_word(tp, MCU_TYPE_PLA, PLA_BP_EN, 0x0007); + + ocp_data = ocp_read_byte(tp, MCU_TYPE_USB, USB_USB2PHY); + ocp_data |= USB2PHY_L1 | USB2PHY_SUSPEND; + ocp_write_byte(tp, MCU_TYPE_USB, USB_USB2PHY, ocp_data); + } +} + +static void r8153b_firmware(struct r8152 *tp) +{ + if (tp->version == RTL_VER_09) { + u32 ocp_data; + static u8 usb_patch2_b[] = { + 0x10, 0xe0, 0x26, 0xe0, + 0x3a, 0xe0, 0x58, 0xe0, + 0x6c, 0xe0, 0x85, 0xe0, + 0xa5, 0xe0, 0xbe, 0xe0, + 0xd8, 0xe0, 0xdb, 0xe0, + 0xdd, 0xe0, 0xdf, 0xe0, + 0xe1, 0xe0, 0xe3, 0xe0, + 0xe5, 0xe0, 0xe7, 0xe0, + 0x16, 0xc0, 0x00, 0x75, + 0xd1, 0x49, 0x0d, 0xf0, + 0x0f, 0xc0, 0x0f, 0xc5, + 0x00, 0x1e, 0x08, 0x9e, + 0x0c, 0x9d, 0x0c, 0xc6, + 0x0a, 0x9e, 0x8f, 0x1c, + 0x0e, 0x8c, 0x0e, 0x74, + 0xcf, 0x49, 0xfe, 0xf1, + 0x02, 0xc0, 0x00, 0xb8, + 0x96, 0x31, 0x00, 0xdc, + 0x24, 0xe4, 0x80, 0x02, 
+ 0x34, 0xd3, 0xff, 0xc3, + 0x60, 0x72, 0xa1, 0x49, + 0x0d, 0xf0, 0xf8, 0xc3, + 0xf8, 0xc2, 0x00, 0x1c, + 0x68, 0x9c, 0xf6, 0xc4, + 0x6a, 0x9c, 0x6c, 0x9a, + 0x8f, 0x1c, 0x6e, 0x8c, + 0x6e, 0x74, 0xcf, 0x49, + 0xfe, 0xf1, 0x04, 0xc0, + 0x02, 0xc2, 0x00, 0xba, + 0xa8, 0x28, 0xf8, 0xc7, + 0xea, 0xc0, 0x00, 0x75, + 0xd1, 0x49, 0x15, 0xf0, + 0x19, 0xc7, 0x17, 0xc2, + 0xec, 0x9a, 0x00, 0x19, + 0xee, 0x89, 0xee, 0x71, + 0x9f, 0x49, 0xfe, 0xf1, + 0xea, 0x71, 0x9f, 0x49, + 0x0a, 0xf0, 0xd9, 0xc2, + 0xec, 0x9a, 0x00, 0x19, + 0xe8, 0x99, 0x81, 0x19, + 0xee, 0x89, 0xee, 0x71, + 0x9f, 0x49, 0xfe, 0xf1, + 0x06, 0xc3, 0x02, 0xc2, + 0x00, 0xba, 0xf0, 0x1d, + 0x4c, 0xe8, 0x00, 0xdc, + 0x00, 0xd4, 0xcb, 0xc0, + 0x00, 0x75, 0xd1, 0x49, + 0x0d, 0xf0, 0xc4, 0xc0, + 0xc4, 0xc5, 0x00, 0x1e, + 0x08, 0x9e, 0xc2, 0xc6, + 0x0a, 0x9e, 0x0c, 0x9d, + 0x8f, 0x1c, 0x0e, 0x8c, + 0x0e, 0x74, 0xcf, 0x49, + 0xfe, 0xf1, 0x04, 0xc0, + 0x02, 0xc1, 0x00, 0xb9, + 0xc4, 0x16, 0x20, 0xd4, + 0xb6, 0xc0, 0x00, 0x75, + 0xd1, 0x48, 0x00, 0x9d, + 0xe5, 0xc7, 0xaf, 0xc2, + 0xec, 0x9a, 0x00, 0x19, + 0xe8, 0x9a, 0x81, 0x19, + 0xee, 0x89, 0xee, 0x71, + 0x9f, 0x49, 0xfe, 0xf1, + 0x2c, 0xc1, 0xec, 0x99, + 0x81, 0x19, 0xee, 0x89, + 0xee, 0x71, 0x9f, 0x49, + 0xfe, 0xf1, 0x04, 0xc3, + 0x02, 0xc2, 0x00, 0xba, + 0x96, 0x1c, 0xc0, 0xd4, + 0xc0, 0x88, 0x1e, 0xc6, + 0xc0, 0x70, 0x8f, 0x49, + 0x0e, 0xf0, 0x8f, 0x48, + 0x93, 0xc6, 0xca, 0x98, + 0x11, 0x18, 0xc8, 0x98, + 0x16, 0xc0, 0xcc, 0x98, + 0x8f, 0x18, 0xce, 0x88, + 0xce, 0x70, 0x8f, 0x49, + 0xfe, 0xf1, 0x0b, 0xe0, + 0x43, 0xc6, 0x00, 0x18, + 0xc8, 0x98, 0x0b, 0xc0, + 0xcc, 0x98, 0x81, 0x18, + 0xce, 0x88, 0xce, 0x70, + 0x8f, 0x49, 0xfe, 0xf1, + 0x02, 0xc0, 0x00, 0xb8, + 0xf2, 0x19, 0x40, 0xd3, + 0x20, 0xe4, 0x33, 0xc2, + 0x40, 0x71, 0x91, 0x48, + 0x40, 0x99, 0x30, 0xc2, + 0x00, 0x19, 0x48, 0x99, + 0xf8, 0xc1, 0x4c, 0x99, + 0x81, 0x19, 0x4e, 0x89, + 0x4e, 0x71, 0x9f, 0x49, + 0xfe, 0xf1, 0x0b, 0xc1, + 0x4c, 0x99, 0x81, 0x19, + 0x4e, 0x89, 0x4e, 0x71, + 0x9f, 0x49, 0xfe, 
0xf1, + 0x02, 0x71, 0x02, 0xc2, + 0x00, 0xba, 0x0e, 0x34, + 0x24, 0xe4, 0x19, 0xc2, + 0x40, 0x71, 0x91, 0x48, + 0x40, 0x99, 0x16, 0xc2, + 0x00, 0x19, 0x48, 0x99, + 0xde, 0xc1, 0x4c, 0x99, + 0x81, 0x19, 0x4e, 0x89, + 0x4e, 0x71, 0x9f, 0x49, + 0xfe, 0xf1, 0xf1, 0xc1, + 0x4c, 0x99, 0x81, 0x19, + 0x4e, 0x89, 0x4e, 0x71, + 0x9f, 0x49, 0xfe, 0xf1, + 0x02, 0x71, 0x02, 0xc2, + 0x00, 0xba, 0x60, 0x33, + 0x34, 0xd3, 0x00, 0xdc, + 0x1e, 0x89, 0x02, 0xc0, + 0x00, 0xb8, 0xfa, 0x12, + 0x02, 0xc0, 0x00, 0xb8, + 0x00, 0x00, 0x02, 0xc0, + 0x00, 0xb8, 0x00, 0x00, + 0x02, 0xc0, 0x00, 0xb8, + 0x00, 0x00, 0x02, 0xc0, + 0x00, 0xb8, 0x00, 0x00, + 0x02, 0xc0, 0x00, 0xb8, + 0x00, 0x00, 0x02, 0xc0, + 0x00, 0xb8, 0x00, 0x00, + 0x02, 0xc0, 0x00, 0xb8, + 0x00, 0x00, 0x00, 0x00 }; + static u8 pla_patch2_b[] = { + 0x05, 0xe0, 0x1b, 0xe0, + 0x2c, 0xe0, 0x60, 0xe0, + 0x73, 0xe0, 0x15, 0xc6, + 0xc2, 0x64, 0xd2, 0x49, + 0x06, 0xf1, 0xc4, 0x48, + 0xc5, 0x48, 0xc6, 0x48, + 0xc7, 0x48, 0x05, 0xe0, + 0x44, 0x48, 0x45, 0x48, + 0x46, 0x48, 0x47, 0x48, + 0xc2, 0x8c, 0xc0, 0x64, + 0x46, 0x48, 0xc0, 0x8c, + 0x05, 0xc5, 0x02, 0xc4, + 0x00, 0xbc, 0x18, 0x02, + 0x06, 0xdc, 0xb0, 0xc0, + 0x10, 0xc5, 0xa0, 0x77, + 0xa0, 0x74, 0x46, 0x48, + 0x47, 0x48, 0xa0, 0x9c, + 0x0b, 0xc5, 0xa0, 0x74, + 0x44, 0x48, 0x43, 0x48, + 0xa0, 0x9c, 0x05, 0xc5, + 0xa0, 0x9f, 0x02, 0xc5, + 0x00, 0xbd, 0x3c, 0x03, + 0x1c, 0xe8, 0x20, 0xe8, + 0xd4, 0x49, 0x04, 0xf1, + 0xd5, 0x49, 0x20, 0xf1, + 0x28, 0xe0, 0x2a, 0xc7, + 0xe0, 0x75, 0xda, 0x49, + 0x14, 0xf0, 0x27, 0xc7, + 0xe0, 0x75, 0xdc, 0x49, + 0x10, 0xf1, 0x24, 0xc7, + 0xe0, 0x75, 0x25, 0xc7, + 0xe0, 0x74, 0x2c, 0x40, + 0x0a, 0xfa, 0x1f, 0xc7, + 0xe4, 0x75, 0xd0, 0x49, + 0x09, 0xf1, 0x1c, 0xc5, + 0xe6, 0x9d, 0x11, 0x1d, + 0xe4, 0x8d, 0x04, 0xe0, + 0x16, 0xc7, 0x00, 0x1d, + 0xe4, 0x8d, 0xe0, 0x8e, + 0x11, 0x1d, 0xe0, 0x8d, + 0x07, 0xe0, 0x0c, 0xc7, + 0xe0, 0x75, 0xda, 0x48, + 0xe0, 0x9d, 0x0b, 0xc7, + 0xe4, 0x8e, 0x02, 0xc4, + 0x00, 0xbc, 0x28, 0x03, + 0x02, 0xc4, 0x00, 0xbc, + 0x14, 
0x03, 0x12, 0xe8, + 0x4e, 0xe8, 0x1c, 0xe6, + 0x20, 0xe4, 0x80, 0x02, + 0xa4, 0xc0, 0x12, 0xc2, + 0x40, 0x73, 0xb0, 0x49, + 0x08, 0xf0, 0xb8, 0x49, + 0x06, 0xf0, 0xb8, 0x48, + 0x40, 0x9b, 0x0b, 0xc2, + 0x40, 0x76, 0x05, 0xe0, + 0x02, 0x61, 0x02, 0xc3, + 0x00, 0xbb, 0x0a, 0x0a, + 0x02, 0xc3, 0x00, 0xbb, + 0x1a, 0x0a, 0x98, 0xd3, + 0x1e, 0xfc, 0xfe, 0xc0, + 0x02, 0x62, 0xa0, 0x48, + 0x02, 0x8a, 0x00, 0x72, + 0xa0, 0x49, 0x11, 0xf0, + 0x13, 0xc1, 0x20, 0x62, + 0x2e, 0x21, 0x2f, 0x25, + 0x00, 0x71, 0x9f, 0x24, + 0x0a, 0x40, 0x09, 0xf0, + 0x00, 0x71, 0x18, 0x48, + 0xa0, 0x49, 0x03, 0xf1, + 0x9f, 0x48, 0x02, 0xe0, + 0x1f, 0x48, 0x00, 0x99, + 0x02, 0xc2, 0x00, 0xba, + 0xda, 0x0e, 0x08, 0xe9 }; + + r8153b_clear_bp(tp, MCU_TYPE_USB); + r8153b_clear_bp(tp, MCU_TYPE_PLA); + + ocp_write_word(tp, MCU_TYPE_USB, 0xd340, 0x807d); + + generic_ocp_write(tp, 0xe600, 0xff, sizeof(usb_patch2_b), + usb_patch2_b, MCU_TYPE_USB); + + ocp_write_word(tp, MCU_TYPE_USB, USB_BP_BA, 0xa000); + ocp_write_word(tp, MCU_TYPE_USB, USB_BP_0, 0x2a20); + ocp_write_word(tp, MCU_TYPE_USB, USB_BP_1, 0x28a6); + ocp_write_word(tp, MCU_TYPE_USB, USB_BP_2, 0x1dee); + ocp_write_word(tp, MCU_TYPE_USB, USB_BP_3, 0x16c2); + ocp_write_word(tp, MCU_TYPE_USB, USB_BP_4, 0x1c94); + ocp_write_word(tp, MCU_TYPE_USB, USB_BP_5, 0x19f0); + ocp_write_word(tp, MCU_TYPE_USB, USB_BP_6, 0x340c); + ocp_write_word(tp, MCU_TYPE_USB, USB_BP_7, 0x335e); + ocp_write_word(tp, MCU_TYPE_USB, USB_BP_8, 0x12f8); + ocp_write_word(tp, MCU_TYPE_USB, USB_BP_9, 0x0000); + ocp_write_word(tp, MCU_TYPE_USB, USB_BP_10, 0x0000); + ocp_write_word(tp, MCU_TYPE_USB, USB_BP_11, 0x0000); + ocp_write_word(tp, MCU_TYPE_USB, USB_BP_12, 0x0000); + ocp_write_word(tp, MCU_TYPE_USB, USB_BP_13, 0x0000); + ocp_write_word(tp, MCU_TYPE_USB, USB_BP_14, 0x0000); + ocp_write_word(tp, MCU_TYPE_USB, USB_BP_15, 0x0000); + ocp_write_word(tp, MCU_TYPE_USB, USB_BP2_EN, 0x01ff); + + generic_ocp_write(tp, 0xf800, 0xff, sizeof(pla_patch2_b), + pla_patch2_b, MCU_TYPE_PLA); + + 
ocp_write_word(tp, MCU_TYPE_PLA, PLA_BP_BA, 0x8000); + ocp_write_word(tp, MCU_TYPE_PLA, PLA_BP_0, 0x0216); + ocp_write_word(tp, MCU_TYPE_PLA, PLA_BP_1, 0x0332); + ocp_write_word(tp, MCU_TYPE_PLA, PLA_BP_2, 0x030c); + ocp_write_word(tp, MCU_TYPE_PLA, PLA_BP_3, 0x0a08); + ocp_write_word(tp, MCU_TYPE_PLA, PLA_BP_4, 0x0ec0); + ocp_write_word(tp, MCU_TYPE_PLA, PLA_BP_5, 0x0000); + ocp_write_word(tp, MCU_TYPE_PLA, PLA_BP_6, 0x0000); + ocp_write_word(tp, MCU_TYPE_PLA, PLA_BP_7, 0x0000); + ocp_write_word(tp, MCU_TYPE_PLA, PLA_BP_EN, 0x001e); + + if (ocp_read_byte(tp, MCU_TYPE_USB, 0xd81f) & BIT(2)) { + ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_BP_EN); + ocp_data |= BIT(0); + ocp_write_word(tp, MCU_TYPE_PLA, PLA_BP_EN, ocp_data); + } + + ocp_data = ocp_read_word(tp, MCU_TYPE_USB, 0xd334); + ocp_data |= BIT(1); + ocp_write_word(tp, MCU_TYPE_USB, 0xd334, ocp_data); + + ocp_data = ocp_read_word(tp, MCU_TYPE_USB, 0xd4e8); + ocp_data |= BIT(1); + ocp_write_word(tp, MCU_TYPE_USB, 0xd4e8, ocp_data); + + ocp_data = ocp_read_word(tp, MCU_TYPE_USB, 0xcfcc); + ocp_data |= BIT(9); + ocp_write_word(tp, MCU_TYPE_USB, 0xcfcc, ocp_data); + } +} + +static void r8156_firmware(struct r8152 *tp) +{ + if (tp->version == RTL_TEST_01) { + static u8 usb3_patch_t[] = { + 0x01, 0xe0, 0x05, 0xc7, + 0xf6, 0x65, 0x02, 0xc0, + 0x00, 0xb8, 0x40, 0x03, + 0x00, 0xd4, 0x00, 0x00 }; + u16 data; + + ocp_reg_write(tp, 0xb87c, 0x8099); + ocp_reg_write(tp, 0xb87e, 0x2a50); + ocp_reg_write(tp, 0xb87c, 0x80a1); + ocp_reg_write(tp, 0xb87e, 0x2a50); + ocp_reg_write(tp, 0xb87c, 0x809a); + ocp_reg_write(tp, 0xb87e, 0x5010); + ocp_reg_write(tp, 0xb87c, 0x80a2); + ocp_reg_write(tp, 0xb87e, 0x500f); + ocp_reg_write(tp, 0xb87c, 0x8087); + ocp_reg_write(tp, 0xb87e, 0xc0cf); + ocp_reg_write(tp, 0xb87c, 0x8080); + ocp_reg_write(tp, 0xb87e, 0x0f16); + ocp_reg_write(tp, 0xb87c, 0x8089); + ocp_reg_write(tp, 0xb87e, 0x161b); + ocp_reg_write(tp, 0xb87c, 0x808a); + ocp_reg_write(tp, 0xb87e, 0x1b1f); + + ocp_reg_write(tp, 
0xac36, 0x0080); + ocp_reg_write(tp, 0xac4a, 0xff00); + data = ocp_reg_read(tp, 0xac34); + data &= ~BIT(4); + data |= BIT(2) | BIT(3); + ocp_reg_write(tp, 0xac34, data); + + data = ocp_reg_read(tp, 0xac54); + data &= ~(BIT(9) | BIT(10)); + ocp_reg_write(tp, 0xac54, data); + ocp_reg_write(tp, 0xb87c, 0x8099); + ocp_reg_write(tp, 0xb87e, 0x2050); + ocp_reg_write(tp, 0xb87c, 0x80a1); + ocp_reg_write(tp, 0xb87e, 0x2050); + ocp_reg_write(tp, 0xb87c, 0x809a); + ocp_reg_write(tp, 0xb87e, 0x5010); + ocp_reg_write(tp, 0xb87c, 0x80a2); + ocp_reg_write(tp, 0xb87e, 0x500f); + data = ocp_reg_read(tp, 0xac34); + data &= ~BIT(5); + data |= BIT(6) | BIT(7); + ocp_reg_write(tp, 0xac34, data); + + if (r8153_patch_request(tp, true)) { + netif_err(tp, drv, tp->netdev, + "patch request error\n"); + return; + } + + data = ocp_reg_read(tp, 0xb896); + data &= ~BIT(0); + ocp_reg_write(tp, 0xb896, data); + ocp_reg_write(tp, 0xb892, 0x0000); + ocp_reg_write(tp, 0xb88e, 0xc089); + ocp_reg_write(tp, 0xb890, 0xc1d0); + ocp_reg_write(tp, 0xb88e, 0xc08a); + ocp_reg_write(tp, 0xb890, 0xe0f0); + ocp_reg_write(tp, 0xb88e, 0xc08b); + ocp_reg_write(tp, 0xb890, 0xe0f0); + ocp_reg_write(tp, 0xb88e, 0xc08c); + ocp_reg_write(tp, 0xb890, 0xffff); + ocp_reg_write(tp, 0xb88e, 0xc08d); + ocp_reg_write(tp, 0xb890, 0xffff); + ocp_reg_write(tp, 0xb88e, 0xc08e); + ocp_reg_write(tp, 0xb890, 0xffff); + ocp_reg_write(tp, 0xb88e, 0xc08f); + ocp_reg_write(tp, 0xb890, 0xffff); + ocp_reg_write(tp, 0xb88e, 0xc090); + ocp_reg_write(tp, 0xb890, 0xff12); + + ocp_reg_write(tp, 0xb88e, 0xc09a); + ocp_reg_write(tp, 0xb890, 0x191a); + ocp_reg_write(tp, 0xb88e, 0xc09b); + ocp_reg_write(tp, 0xb890, 0x191a); + ocp_reg_write(tp, 0xb88e, 0xc09e); + ocp_reg_write(tp, 0xb890, 0x1d1e); + ocp_reg_write(tp, 0xb88e, 0xc09f); + ocp_reg_write(tp, 0xb890, 0x1d1e); + ocp_reg_write(tp, 0xb88e, 0xc0a0); + ocp_reg_write(tp, 0xb890, 0x1f20); + ocp_reg_write(tp, 0xb88e, 0xc0a1); + ocp_reg_write(tp, 0xb890, 0x1f20); + ocp_reg_write(tp, 0xb88e, 
0xc0a2); + ocp_reg_write(tp, 0xb890, 0x2122); + ocp_reg_write(tp, 0xb88e, 0xc0a3); + ocp_reg_write(tp, 0xb890, 0x2122); + ocp_reg_write(tp, 0xb88e, 0xc0a4); + ocp_reg_write(tp, 0xb890, 0x2324); + ocp_reg_write(tp, 0xb88e, 0xc0a5); + ocp_reg_write(tp, 0xb890, 0x2324); + + ocp_reg_write(tp, 0xb88e, 0xc029); + ocp_reg_write(tp, 0xb890, 0xdff3); + ocp_reg_write(tp, 0xb88e, 0xc02a); + ocp_reg_write(tp, 0xb890, 0xf3f3); + ocp_reg_write(tp, 0xb88e, 0xc02b); + ocp_reg_write(tp, 0xb890, 0xf3f3); + ocp_reg_write(tp, 0xb88e, 0xc02c); + ocp_reg_write(tp, 0xb890, 0xf3ef); + ocp_reg_write(tp, 0xb88e, 0xc02d); + ocp_reg_write(tp, 0xb890, 0xf3ef); + ocp_reg_write(tp, 0xb88e, 0xc02e); + ocp_reg_write(tp, 0xb890, 0xebe7); + ocp_reg_write(tp, 0xb88e, 0xc02f); + ocp_reg_write(tp, 0xb890, 0xebe7); + ocp_reg_write(tp, 0xb88e, 0xc030); + ocp_reg_write(tp, 0xb890, 0xe4e2); + ocp_reg_write(tp, 0xb88e, 0xc031); + ocp_reg_write(tp, 0xb890, 0xe4e2); + ocp_reg_write(tp, 0xb88e, 0xc032); + ocp_reg_write(tp, 0xb890, 0xdfdf); + ocp_reg_write(tp, 0xb88e, 0xc033); + ocp_reg_write(tp, 0xb890, 0xdfdf); + ocp_reg_write(tp, 0xb88e, 0xc034); + ocp_reg_write(tp, 0xb890, 0xdfdf); + ocp_reg_write(tp, 0xb88e, 0xc035); + ocp_reg_write(tp, 0xb890, 0xdfdf); + ocp_reg_write(tp, 0xb88e, 0xc036); + ocp_reg_write(tp, 0xb890, 0xdfdf); + ocp_reg_write(tp, 0xb88e, 0xc037); + ocp_reg_write(tp, 0xb890, 0xdfdf); + ocp_reg_write(tp, 0xb88e, 0xc038); + ocp_reg_write(tp, 0xb890, 0xdfdf); + ocp_reg_write(tp, 0xb88e, 0xc039); + ocp_reg_write(tp, 0xb890, 0xdfdf); + ocp_reg_write(tp, 0xb88e, 0xc03a); + ocp_reg_write(tp, 0xb890, 0xdfdf); + ocp_reg_write(tp, 0xb88e, 0xc03b); + ocp_reg_write(tp, 0xb890, 0xdfdf); + ocp_reg_write(tp, 0xb88e, 0xc03c); + ocp_reg_write(tp, 0xb890, 0xdf00); + + data = ocp_reg_read(tp, 0xb896); + data |= BIT(0); + ocp_reg_write(tp, 0xb896, data); + + r8153_pre_ram_code(tp, 0x8024, 0x0000); + + data = ocp_reg_read(tp, 0xb820); + data |= BIT(7); + ocp_reg_write(tp, 0xb820, data); + + /* 
nc0_patch_RLE0847_171220_loop_test_USB */ + sram_write(tp, 0xA016, 0x0000); + sram_write(tp, 0xA012, 0x0000); + sram_write(tp, 0xA014, 0x1800); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x8010); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x1800); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x8027); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x1800); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x802e); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x1800); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x8035); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x1800); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x806d); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x1800); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x8077); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x1800); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x808c); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x1800); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x8091); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x1000); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x12ad); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xd708); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x3709); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x8017); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x3bdd); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x801f); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xc100); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x38c0); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x1034); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x4061); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xb902); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xa501); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x37b8); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x1034); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x1000); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x12ad); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xd71e); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x5fa6); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x1000); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x12ad); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x1800); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x1044); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x1000); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x12ad); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xd708); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x3b0f); + 
ocp_reg_write(tp, OCP_SRAM_DATA, 0x1032); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x1800); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x12ed); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x1000); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x12ad); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xd708); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x2109); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x1032); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x1800); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x12e5); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xd501); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xce01); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xa130); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xd010); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xd1a2); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xd700); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x401a); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xa140); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xd020); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xd1a1); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x401a); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x8120); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xa8c0); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xd020); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xd1a1); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x401a); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x8140); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xd093); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xd1a5); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x401a); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xa63f); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xd010); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xd1a2); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x401a); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xa73f); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xd09e); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xd1a2); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x401a); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xa180); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xce00); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xd0dc); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xd1a5); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x401a); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xd502); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xa401); + ocp_reg_write(tp, 
OCP_SRAM_DATA, 0xd03b); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xd1c4); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xd704); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x401c); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x617d); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xd502); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x8401); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xd503); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xcdc7); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xaf01); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xd500); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xd704); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x4013); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x1800); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x0f7a); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xd502); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x8401); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x8280); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xd500); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x1800); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x0f7a); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xd504); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x8208); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xd500); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xcc08); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x1000); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x08ba); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x1000); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x08c6); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x1800); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x0ee6); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x1000); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x068b); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x1000); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x0e9d); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xd719); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x34a1); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x0da2); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xd704); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x5f1c); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xd75e); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x3ffd); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x0dca); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xd707); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x5e67); + ocp_reg_write(tp, OCP_SRAM_DATA, 
0xd719); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x2f79); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x0dc0); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xd75e); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x2a51); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x0db6); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xffec); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xa540); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x1000); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x1308); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x1800); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x159e); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xc445); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xdb02); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x0c28); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x0608); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x0c47); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x0542); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xd00a); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x408d); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xd075); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x6045); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xd05d); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xd1a4); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xd07a); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xd1b5); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x1000); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x0771); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x3b4d); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x809f); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x2635); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x0241); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x2745); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x0241); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x27d0); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x80aa); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x5ec8); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xc446); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xdb04); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xa602); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xd064); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xd1a1); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xd018); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xd1b0); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x1000); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x068b); + 
ocp_reg_write(tp, OCP_SRAM_DATA, 0xd701); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x1000); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x0753); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x407b); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x1000); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x0771); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x2745); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x0241); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x61da); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x608a); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x6306); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x1800); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x80c5); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x5e28); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x2730); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x80b1); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x1800); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x80c5); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x1000); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x0771); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x1800); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x8103); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xc447); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xdb08); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x406d); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x0c07); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x0501); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xd056); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xd1c2); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x3ce1); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x01ae); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x2734); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x80c5); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x7f8a); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x0c07); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x0501); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xd04e); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xd1b2); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xd0a8); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xd1a7); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xdb08); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xc447); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x26d7); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x8103); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x648a); + ocp_reg_write(tp, 
OCP_SRAM_DATA, 0x5fbb); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x0ca0); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x0320); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xd501); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x80f0); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xce01); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xa208); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xce00); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xc317); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xd500); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x2c51); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x8103); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xdb10); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xc448); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xa620); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x8710); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x41dd); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xd502); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xa306); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x415f); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xa210); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x0c1f); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x0004); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xa330); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xd501); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xc575); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x8210); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xd502); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x8320); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xa301); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xd500); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x2c59); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x8103); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x3a33); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x80ff); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xd502); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x8301); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xd098); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xd191); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x609f); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x8306); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x1800); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x8110); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xa320); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xd501); + ocp_reg_write(tp, OCP_SRAM_DATA, 
0xa210); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xd006); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xd1e3); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xd501); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xc30f); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x4093); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xc033); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x1800); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x02fb); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xa0f0); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xce01); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x8208); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xce00); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x1800); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x02fb); + sram_write(tp, 0xA026, 0x0279); + sram_write(tp, 0xA024, 0x159c); + sram_write(tp, 0xA022, 0x0d94); + sram_write(tp, 0xA020, 0x0ee1); + sram_write(tp, 0xA006, 0x0f46); + sram_write(tp, 0xA004, 0x12e2); + sram_write(tp, 0xA002, 0x12ea); + sram_write(tp, 0xA000, 0x1034); + sram_write(tp, 0xA008, 0xff00); + + /* nc2_patch_RLE0847_171109_USB */ + sram_write(tp, 0xA016, 0x0020); + sram_write(tp, 0xA012, 0x0000); + sram_write(tp, 0xA014, 0x1800); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x8010); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x1800); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x8014); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x1800); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x8018); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x1800); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x8024); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x1800); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x8056); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x1800); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x8062); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x1800); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x8069); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x1800); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x8080); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xa404); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x8708); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x1800); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x0390); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xd37a); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xd21a); + 
ocp_reg_write(tp, OCP_SRAM_DATA, 0x1800); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x0508); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x8301); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xd164); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xd04d); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x1000); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x0441); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xd700); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x5fb4); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xcf0c); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x1000); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x0437); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x1800); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x010c); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xcb60); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xd71f); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x61ee); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xd71f); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x210c); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x001a); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xd700); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x5f57); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xbb80); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xd700); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x605f); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x9b80); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x8301); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xd1c3); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xd074); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xa301); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xfff1); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xcb62); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xb910); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xd71f); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x7fae); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x9930); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xcb80); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x8190); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x82a0); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x800a); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x8406); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xa780); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xd141); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xd040); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x1000); + ocp_reg_write(tp, 
OCP_SRAM_DATA, 0x0441); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xd700); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x5fb4); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xcb82); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x8701); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xa70c); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xa190); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xa2b4); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xa00a); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xa404); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xd700); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x6041); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xa402); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x1000); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x0441); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xd700); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x5fa7); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x1800); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x02ed); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x8301); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xd164); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xd04d); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x1000); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x0441); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xd700); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x5fb4); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x1000); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x0450); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xb401); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x1800); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x0236); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xb808); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xbb80); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xa301); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xd1c3); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xd074); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x1800); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x03f3); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xcb17); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x1000); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x0441); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x8ec0); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x1000); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x0426); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xae40); + ocp_reg_write(tp, OCP_SRAM_DATA, 
0x1000); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x0426); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x0cc0); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x0e80); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x1000); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x0426); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xaec0); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x1000); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x0426); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xd700); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x34a0); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x012c); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xd701); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x5d8e); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x1800); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x0134); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xcb23); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x1000); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x0441); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x8ec0); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x1000); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x0426); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xae40); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x1000); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x0426); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x0cc0); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x0e80); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x1000); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x0426); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xaec0); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x1000); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x0426); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xd701); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x5dee); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x1800); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x0249); + sram_write(tp, 0xA10E, 0x0239); + sram_write(tp, 0xA10C, 0x0119); + sram_write(tp, 0xA10A, 0x03f2); + sram_write(tp, 0xA108, 0x0231); + sram_write(tp, 0xA106, 0x0413); + sram_write(tp, 0xA104, 0x0108); + sram_write(tp, 0xA102, 0x0506); + sram_write(tp, 0xA100, 0x038e); + sram_write(tp, 0xA110, 0x00ff); + + /* uc2_patch_RLE0847_171006_calc_txcrc_reg_write_seq_USB */ + sram_write(tp, 0xb87c, 0x82c1); + sram_write(tp, 0xb87e, 0xaf82); + 
ocp_reg_write(tp, OCP_SRAM_DATA, 0xcdaf); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x82d6); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xaf82); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xd9af); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x82dc); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x0282); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xdc02); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x830c); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xaf03); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xd7af); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x0eea); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xaf0f); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xe4f8); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xfaef); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x69e0); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x8169); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xac23); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x1ee0); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x815d); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xad23); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x1bf7); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x0ee0); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xffcf); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xad26); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xfa02); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x0b99); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x0283); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x3cf6); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x0ee0); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xffcf); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xac26); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xfaae); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x0302); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x0d70); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xef96); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xfefc); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x04f8); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xfaef); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x69e0); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x8169); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xac24); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x1ee0); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x815d); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xad24); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x1bf7); + ocp_reg_write(tp, 
OCP_SRAM_DATA, 0x0ee0); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xffcf); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xad26); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xfa02); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x8349); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x0283); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x3cf6); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x0ee0); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xffcf); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xac26); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xfaae); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x0302); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x861d); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xef96); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xfefc); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x04f8); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xf70f); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xe0ff); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xcfad); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x27fa); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xf60f); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xfc04); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xf8f9); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xfaef); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x69e0); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x81a3); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xa000); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x0502); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x8375); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xae16); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xa001); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x0502); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x83aa); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xae0e); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xa002); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x0502); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x848f); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xae06); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xa003); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x0302); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x857e); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xef96); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xfefd); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xfc04); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xf8f9); + ocp_reg_write(tp, OCP_SRAM_DATA, 
0xfaef); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x69e1); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x8010); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xad2b); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x16ee); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x81a4); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x00ee); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x81a5); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x00ee); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x81a6); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x01ee); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x81a7); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x01ee); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x81a3); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x01ae); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x0ee1); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x815d); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xf62c); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xe581); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x5dbf); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x8663); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x0243); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x5cef); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x96fe); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xfdfc); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x04f8); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xf9fa); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xef69); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xe281); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xa6e3); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x81a7); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xef13); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x3905); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xac2f); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x1da2); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x0417); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x0285); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xf0e1); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x815d); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xf62c); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xe581); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x5dbf); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x8663); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x0243); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x5cee); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x81a3); + 
ocp_reg_write(tp, OCP_SRAM_DATA, 0x00ae); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x4412); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xd301); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x0284); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x20e6); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x81a6); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xe781); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xa75d); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x0303); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xef12); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x0c12); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x1e13); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xe281); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xa4e3); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x81a5); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x5d03); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x030c); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x260c); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x341e); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x121e); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x13bf); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x8666); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x023e); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xe8d1); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x09bf); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x8669); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x023e); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xe8bf); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x866c); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x0243); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x5cbf); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x8675); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x0243); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x5cee); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x81a3); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x02ef); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x96fe); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xfdfc); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x04f8); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xf9fa); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xef69); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xa201); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x0abf); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x867b); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x023f); + ocp_reg_write(tp, 
OCP_SRAM_DATA, 0x2cef); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x64ae); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x22a2); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x020a); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xbf86); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x7e02); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x3f2c); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xef64); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xae15); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xa203); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x0abf); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x8681); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x023f); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x2cef); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x64ae); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x08bf); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x8684); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x023f); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x2cef); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x6483); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x0c64); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x0c32); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x1a63); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xbf81); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xa81a); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x961f); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x66ef); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x563d); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x0004); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xad37); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x1fd9); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xef79); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xbf86); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x6602); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x3ee8); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xef16); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x290a); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xbf86); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x6902); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x3ee8); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xbf86); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x6c02); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x435c); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xef97); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x1916); + ocp_reg_write(tp, OCP_SRAM_DATA, 
0xaed9); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xef96); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xfefd); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xfc04); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xf8f9); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xfaef); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x69bf); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x8678); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x023f); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x2cad); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x2879); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xd103); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xbf86); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x7202); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x3ee8); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xbf86); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x6f02); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x3f2c); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xa094); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x62e2); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x81a6); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xe381); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xa7d1); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x04bf); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x8672); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x023e); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xe8bf); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x866f); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x023f); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x2cef); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x100d); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x121f); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x1259); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x03a1); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x0043); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xef10); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x1f13); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x5903); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xa100); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x3a02); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x851c); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xe681); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xa4e7); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x81a5); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x5d03); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x03ef); + 
ocp_reg_write(tp, OCP_SRAM_DATA, 0x120c); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x121e); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x130c); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x121e); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x120c); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x121e); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x13bf); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x8666); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x023e); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xe8d1); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x09bf); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x8669); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x023e); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xe8bf); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x866c); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x0243); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x5cbf); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x8675); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x0243); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x5cee); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x81a3); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x03ae); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x06bf); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x8675); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x0243); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x5cef); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x96fe); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xfdfc); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x04f8); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xf9fa); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xef69); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x1f66); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x8283); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x0c32); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xef12); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xbf86); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x8702); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x3ee8); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xef46); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x3c00); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x02ad); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x2741); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xef46); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x2c00); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x05bf); + ocp_reg_write(tp, 
OCP_SRAM_DATA, 0x8672); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x023e); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xe8bf); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x866f); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x023f); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x2cbf); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x868a); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x023e); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xe8ef); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x13bf); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x868d); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x023e); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xe8bf); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x8690); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x0243); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x5cef); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x10bf); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x868a); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x023e); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xe8ef); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x1311); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xbf86); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x8d02); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x3ee8); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xbf86); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x9002); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x435c); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x2b02); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x16ae); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xb7ef); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x96fe); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xfdfc); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x04f8); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xf9fa); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xef69); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xbf86); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x7802); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x3f2c); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xad28); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x5ee2); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x81a6); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xe381); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xa7d1); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x03bf); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x8672); + ocp_reg_write(tp, OCP_SRAM_DATA, 
0x023e); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xe8bf); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x866f); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x023f); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x2cef); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x1259); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x03a1); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x0014); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xef13); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x5903); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xa100); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x0da0); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x900a); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x13e7); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x81a7); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xee81); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xa301); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xae2f); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xa094); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x26d1); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x04bf); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x8672); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x023e); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xe8bf); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x866f); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x023f); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x2cef); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x100d); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x161f); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x1259); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x03a1); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x000d); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xef10); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x0d14); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x1f13); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x5903); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xa100); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x02ae); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xcdbf); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x8675); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x0243); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x5cef); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x96fe); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xfdfc); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x04f8); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xf9fa); + 
ocp_reg_write(tp, OCP_SRAM_DATA, 0xef69); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xd209); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xd100); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xbf86); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x6602); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x3ee8); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xef32); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x3b0e); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xad3f); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x11ef); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x12bf); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x8669); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x023e); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xe8bf); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x866c); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x0243); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x5c12); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xaee8); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xef96); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xfefd); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xfc04); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xf8f9); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xfaef); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x69e1); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x8010); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xad2b); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x0602); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x85f0); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x0286); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x44e1); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x8169); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xf62c); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xe581); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x69e1); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x815d); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xf62c); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xe581); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x5def); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x96fe); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xfdfc); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x04f8); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xf9fa); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xef69); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xee81); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xa400); + ocp_reg_write(tp, 
OCP_SRAM_DATA, 0xee81); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xa500); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xee81); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xa601); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xee81); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xa701); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xee81); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xa300); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xef96); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xfefd); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xfc04); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x44a6); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xe070); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xb468); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xdab4); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x68ff); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xb468); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xf0b6); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x3a20); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xb638); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xeeb6); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x38ff); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xb638); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x10b5); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x0032); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xb500); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x54b5); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x0076); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xb500); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x10b4); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x4e70); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xb450); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x52b4); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x4e66); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xb44e); + sram_write(tp, 0xb85e, 0x03d1); + sram_write(tp, 0xb860, 0x0ee4); + sram_write(tp, 0xb862, 0x0fde); + sram_write(tp, 0xb864, 0xffff); + sram_write(tp, 0xb878, 0x0001); + + data = ocp_reg_read(tp, 0xb820); + data &= ~BIT(7); + ocp_reg_write(tp, 0xb820, data); + + /* uc_patch_RLE0847_171212_customer_USB */ + sram_write(tp, 0x8586, 0xaf85); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x92af); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x8598); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xaf85); + 
ocp_reg_write(tp, OCP_SRAM_DATA, 0xa1af); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x85a1); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x0285); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xa1af); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x0414); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x0286); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x7e02); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x1273); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xaf10); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x1cf8); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xf9e3); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x83ab); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xe0a6); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x00e1); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xa601); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xef01); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x0d04); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x580f); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xa008); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x4659); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x0f9e); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x4239); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x0aab); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x3ee0); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xffcf); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xad26); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x07f7); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x0ead); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x2729); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xae02); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xf60e); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xe283); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xab1f); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x239f); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x28e0); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xb714); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xe1b7); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x155c); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x0c00); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x9fee); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x0285); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xfee0); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xffcf); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xad26); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x0af7); + ocp_reg_write(tp, 
OCP_SRAM_DATA, 0x0fe0); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xffcf); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xac27); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xfaf6); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x0fe2); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x83ab); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x1f23); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x9f03); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xaf85); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xa6fd); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xfc04); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xf8f9); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xfb02); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x866d); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x1f77); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xe0b7); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x2ee1); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xb72f); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x0286); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x4ce0); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xb72c); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xe1b7); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x2d02); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x864c); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xe0b7); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x2ae1); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xb72b); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x0286); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x4ce0); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xb728); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xe1b7); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x2902); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x864c); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xe0b7); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x26e1); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xb727); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x0286); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x4cef); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x47d2); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xb8e6); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xb468); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xe5b4); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x69d2); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xbce6); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xb468); + ocp_reg_write(tp, OCP_SRAM_DATA, 
0xe4b4); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x6902); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x866d); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xfffd); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xfc04); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xf8f9); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xfad2); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x00ef); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x675e); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x0001); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x1f46); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x0d71); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x5f7f); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xffad); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x2803); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x7fa0); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x010d); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x4112); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xa210); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xe8fe); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xfdfc); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x04f8); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xe0b4); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x62e1); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xb463); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x6901); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xe4b4); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x62e5); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xb463); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xfc04); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xf8f9); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xfaef); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x69e1); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x8016); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xad2d); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x3bbf); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x86fd); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x023f); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x08ac); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x2832); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xbf87); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x0002); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x3f08); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xad28); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x29d2); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x03bf); + 
ocp_reg_write(tp, OCP_SRAM_DATA, 0x8703); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x023f); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x080d); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x11f6); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x2fef); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x31e0); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x8ff3); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x0d01); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xf627); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x1b03); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xaa01); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x82e0); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x8ff2); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x0d01); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xf627); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x1b03); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xaa01); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x8202); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x86ca); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xef69); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xfefd); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xfc04); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xfbfa); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xef69); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xf9f8); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xbf8f); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xf4e1); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x8fed); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x1c21); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x1a92); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xe08f); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xeee1); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x8fef); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xef74); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xe08f); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xf0e1); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x8ff1); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xef64); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x0217); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x70fc); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xfdef); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x96fe); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xff04); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x2087); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x0620); + ocp_reg_write(tp, 
OCP_SRAM_DATA, 0x8709); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x0087); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x0cbb); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xa880); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xeea8); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x8070); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xa880); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x60a8); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x18e8); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xa818); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x60a8); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x1a00); + sram_write(tp, 0xb818, 0x040e); + sram_write(tp, 0xb81a, 0x1019); + sram_write(tp, 0xb81c, 0xffff); + sram_write(tp, 0xb81e, 0xffff); + sram_write(tp, 0xb832, 0x0003); + + r8153_post_ram_code(tp, 0x8024); + ocp_reg_write(tp, 0xc414, 0x0200); + + r8153_patch_request(tp, false); + + r8156_lock_mian(tp, true); + + sram_write(tp, 0x80c9, 0x3478); + sram_write(tp, 0x80d0, 0xfe8f); + sram_write(tp, 0x80ca, 0x7843); + sram_write(tp, 0x80cb, 0x43b0); + sram_write(tp, 0x80cb, 0x4380); + sram_write(tp, 0x80cc, 0xb00b); + sram_write(tp, 0x80cd, 0x0ba1); + sram_write(tp, 0x80d8, 0x1078); + sram_write(tp, 0x8016, 0x3f00); + sram_write(tp, 0x8fed, 0x0386); + sram_write(tp, 0x8fee, 0x86f4); + sram_write(tp, 0x8fef, 0xf486); + sram_write(tp, 0x8ff0, 0x86fd); + sram_write(tp, 0x8ff1, 0xfd28); + sram_write(tp, 0x8ff2, 0x285a); + sram_write(tp, 0x8ff3, 0x5a70); + sram_write(tp, 0x8ff4, 0x7000); + sram_write(tp, 0x8ff5, 0x005d); + sram_write(tp, 0x8ff6, 0x5d77); + sram_write(tp, 0x8ff7, 0x7778); + sram_write(tp, 0x8ff8, 0x785f); + sram_write(tp, 0x8ff9, 0x5f74); + sram_write(tp, 0x8ffa, 0x7478); + sram_write(tp, 0x8ffb, 0x7858); + sram_write(tp, 0x8ffc, 0x5870); + sram_write(tp, 0x8ffd, 0x7078); + sram_write(tp, 0x8ffe, 0x7850); + sram_write(tp, 0x8fff, 0x5000); + sram_write(tp, 0x80dd, 0x34a4); + sram_write(tp, 0x80e4, 0xfe7f); + sram_write(tp, 0x80e6, 0x4a19); + sram_write(tp, 0x80de, 0xa443); + sram_write(tp, 0x80df, 0x43a0); + sram_write(tp, 0x80df, 0x43a0); + sram_write(tp, 
0x80e0, 0xa00a); + sram_write(tp, 0x80e1, 0x0a00); + sram_write(tp, 0x80e8, 0x700c); + sram_write(tp, 0x80e2, 0x0007); + sram_write(tp, 0x80e3, 0x07fe); + sram_write(tp, 0x80ec, 0x0e78); + sram_write(tp, 0x80b5, 0x42f7); + sram_write(tp, 0x80bc, 0xfaa4); + sram_write(tp, 0x80bf, 0x1f80); + sram_write(tp, 0x80be, 0xff1f); + sram_write(tp, 0x80b7, 0x4280); + sram_write(tp, 0x80b6, 0xf742); + sram_write(tp, 0x80b8, 0x800f); + sram_write(tp, 0x80b9, 0x0fab); + sram_write(tp, 0x80c1, 0x1e0a); + sram_write(tp, 0x80c0, 0x801e); + sram_write(tp, 0x80bd, 0xa4ff); + sram_write(tp, 0x80bb, 0x0bfa); + sram_write(tp, 0x80ba, 0xab0b); + ocp_reg_write(tp, OCP_SRAM_ADDR, 0x818d); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x003d); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x009b); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x00cb); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x00e5); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x00f2); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x00f9); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x00fd); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x00ff); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x00c2); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x0065); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x0034); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x001b); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x000e); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x0007); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x0003); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x0002); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x0001); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x0000); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x0000); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x0000); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x0000); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x0000); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x0000); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x0000); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x0000); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x0000); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x0000); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x0000); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x0000); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x0000); + 
ocp_reg_write(tp, OCP_SRAM_DATA, 0x0000); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x0000); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x0000); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x0000); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x0000); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x0000); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x0000); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x0000); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x0000); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x0000); + sram_write(tp, 0x8163, 0xdb06); + sram_write(tp, 0x816a, 0xdb06); + sram_write(tp, 0x8171, 0xdb06); + + r8156_lock_mian(tp, false); + + r8153b_clear_bp(tp, MCU_TYPE_USB); + + generic_ocp_write(tp, 0xe600, 0xff, sizeof(usb3_patch_t), + usb3_patch_t, MCU_TYPE_USB); + + ocp_write_word(tp, MCU_TYPE_USB, USB_BP_BA, 0xa000); + ocp_write_word(tp, MCU_TYPE_USB, USB_BP_0, 0x033e); +// ocp_write_word(tp, MCU_TYPE_USB, USB_BP_1, 0x0000); +// ocp_write_word(tp, MCU_TYPE_USB, USB_BP_2, 0x0000); +// ocp_write_word(tp, MCU_TYPE_USB, USB_BP_3, 0x0000); +// ocp_write_word(tp, MCU_TYPE_USB, USB_BP_4, 0x0000); +// ocp_write_word(tp, MCU_TYPE_USB, USB_BP_5, 0x0000); +// ocp_write_word(tp, MCU_TYPE_USB, USB_BP_6, 0x0000); +// ocp_write_word(tp, MCU_TYPE_USB, USB_BP_7, 0x0000); +// ocp_write_word(tp, MCU_TYPE_USB, USB_BP_8, 0x0000); +// ocp_write_word(tp, MCU_TYPE_USB, USB_BP_9, 0x0000); +// ocp_write_word(tp, MCU_TYPE_USB, USB_BP_10, 0x0000); +// ocp_write_word(tp, MCU_TYPE_USB, USB_BP_11, 0x0000); +// ocp_write_word(tp, MCU_TYPE_USB, USB_BP_12, 0x0000); +// ocp_write_word(tp, MCU_TYPE_USB, USB_BP_13, 0x0000); +// ocp_write_word(tp, MCU_TYPE_USB, USB_BP_14, 0x0000); +// ocp_write_word(tp, MCU_TYPE_USB, USB_BP_15, 0x0000); + ocp_write_word(tp, MCU_TYPE_USB, USB_BP2_EN, 0x0001); + } +} + static void r8153_aldps_en(struct r8152 *tp, bool enable) { u16 data; @@ -3199,16 +7283,8 @@ static void r8153_aldps_en(struct r8152 *tp, bool enable) break; } } -} -static void r8153b_aldps_en(struct r8152 *tp, bool enable) -{ - r8153_aldps_en(tp, enable); - - if (enable) 
- r8153b_ups_flags_w1w0(tp, UPS_FLAGS_EN_ALDPS, 0); - else - r8153b_ups_flags_w1w0(tp, 0, UPS_FLAGS_EN_ALDPS); + tp->ups_info.aldps = enable; } static void r8153_eee_en(struct r8152 *tp, bool enable) @@ -3229,22 +7305,24 @@ static void r8153_eee_en(struct r8152 *tp, bool enable) ocp_write_word(tp, MCU_TYPE_PLA, PLA_EEE_CR, ocp_data); ocp_reg_write(tp, OCP_EEE_CFG, config); + + tp->ups_info.eee = enable; } -static void r8153b_eee_en(struct r8152 *tp, bool enable) +static void r8156_eee_en(struct r8152 *tp, bool enable) { + u16 config; + r8153_eee_en(tp, enable); - if (enable) - r8153b_ups_flags_w1w0(tp, UPS_FLAGS_EN_EEE, 0); - else - r8153b_ups_flags_w1w0(tp, 0, UPS_FLAGS_EN_EEE); -} + config = ocp_reg_read(tp, 0xa6d4); -static void r8153b_enable_fc(struct r8152 *tp) -{ - r8152b_enable_fc(tp); - r8153b_ups_flags_w1w0(tp, UPS_FLAGS_EN_FLOW_CTR, 0); + if (enable) + config |= BIT(0); + else + config &= ~BIT(0); + + ocp_reg_write(tp, 0xa6d4, config); } static void r8153_hw_phy_cfg(struct r8152 *tp) @@ -3259,6 +7337,8 @@ static void r8153_hw_phy_cfg(struct r8152 *tp) r8153_eee_en(tp, false); ocp_reg_write(tp, OCP_EEE_ADV, 0); + r8153_firmware(tp); + if (tp->version == RTL_VER_03) { data = ocp_reg_read(tp, OCP_EEE_CFG); data &= ~CTAP_SHORT_EN; @@ -3288,8 +7368,10 @@ static void r8153_hw_phy_cfg(struct r8152 *tp) sram_write(tp, SRAM_10M_AMP1, 0x00af); sram_write(tp, SRAM_10M_AMP2, 0x0208); - r8153_eee_en(tp, true); - ocp_reg_write(tp, OCP_EEE_ADV, MDIO_EEE_1000T | MDIO_EEE_100TX); + if (tp->eee_en) { + r8153_eee_en(tp, true); + ocp_reg_write(tp, OCP_EEE_ADV, tp->eee_adv); + } r8153_aldps_en(tp, true); r8152b_enable_fc(tp); @@ -3322,16 +7404,21 @@ static u32 r8152_efuse_read(struct r8152 *tp, u8 addr) static void r8153b_hw_phy_cfg(struct r8152 *tp) { - u32 ocp_data, ups_flags = 0; + u32 ocp_data; u16 data; /* disable ALDPS before updating the PHY parameters */ - r8153b_aldps_en(tp, false); + r8153_aldps_en(tp, false); /* disable EEE before updating the PHY parameters */ - 
r8153b_eee_en(tp, false); + r8153_eee_en(tp, false); ocp_reg_write(tp, OCP_EEE_ADV, 0); + /* U1/U2/L1 idle timer. 500 us */ + ocp_write_word(tp, MCU_TYPE_USB, USB_U1U2_TIMER, 500); + + r8153b_firmware(tp); + r8153b_green_en(tp, test_bit(GREEN_ETHERNET, &tp->flags)); data = sram_read(tp, SRAM_GREEN_CFG); @@ -3370,34 +7457,42 @@ static void r8153b_hw_phy_cfg(struct r8152 *tp) ocp_data |= PFM_PWM_SWITCH; ocp_write_word(tp, MCU_TYPE_PLA, PLA_PHY_PWR, ocp_data); +#ifdef CONFIG_CTAP_SHORT_OFF + data = ocp_reg_read(tp, OCP_EEE_CFG); + data &= ~CTAP_SHORT_EN; + ocp_reg_write(tp, OCP_EEE_CFG, data); + + tp->ups_info.ctap_short_off = true; +#endif /* Advnace EEE */ if (!r8153_patch_request(tp, true)) { data = ocp_reg_read(tp, OCP_POWER_CFG); data |= EEE_CLKDIV_EN; ocp_reg_write(tp, OCP_POWER_CFG, data); + tp->ups_info.eee_ckdiv = true; data = ocp_reg_read(tp, OCP_DOWN_SPEED); data |= EN_EEE_CMODE | EN_EEE_1000 | EN_10M_CLKDIV; ocp_reg_write(tp, OCP_DOWN_SPEED, data); + tp->ups_info.eee_cmod_lv = true; + tp->ups_info._10m_ckdiv = true; + tp->ups_info.eee_plloff_giga = true; ocp_reg_write(tp, OCP_SYSCLK_CFG, 0); ocp_reg_write(tp, OCP_SYSCLK_CFG, clk_div_expo(5)); - - ups_flags |= UPS_FLAGS_EN_10M_CKDIV | UPS_FLAGS_250M_CKDIV | - UPS_FLAGS_EN_EEE_CKDIV | UPS_FLAGS_EEE_CMOD_LV_EN | - UPS_FLAGS_EEE_PLLOFF_GIGA; + tp->ups_info._250m_ckdiv = true; r8153_patch_request(tp, false); } - r8153b_ups_flags_w1w0(tp, ups_flags, 0); + if (tp->eee_en) { + r8153_eee_en(tp, true); + ocp_reg_write(tp, OCP_EEE_ADV, tp->eee_adv); + } - r8153b_eee_en(tp, true); - ocp_reg_write(tp, OCP_EEE_ADV, MDIO_EEE_1000T | MDIO_EEE_100TX); - - r8153b_aldps_en(tp, true); - r8153b_enable_fc(tp); - r8153_u2p3en(tp, true); + r8153_aldps_en(tp, true); + r8152b_enable_fc(tp); +// r8153_u2p3en(tp, true); set_bit(PHY_RESET, &tp->flags); } @@ -3407,7 +7502,6 @@ static void r8153_first_init(struct r8152 *tp) u32 ocp_data; int i; - r8153_mac_clk_spd(tp, false); rxdy_gated_en(tp, true); r8153_teredo_off(tp); @@ -3469,8 
+7563,6 @@ static void r8153_enter_oob(struct r8152 *tp) u32 ocp_data; int i; - r8153_mac_clk_spd(tp, true); - ocp_data = ocp_read_byte(tp, MCU_TYPE_PLA, PLA_OOB_CTRL); ocp_data &= ~NOW_IS_OOB; ocp_write_byte(tp, MCU_TYPE_PLA, PLA_OOB_CTRL, ocp_data); @@ -3547,111 +7639,198 @@ static void rtl8153_disable(struct r8152 *tp) r8153_aldps_en(tp, true); } -static void rtl8153b_disable(struct r8152 *tp) +static int rtl8156_enable(struct r8152 *tp) { - r8153b_aldps_en(tp, false); - rtl_disable(tp); - rtl_reset_bmu(tp); - r8153b_aldps_en(tp, true); -} + u32 ocp_data; + u16 speed; -static int rtl8152_set_speed(struct r8152 *tp, u8 autoneg, u16 speed, u8 duplex) -{ - u16 bmcr, anar, gbcr; - enum spd_duplex speed_duplex; - int ret = 0; + if (test_bit(RTL8152_UNPLUG, &tp->flags)) + return -ENODEV; - anar = r8152_mdio_read(tp, MII_ADVERTISE); - anar &= ~(ADVERTISE_10HALF | ADVERTISE_10FULL | - ADVERTISE_100HALF | ADVERTISE_100FULL); - if (tp->mii.supports_gmii) { - gbcr = r8152_mdio_read(tp, MII_CTRL1000); - gbcr &= ~(ADVERTISE_1000FULL | ADVERTISE_1000HALF); - } else { - gbcr = 0; + set_tx_qlen(tp); + rtl_set_eee_plus(tp); + r8153_set_rx_early_timeout(tp); + r8153_set_rx_early_size(tp); + + switch (tp->version) { + case RTL_TEST_01: + ocp_data = ocp_read_byte(tp, MCU_TYPE_PLA, 0xe95a); + ocp_data &= ~0xf; + ocp_data |= 5; + ocp_write_byte(tp, MCU_TYPE_PLA, 0xe95a, ocp_data); + + ocp_data = ocp_read_byte(tp, MCU_TYPE_PLA, 0xe940); + ocp_data &= ~0x1f; + ocp_data |= 4; + ocp_write_byte(tp, MCU_TYPE_PLA, 0xe940, ocp_data); + break; + default: + break; } + ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_TCR1); + ocp_data &= ~(BIT(3) | BIT(9) | BIT(8)); + speed = rtl8152_get_speed(tp); + if ((speed & (_10bps | _100bps)) && !(speed & FULL_DUP)) { + ocp_data |= BIT(9); + ocp_write_word(tp, MCU_TYPE_PLA, PLA_TCR1, ocp_data); + + ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL4); + ocp_data &= ~BIT(8); + ocp_write_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL4, ocp_data); + } else 
{ + ocp_data |= BIT(9) | BIT(8); + ocp_write_word(tp, MCU_TYPE_PLA, PLA_TCR1, ocp_data); + + ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL4); + ocp_data |= BIT(8); + ocp_write_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL4, ocp_data); + } + + ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL4); + if (speed & _2500bps) + ocp_data &= ~BIT(6); + else + ocp_data |= BIT(6); + ocp_write_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL4, ocp_data); + + if (speed & _1000bps) + ocp_write_byte(tp, MCU_TYPE_PLA, 0xe04c, 0x11); + else if (speed & _500bps) + ocp_write_byte(tp, MCU_TYPE_PLA, 0xe04c, 0x3d); + + return rtl_enable(tp); +} + +static int rtl8152_set_speed(struct r8152 *tp, u8 autoneg, u32 speed, u8 duplex, + u32 advertising) +{ + u16 bmcr; + int ret = 0; + if (autoneg == AUTONEG_DISABLE) { - if (speed == SPEED_10) { - bmcr = 0; - anar |= ADVERTISE_10HALF | ADVERTISE_10FULL; - speed_duplex = FORCE_10M_HALF; - } else if (speed == SPEED_100) { + if (duplex != DUPLEX_HALF && duplex != DUPLEX_FULL) + return -EINVAL; + + switch (speed) { + case SPEED_10: + bmcr = BMCR_SPEED10; + if (duplex == DUPLEX_FULL) { + bmcr |= BMCR_FULLDPLX; + tp->ups_info.speed_duplex = FORCE_10M_FULL; + } else { + tp->ups_info.speed_duplex = FORCE_10M_HALF; + } + break; + case SPEED_100: bmcr = BMCR_SPEED100; - anar |= ADVERTISE_100HALF | ADVERTISE_100FULL; - speed_duplex = FORCE_100M_HALF; - } else if (speed == SPEED_1000 && tp->mii.supports_gmii) { - bmcr = BMCR_SPEED1000; - gbcr |= ADVERTISE_1000FULL | ADVERTISE_1000HALF; - speed_duplex = NWAY_1000M_FULL; - } else { + if (duplex == DUPLEX_FULL) { + bmcr |= BMCR_FULLDPLX; + tp->ups_info.speed_duplex = FORCE_100M_FULL; + } else { + tp->ups_info.speed_duplex = FORCE_100M_HALF; + } + break; + case SPEED_1000: + if (tp->mii.supports_gmii) { + bmcr = BMCR_SPEED1000 | BMCR_FULLDPLX; + tp->ups_info.speed_duplex = NWAY_1000M_FULL; + break; + } + default: ret = -EINVAL; goto out; } - if (duplex == DUPLEX_FULL) { - bmcr |= BMCR_FULLDPLX; - if 
(speed != SPEED_1000) - speed_duplex++; - } + if (duplex == DUPLEX_FULL) + tp->mii.full_duplex = 1; + else + tp->mii.full_duplex = 0; + + tp->mii.force_media = 1; } else { - if (speed == SPEED_10) { - if (duplex == DUPLEX_FULL) { - anar |= ADVERTISE_10HALF | ADVERTISE_10FULL; - speed_duplex = NWAY_10M_FULL; - } else { - anar |= ADVERTISE_10HALF; - speed_duplex = NWAY_10M_HALF; + u16 anar, tmp1; + u32 support; + + support = ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full | + ADVERTISED_100baseT_Half | + ADVERTISED_100baseT_Full; + + if (tp->mii.supports_gmii) { + support |= ADVERTISED_1000baseT_Full; + + if (test_bit(SUPPORT_2500FULL, &tp->flags)) + support |= ADVERTISED_2500baseX_Full; + } + + if (!(advertising & support)) + return -EINVAL; + + anar = r8152_mdio_read(tp, MII_ADVERTISE); + tmp1 = anar & ~(ADVERTISE_10HALF | ADVERTISE_10FULL | + ADVERTISE_100HALF | ADVERTISE_100FULL); + if (advertising & ADVERTISED_10baseT_Half) { + tmp1 |= ADVERTISE_10HALF; + tp->ups_info.speed_duplex = NWAY_10M_HALF; + } + if (advertising & ADVERTISED_10baseT_Full) { + tmp1 |= ADVERTISE_10FULL; + tp->ups_info.speed_duplex = NWAY_10M_FULL; + } + + if (advertising & ADVERTISED_100baseT_Half) { + tmp1 |= ADVERTISE_100HALF; + tp->ups_info.speed_duplex = NWAY_100M_HALF; + } + if (advertising & ADVERTISED_100baseT_Full) { + tmp1 |= ADVERTISE_100FULL; + tp->ups_info.speed_duplex = NWAY_100M_FULL; + } + + if (anar != tmp1) { + r8152_mdio_write(tp, MII_ADVERTISE, tmp1); + tp->mii.advertising = tmp1; + } + + if (tp->mii.supports_gmii) { + u16 gbcr, tmp2 = 0; + + gbcr = r8152_mdio_read(tp, MII_CTRL1000); + tmp2 = gbcr & ~(ADVERTISE_1000FULL | + ADVERTISE_1000HALF); + + if (advertising & ADVERTISED_1000baseT_Half) { + tmp2 |= ADVERTISE_1000HALF; + tp->ups_info.speed_duplex = NWAY_1000M_FULL; } - } else if (speed == SPEED_100) { - if (duplex == DUPLEX_FULL) { - anar |= ADVERTISE_10HALF | ADVERTISE_10FULL; - anar |= ADVERTISE_100HALF | ADVERTISE_100FULL; - speed_duplex = NWAY_100M_FULL; - } 
else { - anar |= ADVERTISE_10HALF; - anar |= ADVERTISE_100HALF; - speed_duplex = NWAY_100M_HALF; + if (advertising & ADVERTISED_1000baseT_Full) { + tmp2 |= ADVERTISE_1000FULL; + tp->ups_info.speed_duplex = NWAY_1000M_FULL; } - } else if (speed == SPEED_1000 && tp->mii.supports_gmii) { - if (duplex == DUPLEX_FULL) { - anar |= ADVERTISE_10HALF | ADVERTISE_10FULL; - anar |= ADVERTISE_100HALF | ADVERTISE_100FULL; - gbcr |= ADVERTISE_1000FULL | ADVERTISE_1000HALF; - } else { - anar |= ADVERTISE_10HALF; - anar |= ADVERTISE_100HALF; - gbcr |= ADVERTISE_1000HALF; - } - speed_duplex = NWAY_1000M_FULL; - } else { - ret = -EINVAL; - goto out; + + if (gbcr != tmp2) + r8152_mdio_write(tp, MII_CTRL1000, tmp2); + + gbcr = ocp_reg_read(tp, 0xa5d4); + tmp2 = gbcr & ~BIT(7); + + if (advertising & ADVERTISED_2500baseX_Full) + tmp2 |= BIT(7); + + if (gbcr != tmp2) + ocp_reg_write(tp, 0xa5d4, tmp2); } bmcr = BMCR_ANENABLE | BMCR_ANRESTART; + + tp->mii.force_media = 0; } if (test_and_clear_bit(PHY_RESET, &tp->flags)) bmcr |= BMCR_RESET; - if (tp->mii.supports_gmii) - r8152_mdio_write(tp, MII_CTRL1000, gbcr); - - r8152_mdio_write(tp, MII_ADVERTISE, anar); r8152_mdio_write(tp, MII_BMCR, bmcr); - switch (tp->version) { - case RTL_VER_08: - case RTL_VER_09: - r8153b_ups_flags_w1w0(tp, ups_flags_speed(speed_duplex), - UPS_FLAGS_SPEED_MASK); - break; - - default: - break; - } - if (bmcr & BMCR_RESET) { int i; @@ -3666,6 +7845,64 @@ out: return ret; } +static bool rtl_speed_down(struct r8152 *tp) +{ + bool ret = false; + + if (test_bit(RTL8152_UNPLUG, &tp->flags)) + return ret; + + if ((tp->saved_wolopts & WAKE_ANY) && !(tp->saved_wolopts & WAKE_PHY)) { + u16 bmcr; + + bmcr = r8152_mdio_read(tp, MII_BMCR); + + if (netif_carrier_ok(tp->netdev) && (bmcr & BMCR_ANENABLE) && + (r8152_mdio_read(tp, MII_EXPANSION) & EXPANSION_NWAY)) { + u16 anar, gbcr = 0, lpa, gbcr2 = 0; + + anar = r8152_mdio_read(tp, MII_ADVERTISE); + anar &= ~(ADVERTISE_10HALF | ADVERTISE_10FULL | + ADVERTISE_100HALF | 
ADVERTISE_100FULL); + + if (tp->mii.supports_gmii) { + gbcr = r8152_mdio_read(tp, MII_CTRL1000); + gbcr &= ~(ADVERTISE_1000FULL | + ADVERTISE_1000HALF); + if (test_bit(SUPPORT_2500FULL, &tp->flags)) { + gbcr2 = ocp_reg_read(tp, 0xa5d4); + gbcr2 &= ~BIT(7); + } + } + + lpa = r8152_mdio_read(tp, MII_LPA); + if (lpa & (LPA_10HALF | LPA_10FULL)) { + anar |= ADVERTISE_10HALF | ADVERTISE_10FULL; + } else if (lpa & (LPA_100HALF | LPA_100FULL)) { + anar |= ADVERTISE_10HALF | ADVERTISE_10FULL | + ADVERTISE_100HALF | ADVERTISE_100FULL; + } else { + goto out1; + } + + if (tp->mii.supports_gmii) { + r8152_mdio_write(tp, MII_CTRL1000, gbcr); + if (test_bit(SUPPORT_2500FULL, &tp->flags)) + ocp_reg_write(tp, 0xa5d4, gbcr2); + } + + r8152_mdio_write(tp, MII_ADVERTISE, anar); + r8152_mdio_write(tp, MII_BMCR, bmcr | BMCR_ANRESTART); + + set_bit(RECOVER_SPEED, &tp->flags); + ret = true; + } + } + +out1: + return ret; +} + static void rtl8152_up(struct r8152 *tp) { if (test_bit(RTL8152_UNPLUG, &tp->flags)) @@ -3687,28 +7924,46 @@ static void rtl8152_down(struct r8152 *tp) r8152_aldps_en(tp, false); r8152b_enter_oob(tp); r8152_aldps_en(tp, true); + if (tp->version == RTL_VER_01) + rtl8152_set_speed(tp, AUTONEG_ENABLE, 0, 0, 3); + else + rtl_speed_down(tp); } static void rtl8153_up(struct r8152 *tp) { + u32 ocp_data; + if (test_bit(RTL8152_UNPLUG, &tp->flags)) return; r8153_u1u2en(tp, false); r8153_u2p3en(tp, false); r8153_aldps_en(tp, false); + r8153_mac_clk_spd(tp, false); r8153_first_init(tp); - r8153_aldps_en(tp, true); - switch (tp->version) { - case RTL_VER_03: - case RTL_VER_04: - break; - case RTL_VER_05: - case RTL_VER_06: - default: - r8153_u2p3en(tp, true); - break; + ocp_data = ocp_read_byte(tp, MCU_TYPE_PLA, 0xe90a); + ocp_data |= BIT(0); + ocp_write_byte(tp, MCU_TYPE_PLA, 0xe90a, ocp_data); + + ocp_data = ocp_read_byte(tp, MCU_TYPE_PLA, 0xe007); + ocp_data &= ~BIT(7); + ocp_write_byte(tp, MCU_TYPE_PLA, 0xe007, ocp_data); + + if (!work_busy(&tp->hw_phy_work.work)) { + 
r8153_aldps_en(tp, true); + + switch (tp->version) { + case RTL_VER_03: + case RTL_VER_04: + break; + case RTL_VER_05: + case RTL_VER_06: + default: + r8153_u2p3en(tp, true); + break; + } } r8153_u1u2en(tp, true); @@ -3716,49 +7971,196 @@ static void rtl8153_up(struct r8152 *tp) static void rtl8153_down(struct r8152 *tp) { + u32 ocp_data; + if (test_bit(RTL8152_UNPLUG, &tp->flags)) { rtl_drop_queued_tx(tp); return; } + ocp_data = ocp_read_byte(tp, MCU_TYPE_PLA, 0xe90a); + ocp_data &= ~BIT(0); + ocp_write_byte(tp, MCU_TYPE_PLA, 0xe90a, ocp_data); + r8153_u1u2en(tp, false); r8153_u2p3en(tp, false); r8153_power_cut_en(tp, false); r8153_aldps_en(tp, false); + r8153_mac_clk_spd(tp, true); r8153_enter_oob(tp); r8153_aldps_en(tp, true); + rtl_speed_down(tp); } static void rtl8153b_up(struct r8152 *tp) { + u32 ocp_data; + if (test_bit(RTL8152_UNPLUG, &tp->flags)) return; r8153b_u1u2en(tp, false); r8153_u2p3en(tp, false); - r8153b_aldps_en(tp, false); + r8153_aldps_en(tp, false); r8153_first_init(tp); ocp_write_dword(tp, MCU_TYPE_USB, USB_RX_BUF_TH, RX_THR_B); - r8153b_aldps_en(tp, true); - r8153_u2p3en(tp, true); + ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL3); + ocp_data &= ~BIT(14); + ocp_write_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL3, ocp_data); + + if (!work_busy(&tp->hw_phy_work.work)) { + r8153_aldps_en(tp, true); +// r8153_u2p3en(tp, true); + } + r8153b_u1u2en(tp, true); } static void rtl8153b_down(struct r8152 *tp) { + u32 ocp_data; + if (test_bit(RTL8152_UNPLUG, &tp->flags)) { rtl_drop_queued_tx(tp); return; } + ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL3); + ocp_data |= BIT(14); + ocp_write_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL3, ocp_data); + r8153b_u1u2en(tp, false); r8153_u2p3en(tp, false); r8153b_power_cut_en(tp, false); - r8153b_aldps_en(tp, false); + r8153_aldps_en(tp, false); r8153_enter_oob(tp); - r8153b_aldps_en(tp, true); + r8153_aldps_en(tp, true); + rtl_speed_down(tp); +} + +static void rtl8156_up(struct r8152 *tp) +{ 
+ u32 ocp_data; + + if (test_bit(RTL8152_UNPLUG, &tp->flags)) + return; + + r8153b_u1u2en(tp, false); + r8153_u2p3en(tp, false); + r8153_aldps_en(tp, false); + + rxdy_gated_en(tp, true); + r8153_teredo_off(tp); + + ocp_data = ocp_read_dword(tp, MCU_TYPE_PLA, PLA_RCR); + ocp_data &= ~RCR_ACPT_ALL; + ocp_write_dword(tp, MCU_TYPE_PLA, PLA_RCR, ocp_data); + + ocp_write_byte(tp, MCU_TYPE_PLA, PLA_CR, 0); + rtl_reset_bmu(tp); + + ocp_data = ocp_read_byte(tp, MCU_TYPE_PLA, PLA_OOB_CTRL); + ocp_data &= ~NOW_IS_OOB; + ocp_write_byte(tp, MCU_TYPE_PLA, PLA_OOB_CTRL, ocp_data); + + ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_SFF_STS_7); + ocp_data &= ~MCU_BORW_EN; + ocp_write_word(tp, MCU_TYPE_PLA, PLA_SFF_STS_7, ocp_data); + + rtl_rx_vlan_en(tp, tp->netdev->features & NETIF_F_HW_VLAN_CTAG_RX); + + ocp_data = tp->netdev->mtu + VLAN_ETH_HLEN + ETH_FCS_LEN; + ocp_write_word(tp, MCU_TYPE_PLA, PLA_RMS, ocp_data); + ocp_write_byte(tp, MCU_TYPE_PLA, PLA_MTPS, MTPS_JUMBO); + + /* share FIFO settings */ + ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, 0xc0a2); + ocp_data &= ~0xfff; + ocp_data |= 0x08; + ocp_write_word(tp, MCU_TYPE_PLA, 0xc0a2, ocp_data); + + ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, 0xc0a6); + ocp_data &= ~0xfff; + ocp_data |= 0x0100; + ocp_write_word(tp, MCU_TYPE_PLA, 0xc0a6, ocp_data); + + ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, 0xc0a8); + ocp_data &= ~0xfff; + ocp_data |= 0x0200; + ocp_write_word(tp, MCU_TYPE_PLA, 0xc0a8, ocp_data); + + /* TX share fifo free credit full threshold */ + ocp_write_dword(tp, MCU_TYPE_PLA, PLA_TXFIFO_CTRL, TXFIFO_THR_NORMAL2); + + ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL3); + ocp_data &= ~BIT(14); + ocp_write_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL3, ocp_data); + +// ocp_data = ocp_read_word(tp, MCU_TYPE_USB, 0xd32a); +// ocp_data &= ~(BIT(8) | BIT(9)); +// ocp_write_word(tp, MCU_TYPE_USB, 0xd32a, ocp_data); + + ocp_write_dword(tp, MCU_TYPE_USB, USB_RX_BUF_TH, 0x00600400); + + if (tp->saved_wolopts != 
__rtl_get_wol(tp)) { + netif_warn(tp, ifup, tp->netdev, "wol setting is changed\n"); + __rtl_set_wol(tp, tp->saved_wolopts); + } + + if (!work_busy(&tp->hw_phy_work.work)) { + r8153_aldps_en(tp, true); + r8153_u2p3en(tp, true); + } + + r8153b_u1u2en(tp, true); +} + +static void rtl8156_down(struct r8152 *tp) +{ + u32 ocp_data; + + if (test_bit(RTL8152_UNPLUG, &tp->flags)) { + rtl_drop_queued_tx(tp); + return; + } + + ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL3); + ocp_data |= BIT(14); + ocp_write_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL3, ocp_data); + + r8153b_u1u2en(tp, false); + r8153_u2p3en(tp, false); + r8153b_power_cut_en(tp, false); + r8153_aldps_en(tp, false); + + ocp_data = ocp_read_byte(tp, MCU_TYPE_PLA, PLA_OOB_CTRL); + ocp_data &= ~NOW_IS_OOB; + ocp_write_byte(tp, MCU_TYPE_PLA, PLA_OOB_CTRL, ocp_data); + + rtl_disable(tp); + rtl_reset_bmu(tp); + + /* Clear teredo wake event. bit[15:8] is the teredo wakeup + * type. Set it to zero. bits[7:0] are the W1C bits about + * the events. Set them to all 1 to clear them. 
+ */ + ocp_write_word(tp, MCU_TYPE_PLA, PLA_TEREDO_WAKE_BASE, 0x00ff); + + ocp_data = ocp_read_byte(tp, MCU_TYPE_PLA, PLA_OOB_CTRL); + ocp_data |= NOW_IS_OOB; + ocp_write_byte(tp, MCU_TYPE_PLA, PLA_OOB_CTRL, ocp_data); + + rtl_rx_vlan_en(tp, true); + rxdy_gated_en(tp, false); + + ocp_data = ocp_read_dword(tp, MCU_TYPE_PLA, PLA_RCR); + ocp_data |= RCR_APM | RCR_AM | RCR_AB; + ocp_write_dword(tp, MCU_TYPE_PLA, PLA_RCR, ocp_data); + + r8153_aldps_en(tp, true); + rtl_speed_down(tp); } static bool rtl8152_in_nway(struct r8152 *tp) @@ -3791,18 +8193,18 @@ static void set_carrier(struct r8152 *tp) { struct net_device *netdev = tp->netdev; struct napi_struct *napi = &tp->napi; - u8 speed; + u16 speed; speed = rtl8152_get_speed(tp); if (speed & LINK_STATUS) { if (!netif_carrier_ok(netdev)) { tp->rtl_ops.enable(tp); - set_bit(RTL8152_SET_RX_MODE, &tp->flags); netif_stop_queue(netdev); napi_disable(napi); netif_carrier_on(netdev); rtl_start_rx(tp); + rtl8152_set_rx_mode(netdev); napi_enable(&tp->napi); netif_wake_queue(netdev); netif_info(tp, link, netdev, "carrier on\n"); @@ -3813,18 +8215,18 @@ static void set_carrier(struct r8152 *tp) } else { if (netif_carrier_ok(netdev)) { netif_carrier_off(netdev); + tasklet_disable(&tp->tx_tl); napi_disable(napi); tp->rtl_ops.disable(tp); napi_enable(napi); + tasklet_enable(&tp->tx_tl); netif_info(tp, link, netdev, "carrier off\n"); } } } -static void rtl_work_func_t(struct work_struct *work) +static inline void __rtl_work_func(struct r8152 *tp) { - struct r8152 *tp = container_of(work, struct r8152, schedule.work); - /* If the device is unplugged or !netif_running(), the workqueue * doesn't need to wake the device, and could return directly. 
*/ @@ -3845,13 +8247,13 @@ static void rtl_work_func_t(struct work_struct *work) if (test_and_clear_bit(RTL8152_LINK_CHG, &tp->flags)) set_carrier(tp); - if (test_and_clear_bit(RTL8152_SET_RX_MODE, &tp->flags)) - _rtl8152_set_rx_mode(tp->netdev); + if (test_bit(RTL8152_SET_RX_MODE, &tp->flags)) + rtl8152_set_rx_mode(tp->netdev); - /* don't schedule napi before linking */ - if (test_and_clear_bit(SCHEDULE_NAPI, &tp->flags) && + /* don't schedule tasket before linking */ + if (test_and_clear_bit(SCHEDULE_TASKLET, &tp->flags) && netif_carrier_ok(tp->netdev)) - napi_schedule(&tp->napi); + tasklet_schedule(&tp->tx_tl); mutex_unlock(&tp->control); @@ -3859,10 +8261,8 @@ out1: usb_autopm_put_interface(tp->intf); } -static void rtl_hw_phy_work_func_t(struct work_struct *work) +static inline void __rtl_hw_phy_work_func(struct r8152 *tp) { - struct r8152 *tp = container_of(work, struct r8152, hw_phy_work.work); - if (test_bit(RTL8152_UNPLUG, &tp->flags)) return; @@ -3873,14 +8273,49 @@ static void rtl_hw_phy_work_func_t(struct work_struct *work) tp->rtl_ops.hw_phy_cfg(tp); - rtl8152_set_speed(tp, tp->autoneg, tp->speed, tp->duplex); + rtl8152_set_speed(tp, tp->autoneg, tp->speed, tp->duplex, + tp->advertising); mutex_unlock(&tp->control); usb_autopm_put_interface(tp->intf); } -#ifdef CONFIG_PM_SLEEP +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20) + +static void rtl_work_func_t(void *data) +{ + struct r8152 *tp = (struct r8152 *)data; + + __rtl_work_func(tp); +} + +static void rtl_hw_phy_work_func_t(void *data) +{ + struct r8152 *tp = (struct r8152 *)data; + + __rtl_hw_phy_work_func(tp); +} + +#else + +static void rtl_work_func_t(struct work_struct *work) +{ + struct r8152 *tp = container_of(work, struct r8152, schedule.work); + + __rtl_work_func(tp); +} + +static void rtl_hw_phy_work_func_t(struct work_struct *work) +{ + struct r8152 *tp = container_of(work, struct r8152, hw_phy_work.work); + + __rtl_hw_phy_work_func(tp); +} + +#endif + +#if LINUX_VERSION_CODE >= 
KERNEL_VERSION(2,6,23) && defined(CONFIG_PM_SLEEP) static int rtl_notifier(struct notifier_block *nb, unsigned long action, void *data) { @@ -3907,11 +8342,26 @@ static int rtl_notifier(struct notifier_block *nb, unsigned long action, } #endif +static int rtk_disable_diag(struct r8152 *tp) +{ + tp->rtk_enable_diag--; + ocp_write_word(tp, MCU_TYPE_PLA, PLA_OCP_GPHY_BASE, tp->ocp_base); + netif_info(tp, drv, tp->netdev, "disable rtk diag %d\n", + tp->rtk_enable_diag); + mutex_unlock(&tp->control); + usb_autopm_put_interface(tp->intf); + + return 0; +} + static int rtl8152_open(struct net_device *netdev) { struct r8152 *tp = netdev_priv(netdev); int res = 0; + if (unlikely(tp->rtk_enable_diag)) + return -EBUSY; + res = alloc_all_mem(tp); if (res) goto out; @@ -3926,7 +8376,13 @@ static int rtl8152_open(struct net_device *netdev) netif_carrier_off(netdev); netif_start_queue(netdev); + smp_mb__before_atomic(); set_bit(WORK_ENABLE, &tp->flags); + smp_mb__after_atomic(); + + if (test_and_clear_bit(RECOVER_SPEED, &tp->flags)) + rtl8152_set_speed(tp, tp->autoneg, tp->speed, tp->duplex, + tp->advertising); res = usb_submit_urb(tp->intr_urb, GFP_KERNEL); if (res) { @@ -3937,11 +8393,12 @@ static int rtl8152_open(struct net_device *netdev) goto out_unlock; } napi_enable(&tp->napi); + tasklet_enable(&tp->tx_tl); mutex_unlock(&tp->control); usb_autopm_put_interface(tp->intf); -#ifdef CONFIG_PM_SLEEP +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,23) && defined(CONFIG_PM_SLEEP) tp->pm_notifier.notifier_call = rtl_notifier; register_pm_notifier(&tp->pm_notifier); #endif @@ -3961,16 +8418,23 @@ static int rtl8152_close(struct net_device *netdev) struct r8152 *tp = netdev_priv(netdev); int res = 0; -#ifdef CONFIG_PM_SLEEP +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,23) && defined(CONFIG_PM_SLEEP) unregister_pm_notifier(&tp->pm_notifier); #endif - if (!test_bit(RTL8152_UNPLUG, &tp->flags)) - napi_disable(&tp->napi); + tasklet_disable(&tp->tx_tl); + napi_disable(&tp->napi); + 
smp_mb__before_atomic(); clear_bit(WORK_ENABLE, &tp->flags); + smp_mb__after_atomic(); usb_kill_urb(tp->intr_urb); cancel_delayed_work_sync(&tp->schedule); netif_stop_queue(netdev); + if (unlikely(tp->rtk_enable_diag)) { + netif_err(tp, drv, tp->netdev, "rtk diag isn't disabled\n"); + rtk_disable_diag(tp); + } + res = usb_autopm_get_interface(tp->intf); if (res < 0 || test_bit(RTL8152_UNPLUG, &tp->flags)) { rtl_drop_queued_tx(tp); @@ -3979,7 +8443,9 @@ static int rtl8152_close(struct net_device *netdev) mutex_lock(&tp->control); tp->rtl_ops.down(tp); - +#if defined(RTL8152_S5_WOL) && defined(CONFIG_PM) + res = rtl_s5_wol(tp); +#endif mutex_unlock(&tp->control); usb_autopm_put_interface(tp->intf); @@ -4007,6 +8473,18 @@ static void r8152b_init(struct r8152 *tp) if (test_bit(RTL8152_UNPLUG, &tp->flags)) return; +#if 0 + /* Clear EP3 Fifo before using interrupt transfer */ + if (ocp_read_byte(tp, MCU_TYPE_USB, 0xb963) & 0x80) { + ocp_write_byte(tp, MCU_TYPE_USB, 0xb963, 0x08); + ocp_write_byte(tp, MCU_TYPE_USB, 0xb963, 0x40); + ocp_write_byte(tp, MCU_TYPE_USB, 0xb963, 0x00); + ocp_write_byte(tp, MCU_TYPE_USB, 0xb968, 0x00); + ocp_write_word(tp, MCU_TYPE_USB, 0xb010, 0x00e0); + ocp_write_byte(tp, MCU_TYPE_USB, 0xb963, 0x04); + } +#endif + data = r8152_mdio_read(tp, MII_BMCR); if (data & BMCR_PDOWN) { data &= ~BMCR_PDOWN; @@ -4022,6 +8500,7 @@ static void r8152b_init(struct r8152 *tp) } r8152_power_cut_en(tp, false); + rtl_runtime_suspend_enable(tp, false); ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_PHY_PWR); ocp_data |= TX_10M_IDLE_EN | PFM_PWM_SWITCH; @@ -4034,6 +8513,13 @@ static void r8152b_init(struct r8152 *tp) SPDWN_RXDV_MSK | SPDWN_LINKCHG_MSK; ocp_write_word(tp, MCU_TYPE_PLA, PLA_GPHY_INTR_IMR, ocp_data); + ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_USB_TIMER); + ocp_data |= BIT(15); + ocp_write_word(tp, MCU_TYPE_USB, USB_USB_TIMER, ocp_data); + ocp_write_word(tp, MCU_TYPE_USB, 0xcbfc, 0x03e8); + ocp_data &= ~BIT(15); + ocp_write_word(tp, MCU_TYPE_USB, 
USB_USB_TIMER, ocp_data); + rtl_tally_reset(tp); /* enable rx aggregation */ @@ -4106,6 +8592,20 @@ static void r8153_init(struct r8152 *tp) else ocp_data |= DYNAMIC_BURST; ocp_write_byte(tp, MCU_TYPE_USB, USB_CSR_DUMMY1, ocp_data); + + r8153_queue_wake(tp, false); + + ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, 0xd398); + if (rtl8152_get_speed(tp) & LINK_STATUS) + ocp_data |= BIT(15); + else + ocp_data &= ~BIT(15); + + /* r8153_queue_wake() has set this bit */ + /* ocp_data &= ~BIT(8); */ + + ocp_data |= BIT(0); + ocp_write_word(tp, MCU_TYPE_PLA, 0xd398, ocp_data); } ocp_data = ocp_read_byte(tp, MCU_TYPE_USB, USB_CSR_DUMMY2); @@ -4135,10 +8635,19 @@ static void r8153_init(struct r8152 *tp) ocp_write_word(tp, MCU_TYPE_USB, USB_CONNECT_TIMER, 0x0001); r8153_power_cut_en(tp, false); + rtl_runtime_suspend_enable(tp, false); r8153_u1u2en(tp, true); r8153_mac_clk_spd(tp, false); usb_enable_lpm(tp->udev); + ocp_data = ocp_read_byte(tp, MCU_TYPE_PLA, 0xe90a); + ocp_data |= BIT(0); + ocp_write_byte(tp, MCU_TYPE_PLA, 0xe90a, ocp_data); + + ocp_data = ocp_read_byte(tp, MCU_TYPE_PLA, 0xe007); + ocp_data &= ~BIT(7); + ocp_write_byte(tp, MCU_TYPE_PLA, 0xe007, ocp_data); + /* rx aggregation */ ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_USB_CTRL); ocp_data &= ~(RX_AGG_DISABLE | RX_ZERO_EN); @@ -4148,7 +8657,9 @@ static void r8153_init(struct r8152 *tp) switch (tp->udev->speed) { case USB_SPEED_SUPER: +#if LINUX_VERSION_CODE >= KERNEL_VERSION(4,6,0) case USB_SPEED_SUPER_PLUS: +#endif tp->coalesce = COALESCE_SUPER; break; case USB_SPEED_HIGH: @@ -4171,6 +8682,2166 @@ static void r8153b_init(struct r8152 *tp) r8153b_u1u2en(tp, false); + for (i = 0; i < 500; i++) { + if (ocp_read_word(tp, MCU_TYPE_PLA, PLA_BOOT_CTRL) & + AUTOLOAD_DONE) + break; + msleep(20); + } + + data = r8153_phy_status(tp, 0); + + data = r8152_mdio_read(tp, MII_BMCR); + if (data & BMCR_PDOWN) { + data &= ~BMCR_PDOWN; + r8152_mdio_write(tp, MII_BMCR, data); + } + + data = r8153_phy_status(tp, PHY_STAT_LAN_ON); 
+ + r8153_u2p3en(tp, false); + + /* MSC timer = 0xfff * 8ms = 32760 ms */ + ocp_write_word(tp, MCU_TYPE_USB, USB_MSC_TIMER, 0x0fff); + + r8153b_power_cut_en(tp, false); + r8153b_ups_en(tp, false); + r8153_queue_wake(tp, false); + rtl_runtime_suspend_enable(tp, false); + + ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, 0xd398); + if (rtl8152_get_speed(tp) & LINK_STATUS) + ocp_data |= BIT(15); + else + ocp_data &= ~BIT(15); + + /* r8153_queue_wake() has set this bit */ + /* ocp_data &= ~BIT(8); */ + + ocp_data |= BIT(0); + ocp_write_word(tp, MCU_TYPE_PLA, 0xd398, ocp_data); + + if (tp->udev->descriptor.idVendor == VENDOR_ID_LENOVO && + tp->udev->descriptor.idProduct == 0x3069) + ocp_write_word(tp, MCU_TYPE_USB, USB_SSPHYLINK2, 0x0c8c); + + r8153b_u1u2en(tp, true); + usb_enable_lpm(tp->udev); + + /* MAC clock speed down */ + ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL2); + ocp_data |= MAC_CLK_SPDWN_EN; + ocp_write_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL2, ocp_data); + + ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL3); + ocp_data &= ~BIT(14); + ocp_write_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL3, ocp_data); + + if (tp->version == RTL_VER_09) { + if (ocp_read_byte(tp, MCU_TYPE_PLA, 0xdc00) & BIT(5)) { + ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_PHY_PWR); + ocp_data |= TEST_IO_OFF; + ocp_write_word(tp, MCU_TYPE_PLA, PLA_PHY_PWR, ocp_data); + } + } + + set_bit(GREEN_ETHERNET, &tp->flags); + + /* rx aggregation */ + ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_USB_CTRL); + ocp_data &= ~(RX_AGG_DISABLE | RX_ZERO_EN); + ocp_write_word(tp, MCU_TYPE_USB, USB_USB_CTRL, ocp_data); + + rtl_tally_reset(tp); + + tp->coalesce = 15000; /* 15 us */ +} + +static void r8156_patch_code(struct r8152 *tp) +{ + if (tp->version == RTL_TEST_01) { + static u8 usb3_patch_t[] = { + 0x01, 0xe0, 0x05, 0xc7, + 0xf6, 0x65, 0x02, 0xc0, + 0x00, 0xb8, 0x40, 0x03, + 0x00, 0xd4, 0x00, 0x00 }; + + r8153b_clear_bp(tp, MCU_TYPE_USB); + + generic_ocp_write(tp, 0xe600, 0xff, 
sizeof(usb3_patch_t), + usb3_patch_t, MCU_TYPE_USB); + + ocp_write_word(tp, MCU_TYPE_USB, USB_BP_BA, 0xa000); + ocp_write_word(tp, MCU_TYPE_USB, USB_BP_0, 0x033e); +// ocp_write_word(tp, MCU_TYPE_USB, USB_BP_1, 0x0000); +// ocp_write_word(tp, MCU_TYPE_USB, USB_BP_2, 0x0000); +// ocp_write_word(tp, MCU_TYPE_USB, USB_BP_3, 0x0000); +// ocp_write_word(tp, MCU_TYPE_USB, USB_BP_4, 0x0000); +// ocp_write_word(tp, MCU_TYPE_USB, USB_BP_5, 0x0000); +// ocp_write_word(tp, MCU_TYPE_USB, USB_BP_6, 0x0000); +// ocp_write_word(tp, MCU_TYPE_USB, USB_BP_7, 0x0000); +// ocp_write_word(tp, MCU_TYPE_USB, USB_BP_8, 0x0000); +// ocp_write_word(tp, MCU_TYPE_USB, USB_BP_9, 0x0000); +// ocp_write_word(tp, MCU_TYPE_USB, USB_BP_10, 0x0000); +// ocp_write_word(tp, MCU_TYPE_USB, USB_BP_11, 0x0000); +// ocp_write_word(tp, MCU_TYPE_USB, USB_BP_12, 0x0000); +// ocp_write_word(tp, MCU_TYPE_USB, USB_BP_13, 0x0000); +// ocp_write_word(tp, MCU_TYPE_USB, USB_BP_14, 0x0000); +// ocp_write_word(tp, MCU_TYPE_USB, USB_BP_15, 0x0000); + ocp_write_word(tp, MCU_TYPE_USB, USB_BP2_EN, 0x0001); + + } else if (tp->version == RTL_VER_11) { + u32 ocp_data; + static u8 usb_patch3_b[] = { + 0x05, 0xe0, 0x31, 0xe0, + 0x77, 0xe0, 0x86, 0xe0, + 0x97, 0xe0, 0x2c, 0xc3, + 0x60, 0x70, 0x80, 0x49, + 0xfd, 0xf0, 0x27, 0xc3, + 0x66, 0x60, 0x80, 0x48, + 0x02, 0x48, 0x66, 0x88, + 0x00, 0x48, 0x82, 0x48, + 0x66, 0x88, 0x1b, 0xc3, + 0x60, 0x70, 0x17, 0xc4, + 0x88, 0x98, 0x14, 0xc0, + 0x8c, 0x98, 0x83, 0x18, + 0x8e, 0x88, 0x8e, 0x70, + 0x8f, 0x49, 0xfe, 0xf1, + 0x62, 0x70, 0x8a, 0x98, + 0x0d, 0xc0, 0x8c, 0x98, + 0x84, 0x18, 0x8e, 0x88, + 0x8e, 0x70, 0x8f, 0x49, + 0xfe, 0xf1, 0x08, 0xc3, + 0x02, 0xc4, 0x00, 0xbc, + 0x68, 0x0f, 0x6c, 0xe9, + 0x00, 0xdc, 0x50, 0xe8, + 0x30, 0xc1, 0x36, 0xd3, + 0x80, 0x10, 0x00, 0x00, + 0x80, 0xd4, 0x26, 0xd8, + 0x44, 0xc2, 0x4a, 0x41, + 0x94, 0x20, 0x42, 0xc0, + 0x16, 0x00, 0x00, 0x73, + 0x40, 0xc4, 0x5c, 0x41, + 0x8b, 0x41, 0x0b, 0x18, + 0x38, 0xc6, 0xc0, 0x88, + 0xc1, 0x99, 0x21, 0xe8, + 0x35, 
0xc0, 0x00, 0x73, + 0xbd, 0x48, 0x0d, 0x18, + 0x30, 0xc6, 0xc0, 0x88, + 0xc1, 0x9b, 0x19, 0xe8, + 0x2d, 0xc0, 0x02, 0x73, + 0x35, 0x48, 0x0e, 0x18, + 0x28, 0xc6, 0xc0, 0x88, + 0xc1, 0x9b, 0x11, 0xe8, + 0xdf, 0xc3, 0xdd, 0xc6, + 0x01, 0x03, 0x1e, 0x40, + 0xfe, 0xf1, 0x20, 0xc0, + 0x02, 0x73, 0xb5, 0x48, + 0x0e, 0x18, 0x1b, 0xc6, + 0xc0, 0x88, 0xc1, 0x9b, + 0x04, 0xe8, 0x02, 0xc6, + 0x00, 0xbe, 0xb6, 0x10, + 0x00, 0xb4, 0x01, 0xb4, + 0x02, 0xb4, 0x03, 0xb4, + 0x10, 0xc3, 0x0e, 0xc2, + 0x61, 0x71, 0x40, 0x99, + 0x60, 0x60, 0x0e, 0x48, + 0x42, 0x98, 0x42, 0x70, + 0x8e, 0x49, 0xfe, 0xf1, + 0x03, 0xb0, 0x02, 0xb0, + 0x01, 0xb0, 0x00, 0xb0, + 0x80, 0xff, 0xc0, 0xd4, + 0x8f, 0xcb, 0xaa, 0xc7, + 0x1e, 0x00, 0x90, 0xc7, + 0x1f, 0xfe, 0x0a, 0x10, + 0x0c, 0xf0, 0x0b, 0x10, + 0x0a, 0xf0, 0x0d, 0x10, + 0x08, 0xf0, 0x0e, 0x10, + 0x06, 0xf0, 0x24, 0x10, + 0x04, 0xf0, 0x02, 0xc7, + 0x00, 0xbf, 0x58, 0x11, + 0x02, 0xc7, 0x00, 0xbf, + 0x62, 0x11, 0xec, 0xc0, + 0x02, 0x75, 0xd5, 0x48, + 0x0e, 0x18, 0xe7, 0xc6, + 0xc0, 0x88, 0xc1, 0x9d, + 0xd0, 0xef, 0x02, 0x75, + 0x55, 0x48, 0x0e, 0x18, + 0xe0, 0xc6, 0xc0, 0x88, + 0xc1, 0x9d, 0xc9, 0xef, + 0x02, 0xc7, 0x00, 0xbf, + 0x8e, 0x11, 0x16, 0xc0, + 0xbb, 0x21, 0xb9, 0x25, + 0x00, 0x71, 0x13, 0xc2, + 0x4a, 0x41, 0x8b, 0x41, + 0x24, 0x18, 0xd1, 0xc6, + 0xc0, 0x88, 0xc1, 0x99, + 0xba, 0xef, 0x0a, 0xc0, + 0x08, 0x71, 0x28, 0x18, + 0xca, 0xc6, 0xc0, 0x88, + 0xc1, 0x99, 0xb3, 0xef, + 0x02, 0xc0, 0x00, 0xb8, + 0x3c, 0x11, 0xd8, 0xc7, + 0x83, 0xff, 0x00, 0x00}; + static u8 pla_patch11[] = { + 0x02, 0xe0, 0x07, 0xe0, + 0x05, 0xc2, 0x40, 0x76, + 0x02, 0xc4, 0x00, 0xbc, + 0xd6, 0x0b, 0x1e, 0xfc, + 0x2a, 0xc5, 0xa0, 0x77, + 0x2a, 0xc5, 0x2b, 0xc4, + 0xa0, 0x9c, 0x26, 0xc5, + 0xa0, 0x64, 0x01, 0x14, + 0x0b, 0xf0, 0x02, 0x14, + 0x09, 0xf0, 0x01, 0x07, + 0xf1, 0x49, 0x06, 0xf0, + 0x21, 0xc7, 0xe0, 0x8e, + 0x11, 0x1e, 0xe0, 0x8e, + 0x14, 0xe0, 0x17, 0xc5, + 0x00, 0x1f, 0xa0, 0x9f, + 0x13, 0xc5, 0xa0, 0x77, + 0xa0, 0x74, 0x46, 0x48, + 0x47, 0x48, 0xa0, 0x9c, 
+ 0x11, 0xc5, 0xa0, 0x74, + 0x44, 0x48, 0x43, 0x48, + 0xa0, 0x9c, 0x08, 0xc5, + 0xa0, 0x9f, 0x02, 0xc5, + 0x00, 0xbd, 0xea, 0x03, + 0x02, 0xc5, 0x00, 0xbd, + 0xf6, 0x03, 0x1c, 0xe8, + 0xaa, 0xd3, 0x08, 0xb7, + 0x6c, 0xe8, 0x20, 0xe8, + 0x00, 0xa0, 0x38, 0xe4}; + + r8153b_clear_bp(tp, MCU_TYPE_USB); + + generic_ocp_write(tp, 0xe600, 0xff, sizeof(usb_patch3_b), + usb_patch3_b, MCU_TYPE_USB); + + ocp_write_word(tp, MCU_TYPE_USB, USB_BP_BA, 0xa000); + ocp_write_word(tp, MCU_TYPE_USB, USB_BP_0, 0x0f66); + ocp_write_word(tp, MCU_TYPE_USB, USB_BP_1, 0x1098); + ocp_write_word(tp, MCU_TYPE_USB, USB_BP_2, 0x1148); + ocp_write_word(tp, MCU_TYPE_USB, USB_BP_3, 0x116c); + ocp_write_word(tp, MCU_TYPE_USB, USB_BP_4, 0x10e0); + ocp_write_word(tp, MCU_TYPE_USB, USB_BP_5, 0x0000); + ocp_write_word(tp, MCU_TYPE_USB, USB_BP_6, 0x0000); + ocp_write_word(tp, MCU_TYPE_USB, USB_BP_7, 0x0000); + ocp_write_word(tp, MCU_TYPE_USB, USB_BP_8, 0x0000); +// ocp_write_word(tp, MCU_TYPE_USB, USB_BP_9, 0x0000); + ocp_write_word(tp, MCU_TYPE_USB, USB_BP_10, 0x0000); + ocp_write_word(tp, MCU_TYPE_USB, USB_BP_11, 0x0000); + ocp_write_word(tp, MCU_TYPE_USB, USB_BP_12, 0x0000); + ocp_write_word(tp, MCU_TYPE_USB, USB_BP_13, 0x0000); + ocp_write_word(tp, MCU_TYPE_USB, USB_BP_14, 0x0000); + ocp_write_word(tp, MCU_TYPE_USB, USB_BP_15, 0x0000); + ocp_write_word(tp, MCU_TYPE_USB, USB_BP2_EN, 0x001f); + ocp_write_byte(tp, MCU_TYPE_USB, 0xcfd7, 0x03); + + ocp_data = ocp_read_word(tp, MCU_TYPE_USB, 0xcfcc); + ocp_data &= ~BIT(9); + ocp_write_word(tp, MCU_TYPE_USB, 0xcfcc, ocp_data); + + ocp_write_dword(tp, MCU_TYPE_USB, 0xd480, 0x4026840e); + ocp_write_dword(tp, MCU_TYPE_USB, 0xd480, 0x4001acc9); + + r8153b_clear_bp(tp, MCU_TYPE_PLA); + + generic_ocp_write(tp, 0xf800, 0xff, sizeof(pla_patch11), + pla_patch11, MCU_TYPE_PLA); + + ocp_write_word(tp, MCU_TYPE_PLA, PLA_BP_BA, 0x8000); + ocp_write_word(tp, MCU_TYPE_PLA, PLA_BP_0, 0x0bc2); + ocp_write_word(tp, MCU_TYPE_PLA, PLA_BP_1, 0x03e0); + ocp_write_word(tp, 
MCU_TYPE_PLA, PLA_BP_2, 0x0000); + ocp_write_word(tp, MCU_TYPE_PLA, PLA_BP_3, 0x0000); + ocp_write_word(tp, MCU_TYPE_PLA, PLA_BP_4, 0x0000); + ocp_write_word(tp, MCU_TYPE_PLA, PLA_BP_5, 0x0000); + ocp_write_word(tp, MCU_TYPE_PLA, PLA_BP_6, 0x0000); + ocp_write_word(tp, MCU_TYPE_PLA, PLA_BP_7, 0x0000); + ocp_write_word(tp, MCU_TYPE_PLA, PLA_BP_EN, 0x0003); + ocp_write_byte(tp, MCU_TYPE_USB, 0xcfd6, 0x02); + } +} + +static void r8156_ram_code(struct r8152 *tp) +{ + u16 data; + + if (tp->version == RTL_VER_10) { + r8153_pre_ram_code(tp, 0x8024, 0x8600); + + data = ocp_reg_read(tp, 0xb820); + data |= BIT(7); + ocp_reg_write(tp, 0xb820, data); + + /* nc0_patch_6486_180504_usb */ + sram_write(tp, 0xA016, 0x0000); + sram_write(tp, 0xA012, 0x0000); + sram_write(tp, 0xA014, 0x1800); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x8010); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x1800); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x8013); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x1800); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x8021); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x1800); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x802f); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x1800); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x803d); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x1800); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x8042); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x1800); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x8051); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x1800); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x8051); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xa088); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x1800); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x0a50); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x8008); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xd014); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xd1a3); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xd700); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x401a); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xd707); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x40c2); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x60a6); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xd700); + ocp_reg_write(tp, 
OCP_SRAM_DATA, 0x5f8b); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x1800); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x0a86); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x1800); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x0a6c); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x8080); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xd019); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xd1a2); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xd700); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x401a); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xd707); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x40c4); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x60a6); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xd700); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x5f8b); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x1800); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x0a86); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x1800); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x0a84); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xd503); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x8970); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x0c07); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x0901); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xd500); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xce01); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xcf09); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xd705); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x4000); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xceff); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xaf0a); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xd504); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x1800); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x1213); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x8401); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xd500); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x8580); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x1800); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x1253); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xd064); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xd181); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xd704); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x4018); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xd504); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xc50f); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xd706); + ocp_reg_write(tp, OCP_SRAM_DATA, 
0x2c59); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x804d); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xc60f); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xf002); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xc605); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xae02); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x1800); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x10fd); + sram_write(tp, 0xA026, 0xffff); + sram_write(tp, 0xA024, 0xffff); + sram_write(tp, 0xA022, 0x10f4); + sram_write(tp, 0xA020, 0x1252); + sram_write(tp, 0xA006, 0x1206); + sram_write(tp, 0xA004, 0x0a78); + sram_write(tp, 0xA002, 0x0a60); + sram_write(tp, 0xA000, 0x0a4f); + sram_write(tp, 0xA008, 0x3f00); + + /* nc1_patch_6486_180423_cml_usb */ + sram_write(tp, 0xA016, 0x0010); + sram_write(tp, 0xA012, 0x0000); + sram_write(tp, 0xA014, 0x1800); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x8010); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x1800); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x8066); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x1800); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x807c); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x1800); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x8089); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x1800); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x808e); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x1800); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x80a0); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x1800); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x80b2); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x1800); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x80c2); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xd501); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xce01); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xd700); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x62db); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x655c); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xd73e); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x60e9); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x614a); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x61ab); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x0c0f); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x0501); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x1800); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x0304); + 
ocp_reg_write(tp, OCP_SRAM_DATA, 0x0c0f); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x0503); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x1800); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x0304); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x0c0f); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x0505); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x1800); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x0304); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x0c0f); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x0509); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x1800); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x0304); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x653c); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xd73e); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x60e9); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x614a); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x61ab); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x0c0f); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x0503); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x1800); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x0304); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x0c0f); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x0502); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x1800); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x0304); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x0c0f); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x0506); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x1800); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x0304); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x0c0f); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x050a); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x1800); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x0304); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xd73e); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x60e9); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x614a); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x61ab); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x0c0f); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x0505); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x1800); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x0304); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x0c0f); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x0506); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x1800); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x0304); + ocp_reg_write(tp, 
OCP_SRAM_DATA, 0x0c0f); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x0504); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x1800); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x0304); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x0c0f); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x050c); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x1800); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x0304); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xd73e); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x60e9); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x614a); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x61ab); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x0c0f); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x0509); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x1800); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x0304); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x0c0f); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x050a); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x1800); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x0304); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x0c0f); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x050c); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x1800); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x0304); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x0c0f); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x0508); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x1800); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x0304); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xd501); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xce01); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xd73e); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x60e9); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x614a); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x61ab); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x0c0f); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x0501); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x1800); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x0321); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x0c0f); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x0502); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x1800); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x0321); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x0c0f); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x0504); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x1800); + ocp_reg_write(tp, OCP_SRAM_DATA, 
0x0321); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x0c0f); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x0508); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x1800); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x0321); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x1000); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x0346); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xd501); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xce01); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x8208); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x609d); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xa50f); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x1800); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x001a); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x0c0f); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x0503); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x1800); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x001a); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x607d); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x1800); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x00ab); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x1800); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x00ab); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xd501); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xce01); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xd700); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x60fd); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xa50f); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xce00); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xd500); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xaa0f); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x1800); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x017b); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x0c0f); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x0503); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xce00); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xd500); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x0c0f); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x0a05); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x1800); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x017b); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xd501); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xce01); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xd700); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x60fd); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xa50f); + 
ocp_reg_write(tp, OCP_SRAM_DATA, 0xce00); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xd500); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xaa0f); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x1800); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x01e0); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x0c0f); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x0503); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xce00); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xd500); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x0c0f); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x0a05); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x1800); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x01e0); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xd700); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x60fd); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xa50f); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xce00); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xd500); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xaa0f); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x1800); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x0231); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x0c0f); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x0503); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xce00); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xd500); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x0c0f); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x0a05); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x1800); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x0231); + sram_write(tp, 0xA08E, 0xffff); + sram_write(tp, 0xA08C, 0x0221); + sram_write(tp, 0xA08A, 0x01ce); + sram_write(tp, 0xA088, 0x0169); + sram_write(tp, 0xA086, 0x00a6); + sram_write(tp, 0xA084, 0x000d); + sram_write(tp, 0xA082, 0x0308); + sram_write(tp, 0xA080, 0x029f); + sram_write(tp, 0xA090, 0x007f); + + /* nc2_patch_6486_180508_usb */ + sram_write(tp, 0xA016, 0x0020); + sram_write(tp, 0xA012, 0x0000); + sram_write(tp, 0xA014, 0x1800); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x8010); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x1800); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x8017); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x1800); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x801b); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x1800); + 
ocp_reg_write(tp, OCP_SRAM_DATA, 0x8029); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x1800); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x8054); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x1800); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x805a); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x1800); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x8064); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x1800); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x80a7); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x9430); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x9480); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xb408); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xd120); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xd057); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x1800); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x064b); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xcb80); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x9906); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x1800); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x0567); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xcb94); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x8190); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x82a0); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x800a); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x8406); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x8010); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xa740); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x8dff); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x1000); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x07e4); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xa840); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x0000); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x1800); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x0773); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xcb91); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x0000); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xd700); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x4063); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xd139); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xf002); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xd140); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xd040); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xb404); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x0c0f); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x0d00); + ocp_reg_write(tp, 
OCP_SRAM_DATA, 0x1000); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x07dc); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xa610); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xa110); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xa2a0); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xa404); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xd704); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x4045); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xa180); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xd704); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x405d); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xa720); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x1000); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x0742); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x1000); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x07ec); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xd700); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x5f74); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x1000); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x0742); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xd702); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x7fb6); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x8190); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x82a0); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x8404); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x8610); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x0c0f); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x0d01); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x1000); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x07dc); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x1800); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x064b); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x1000); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x07c0); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xd700); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x5fa7); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x1800); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x0481); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x0000); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x94bc); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x870c); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xa190); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xa00a); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xa280); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xa404); + ocp_reg_write(tp, OCP_SRAM_DATA, 
0x8220); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x1800); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x078e); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xcb92); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xa840); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xd700); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x4063); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xd140); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xf002); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xd150); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xd040); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xd703); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x60a0); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x6121); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x61a2); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x6223); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xf02f); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x0cf0); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x0d10); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x8010); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xa740); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xf00f); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x0cf0); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x0d20); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x8010); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xa740); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xf00a); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x0cf0); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x0d30); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x8010); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xa740); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xf005); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x0cf0); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x0d40); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x8010); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xa740); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x1000); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x07e4); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xa610); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xa008); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xd704); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x4046); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xa002); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xd704); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x405d); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xa720); + 
ocp_reg_write(tp, OCP_SRAM_DATA, 0x1000); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x0742); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x1000); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x07f7); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xd700); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x5f74); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x1000); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x0742); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xd702); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x7fb5); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x800a); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x0cf0); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x0d00); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x1000); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x07e4); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x8010); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xa740); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xd701); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x3ad4); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x0537); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x8610); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x8840); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x1800); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x064b); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x8301); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x800a); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x8190); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x82a0); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x8404); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xa70c); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x9402); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x890c); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x8840); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x1800); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x064b); + sram_write(tp, 0xA10E, 0x0642); + sram_write(tp, 0xA10C, 0x0686); + sram_write(tp, 0xA10A, 0x0788); + sram_write(tp, 0xA108, 0x047b); + sram_write(tp, 0xA106, 0x065c); + sram_write(tp, 0xA104, 0x0769); + sram_write(tp, 0xA102, 0x0565); + sram_write(tp, 0xA100, 0x06f9); + sram_write(tp, 0xA110, 0x00ff); + + /* uc2_patch_6486_180507_usb */ + sram_write(tp, 0xb87c, 0x8530); + sram_write(tp, 0xb87e, 0xaf85); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x3caf); + 
ocp_reg_write(tp, OCP_SRAM_DATA, 0x8593); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xaf85); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x9caf); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x85a5); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xbf86); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xd702); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x5afb); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xe083); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xfb0c); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x020d); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x021b); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x10bf); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x86d7); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x025a); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xb7bf); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x86da); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x025a); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xfbe0); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x83fc); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x0c02); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x0d02); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x1b10); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xbf86); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xda02); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x5ab7); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xbf86); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xdd02); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x5afb); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xe083); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xfd0c); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x020d); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x021b); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x10bf); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x86dd); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x025a); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xb7bf); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x86e0); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x025a); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xfbe0); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x83fe); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x0c02); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x0d02); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x1b10); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xbf86); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xe002); + ocp_reg_write(tp, 
OCP_SRAM_DATA, 0x5ab7); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xaf2f); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xbd02); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x2cac); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x0286); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x65af); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x212b); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x022c); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x6002); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x86b6); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xaf21); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x0cd1); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x03bf); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x8710); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x025a); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xb7bf); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x870d); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x025a); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xb7bf); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x8719); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x025a); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xb7bf); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x8716); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x025a); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xb7bf); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x871f); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x025a); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xb7bf); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x871c); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x025a); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xb7bf); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x8728); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x025a); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xb7bf); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x8725); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x025a); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xb7bf); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x8707); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x025a); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xfbad); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x281c); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xd100); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xbf87); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x0a02); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x5ab7); + ocp_reg_write(tp, OCP_SRAM_DATA, 
0xbf87); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x1302); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x5ab7); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xbf87); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x2202); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x5ab7); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xbf87); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x2b02); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x5ab7); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xae1a); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xd101); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xbf87); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x0a02); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x5ab7); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xbf87); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x1302); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x5ab7); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xbf87); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x2202); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x5ab7); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xbf87); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x2b02); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x5ab7); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xd101); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xbf87); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x3402); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x5ab7); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xbf87); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x3102); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x5ab7); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xbf87); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x3d02); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x5ab7); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xbf87); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x3a02); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x5ab7); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xbf87); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x4302); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x5ab7); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xbf87); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x4002); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x5ab7); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xbf87); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x4c02); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x5ab7); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xbf87); + 
ocp_reg_write(tp, OCP_SRAM_DATA, 0x4902); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x5ab7); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xd100); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xbf87); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x2e02); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x5ab7); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xbf87); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x3702); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x5ab7); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xbf87); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x4602); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x5ab7); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xbf87); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x4f02); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x5ab7); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xaf35); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x7ff8); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xfaef); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x69bf); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x86e3); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x025a); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xfbbf); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x86fb); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x025a); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xb7bf); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x86e6); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x025a); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xfbbf); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x86fe); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x025a); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xb7bf); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x86e9); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x025a); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xfbbf); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x8701); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x025a); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xb7bf); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x86ec); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x025a); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xfbbf); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x8704); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x025a); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xb7bf); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x86ef); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x0262); + ocp_reg_write(tp, 
OCP_SRAM_DATA, 0x7cbf); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x86f2); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x0262); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x7cbf); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x86f5); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x0262); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x7cbf); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x86f8); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x0262); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x7cef); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x96fe); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xfc04); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xf8fa); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xef69); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xbf86); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xef02); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x6273); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xbf86); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xf202); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x6273); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xbf86); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xf502); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x6273); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xbf86); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xf802); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x6273); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xef96); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xfefc); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x0420); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xb540); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x53b5); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x4086); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xb540); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xb9b5); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x40c8); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xb03a); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xc8b0); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xbac8); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xb13a); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xc8b1); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xba77); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xbd26); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xffbd); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x2677); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xbd28); + ocp_reg_write(tp, OCP_SRAM_DATA, 
0xffbd); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x2840); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xbd26); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xc8bd); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x2640); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xbd28); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xc8bd); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x28bb); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xa430); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x98b0); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x1eba); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xb01e); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xdcb0); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x1e98); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xb09e); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xbab0); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x9edc); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xb09e); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x98b1); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x1eba); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xb11e); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xdcb1); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x1e98); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xb19e); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xbab1); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x9edc); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xb19e); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x11b0); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x1e22); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xb01e); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x33b0); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x1e11); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xb09e); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x22b0); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x9e33); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xb09e); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x11b1); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x1e22); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xb11e); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x33b1); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x1e11); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xb19e); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x22b1); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x9e33); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xb19e); + sram_write(tp, 0xb85e, 0x2f71); + sram_write(tp, 
0xb860, 0x20d9); + sram_write(tp, 0xb862, 0x2109); + sram_write(tp, 0xb864, 0x34e7); + sram_write(tp, 0xb878, 0x000f); + + data = ocp_reg_read(tp, 0xb820); + data &= ~BIT(7); + ocp_reg_write(tp, 0xb820, data); + + r8153_post_ram_code(tp, 0x8024); + } else if (tp->version == RTL_VER_11) { + r8153_pre_ram_code(tp, 0x8024, 0x8601); + + data = ocp_reg_read(tp, 0xb820); + data |= BIT(7); + ocp_reg_write(tp, 0xb820, data); + + /* nc_patch */ + sram_write(tp, 0xA016, 0x0000); + sram_write(tp, 0xA012, 0x0000); + sram_write(tp, 0xA014, 0x1800); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x8010); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x1800); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x808b); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x1800); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x808f); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x1800); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x8093); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x1800); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x8097); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x1800); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x809d); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x1800); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x80a1); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x1800); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x80aa); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xd718); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x607b); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x40da); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xf00e); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x42da); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xf01e); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xd718); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x615b); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x1000); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x1456); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x1000); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x14a4); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x1000); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x14bc); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xd718); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x5f2e); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xf01c); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x1000); + 
ocp_reg_write(tp, OCP_SRAM_DATA, 0x1456); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x1000); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x14a4); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x1000); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x14bc); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xd718); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x5f2e); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xf024); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x1000); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x1456); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x1000); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x14a4); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x1000); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x14bc); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xd718); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x5f2e); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xf02c); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x1000); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x1456); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x1000); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x14a4); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x1000); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x14bc); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xd718); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x5f2e); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xf034); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xd719); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x4118); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xd504); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xac11); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xd501); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xce01); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xa410); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xce00); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xd500); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x4779); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xd504); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xac0f); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xae01); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xd500); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x1000); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x1444); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xf034); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xd719); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x4118); + ocp_reg_write(tp, 
OCP_SRAM_DATA, 0xd504); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xac22); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xd501); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xce01); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xa420); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xce00); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xd500); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x4559); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xd504); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xac0f); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xae01); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xd500); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x1000); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x1444); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xf023); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xd719); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x4118); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xd504); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xac44); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xd501); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xce01); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xa440); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xce00); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xd500); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x4339); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xd504); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xac0f); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xae01); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xd500); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x1000); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x1444); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xf012); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xd719); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x4118); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xd504); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xac88); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xd501); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xce01); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xa480); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xce00); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xd500); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x4119); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xd504); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xac0f); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xae01); + ocp_reg_write(tp, OCP_SRAM_DATA, 
0xd500); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x1000); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x1444); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xf001); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x1000); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x1456); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xd718); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x5fac); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xc48f); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x1000); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x141b); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xd504); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x8010); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x1800); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x121a); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xd0b4); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xd1bb); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x1800); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x0898); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xd0b4); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xd1bb); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x1800); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x0a0e); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xd064); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xd18a); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x1800); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x0b7e); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x401c); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xd501); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xa804); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x8804); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x1800); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x053b); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xd500); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xa301); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x1800); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x0648); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xc520); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xa201); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xd701); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x252d); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x1646); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xd708); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x4006); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x1800); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x1646); + 
ocp_reg_write(tp, OCP_SRAM_DATA, 0x1800); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x0308); + sram_write(tp, 0xA026, 0x0307); + sram_write(tp, 0xA024, 0x1645); + sram_write(tp, 0xA022, 0x0647); + sram_write(tp, 0xA020, 0x053a); + sram_write(tp, 0xA006, 0x0b7c); + sram_write(tp, 0xA004, 0x0a0c); + sram_write(tp, 0xA002, 0x0896); + sram_write(tp, 0xA000, 0x11a1); + sram_write(tp, 0xA008, 0xff00); + + /* nc1_patch */ + sram_write(tp, 0xA016, 0x0010); + sram_write(tp, 0xA012, 0x0000); + sram_write(tp, 0xA014, 0x1800); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x8010); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x1800); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x8015); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x1800); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x801a); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x1800); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x801a); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x1800); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x801a); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x1800); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x801a); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x1800); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x801a); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x1800); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x801a); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xad02); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x1000); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x02d7); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x1800); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x00ed); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x0c0f); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x0509); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xc100); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x1800); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x008f); + sram_write(tp, 0xA08E, 0xffff); + sram_write(tp, 0xA08C, 0xffff); + sram_write(tp, 0xA08A, 0xffff); + sram_write(tp, 0xA088, 0xffff); + sram_write(tp, 0xA086, 0xffff); + sram_write(tp, 0xA084, 0xffff); + sram_write(tp, 0xA082, 0x008d); + sram_write(tp, 0xA080, 0x00eb); + sram_write(tp, 0xA090, 0x0103); + + /* nc2_patch */ + sram_write(tp, 0xA016, 0x0020); + sram_write(tp, 0xA012, 
0x0000); + sram_write(tp, 0xA014, 0x1800); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x8010); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x1800); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x8014); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x1800); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x8018); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x1800); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x8024); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x1800); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x8051); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x1800); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x8055); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x1800); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x8072); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x1800); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x80dc); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x0000); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x0000); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x0000); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xfffd); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x0000); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x0000); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x0000); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xfffd); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x8301); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x800a); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x8190); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x82a0); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x8404); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xa70c); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x9402); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x890c); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x8840); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xa380); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x1800); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x066e); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xcb91); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xd700); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x4063); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xd139); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xf002); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xd140); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xd040); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xb404); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x0c0f); + ocp_reg_write(tp, 
OCP_SRAM_DATA, 0x0d00); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x1000); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x07e0); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xa610); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xa110); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xa2a0); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xa404); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xd704); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x4085); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xa180); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xa404); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x8280); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xd704); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x405d); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xa720); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x1000); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x0743); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x1000); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x07f0); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xd700); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x5f74); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x1000); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x0743); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xd702); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x7fb6); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x8190); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x82a0); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x8404); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x8610); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x0000); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x0c0f); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x0d01); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x1000); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x07e0); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x1800); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x066e); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xd158); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xd04d); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x1800); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x03d4); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x94bc); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x870c); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x8380); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xd10d); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xd040); + ocp_reg_write(tp, OCP_SRAM_DATA, 
0x1000); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x07c4); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xd700); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x5fb4); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xa190); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xa00a); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xa280); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xa404); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xa220); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xd130); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xd040); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x1000); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x07c4); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xd700); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x5fb4); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xbb80); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xd1c4); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xd074); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xa301); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xd704); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x604b); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xa90c); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x1800); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x0556); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xcb92); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xd700); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x4063); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xd116); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xf002); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xd119); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xd040); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xd703); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x60a0); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x6241); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x63e2); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x6583); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xf054); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xd701); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x611e); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xd701); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x40da); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x0cf0); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x0d10); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xa010); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x8740); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xf02f); + 
ocp_reg_write(tp, OCP_SRAM_DATA, 0x0cf0); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x0d50); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x8010); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xa740); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xf02a); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xd701); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x611e); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xd701); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x40da); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x0cf0); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x0d20); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xa010); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x8740); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xf021); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x0cf0); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x0d60); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x8010); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xa740); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xf01c); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xd701); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x611e); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xd701); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x40da); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x0cf0); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x0d30); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xa010); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x8740); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xf013); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x0cf0); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x0d70); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x8010); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xa740); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xf00e); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xd701); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x611e); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xd701); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x40da); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x0cf0); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x0d40); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xa010); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x8740); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xf005); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x0cf0); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x0d80); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x8010); + ocp_reg_write(tp, 
OCP_SRAM_DATA, 0xa740); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x1000); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x07e8); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xa610); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xd704); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x405d); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xa720); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xd700); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x5ff4); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xa008); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xd704); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x4046); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xa002); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x1000); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x0743); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x1000); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x07fb); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xd703); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x7f6f); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x7f4e); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x7f2d); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x7f0c); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x800a); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x0cf0); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x0d00); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x1000); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x07e8); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x8010); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xa740); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x1000); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x0743); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xd702); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x7fb5); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xd701); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x3ad4); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x0556); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x8610); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x1800); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x066e); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xd1f5); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xd049); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x1800); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x01ec); + sram_write(tp, 0xA10E, 0x01ea); + sram_write(tp, 0xA10C, 0x06a9); + sram_write(tp, 0xA10A, 0x078a); + sram_write(tp, 0xA108, 
0x03d2); + sram_write(tp, 0xA106, 0x067f); + sram_write(tp, 0xA104, 0x0665); + sram_write(tp, 0xA102, 0x0000); + sram_write(tp, 0xA100, 0x0000); + sram_write(tp, 0xA110, 0x00fc); + + /* uc2 */ + sram_write(tp, 0xb87c, 0x8530); + sram_write(tp, 0xb87e, 0xaf85); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x3caf); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x8545); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xaf85); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x45af); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x8545); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xee82); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xf900); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x0103); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xaf03); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xb7f8); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xe0a6); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x00e1); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xa601); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xef01); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x58f0); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xa080); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x37a1); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x8402); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xae16); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xa185); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x02ae); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x11a1); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x8702); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xae0c); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xa188); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x02ae); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x07a1); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x8902); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xae02); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xae1c); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xe0b4); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x62e1); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xb463); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x6901); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xe4b4); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x62e5); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xb463); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xe0b4); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x62e1); + ocp_reg_write(tp, 
OCP_SRAM_DATA, 0xb463); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x6901); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xe4b4); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x62e5); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xb463); + ocp_reg_write(tp, OCP_SRAM_DATA, 0xfc04); + sram_write(tp, 0xb85e, 0x03b3); + sram_write(tp, 0xb860, 0xffff); + sram_write(tp, 0xb862, 0xffff); + sram_write(tp, 0xb864, 0xffff); + sram_write(tp, 0xb878, 0x0001); + + data = ocp_reg_read(tp, 0xb820); + data &= ~BIT(7); + ocp_reg_write(tp, 0xb820, data); + + r8153_post_ram_code(tp, 0x8024); + } +} + +static void r8156_hw_phy_cfg2(struct r8152 *tp) +{ + bool pcut_entr; + u32 ocp_data; + u16 data; + + ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_MISC_0); + pcut_entr = (ocp_data & PCUT_STATUS) ? true : false; + if (pcut_entr) { + ocp_data &= ~PCUT_STATUS; + ocp_write_word(tp, MCU_TYPE_USB, USB_MISC_0, ocp_data); + + data = r8153_phy_status(tp, PHY_STAT_EXT_INIT); + WARN_ON_ONCE(data != PHY_STAT_EXT_INIT); + + r8156_ram_code(tp); + + data = ocp_reg_read(tp, 0xa468); + data &= ~(BIT(3) | BIT(0)); + ocp_reg_write(tp, 0xa468, data); + } else { + r8156_patch_code(tp); + + if (r8153_patch_request(tp, true)) { + netif_err(tp, drv, tp->netdev, + "patch request error\n"); + return; + } + + r8156_ram_code(tp); + + r8153_patch_request(tp, false); + + /* disable ALDPS before updating the PHY parameters */ + r8153_aldps_en(tp, false); + + /* disable EEE before updating the PHY parameters */ + r8153_eee_en(tp, false); + ocp_reg_write(tp, OCP_EEE_ADV, 0); + } + + data = r8153_phy_status(tp, PHY_STAT_LAN_ON); + WARN_ON_ONCE(data != PHY_STAT_LAN_ON); + + ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_PHY_PWR); + ocp_data |= PFM_PWM_SWITCH; + ocp_write_word(tp, MCU_TYPE_PLA, PLA_PHY_PWR, ocp_data); + + switch (tp->version) { + case RTL_VER_10: + data = ocp_reg_read(tp, 0xad40); + data &= ~0x3ff; + data |= BIT(7) | BIT(2); + ocp_reg_write(tp, 0xad40, data); + + data = ocp_reg_read(tp, 0xad4e); + data |= BIT(4); + ocp_reg_write(tp, 0xad4e, 
data); + data = ocp_reg_read(tp, 0xad16); + data &= ~0x3ff; + data |= 0x6; + ocp_reg_write(tp, 0xad16, data); + data = ocp_reg_read(tp, 0xad32); + data &= ~0x3f; + data |= 6; + ocp_reg_write(tp, 0xad32, data); + data = ocp_reg_read(tp, 0xac08); + data &= ~(BIT(12) | BIT(8)); + ocp_reg_write(tp, 0xac08, data); + data = ocp_reg_read(tp, 0xac8a); + data |= BIT(12) | BIT(13) | BIT(14); + data &= ~BIT(15); + ocp_reg_write(tp, 0xac8a, data); + data = ocp_reg_read(tp, 0xad18); + data |= BIT(10); + ocp_reg_write(tp, 0xad18, data); + data = ocp_reg_read(tp, 0xad1a); + data |= 0x3ff; + ocp_reg_write(tp, 0xad1a, data); + data = ocp_reg_read(tp, 0xad1c); + data |= 0x3ff; + ocp_reg_write(tp, 0xad1c, data); + + data = sram_read(tp, 0x80ea); + data &= ~0xff00; + data |= 0xc400; + sram_write(tp, 0x80ea, data); + data = sram_read(tp, 0x80eb); + data &= ~0x0700; + data |= 0x0300; + sram_write(tp, 0x80eb, data); + data = sram_read(tp, 0x80f8); + data &= ~0xff00; + data |= 0x1c00; + sram_write(tp, 0x80f8, data); + data = sram_read(tp, 0x80f1); + data &= ~0xff00; + data |= 0x3000; + sram_write(tp, 0x80f1, data); + + data = sram_read(tp, 0x80fe); + data &= ~0xff00; + data |= 0xa500; + sram_write(tp, 0x80fe, data); + data = sram_read(tp, 0x8102); + data &= ~0xff00; + data |= 0x5000; + sram_write(tp, 0x8102, data); + data = sram_read(tp, 0x8015); + data &= ~0xff00; + data |= 0x3300; + sram_write(tp, 0x8015, data); + data = sram_read(tp, 0x8100); + data &= ~0xff00; + data |= 0x7000; + sram_write(tp, 0x8100, data); + data = sram_read(tp, 0x8014); + data &= ~0xff00; + data |= 0xf000; + sram_write(tp, 0x8014, data); + data = sram_read(tp, 0x8016); + data &= ~0xff00; + data |= 0x6500; + sram_write(tp, 0x8016, data); + data = sram_read(tp, 0x80dc); + data &= ~0xff00; + data |= 0xed00; + sram_write(tp, 0x80dc, data); + data = sram_read(tp, 0x80df); + data |= BIT(8); + sram_write(tp, 0x80df, data); + data = sram_read(tp, 0x80e1); + data &= ~BIT(8); + sram_write(tp, 0x80e1, data); + + data = 
ocp_reg_read(tp, 0xbf06); + data &= ~0x003f; + data |= 0x0038; + ocp_reg_write(tp, 0xbf06, data); + + sram_write(tp, 0x819f, 0xddb6); + + ocp_reg_write(tp, 0xbc34, 0x5555); + data = ocp_reg_read(tp, 0xbf0a); + data &= ~0x0e00; + data |= 0x0a00; + ocp_reg_write(tp, 0xbf0a, data); + + data = ocp_reg_read(tp, 0xbd2c); + data &= ~BIT(13); + ocp_reg_write(tp, 0xbd2c, data); + break; + case RTL_VER_11: + data = ocp_reg_read(tp, 0xad4e); + data |= BIT(4); + ocp_reg_write(tp, 0xad4e, data); + data = ocp_reg_read(tp, 0xad16); + data |= 0x3ff; + ocp_reg_write(tp, 0xad16, data); + data = ocp_reg_read(tp, 0xad32); + data &= ~0x3f; + data |= 6; + ocp_reg_write(tp, 0xad32, data); + data = ocp_reg_read(tp, 0xac08); + data &= ~(BIT(12) | BIT(8)); + ocp_reg_write(tp, 0xac08, data); + data = ocp_reg_read(tp, 0xacc0); + data &= ~0x3; + data |= BIT(1); + ocp_reg_write(tp, 0xacc0, data); + data = ocp_reg_read(tp, 0xad40); + data &= ~0xe7; + data |= BIT(6) | BIT(2); + ocp_reg_write(tp, 0xad40, data); + data = ocp_reg_read(tp, 0xac14); + data &= ~BIT(7); + ocp_reg_write(tp, 0xac14, data); + data = ocp_reg_read(tp, 0xac80); + data &= ~(BIT(8) | BIT(9)); + ocp_reg_write(tp, 0xac80, data); + data = ocp_reg_read(tp, 0xac5e); + data &= ~0x7; + data |= BIT(1); + ocp_reg_write(tp, 0xac5e, data); + ocp_reg_write(tp, 0xad4c, 0x00a8); + data = ocp_reg_read(tp, 0xac5c); + ocp_reg_write(tp, 0xac5c, 0x01ff); + data = ocp_reg_read(tp, 0xac8a); + data &= ~0xf0; + data |= BIT(4) | BIT(5); + ocp_reg_write(tp, 0xac8a, data); + ocp_reg_write(tp, 0xb87c, 0x80a2); + ocp_reg_write(tp, 0xb87e, 0x0153); + ocp_reg_write(tp, 0xb87c, 0x809c); + ocp_reg_write(tp, 0xb87e, 0x0153); + + ocp_write_word(tp, MCU_TYPE_PLA, 0xe058, 0x0056); + + ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, 0xe952); + ocp_data |= BIT(1) | BIT(2); + ocp_write_word(tp, MCU_TYPE_PLA, 0xe952, ocp_data); + + ocp_reg_write(tp, OCP_SRAM_ADDR, 0x81B3); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x0043); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x00A7); + 
ocp_reg_write(tp, OCP_SRAM_DATA, 0x00D6); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x00EC); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x00F6); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x00FB); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x00FD); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x00FF); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x00BB); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x0058); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x0029); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x0013); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x0009); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x0004); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x0002); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x0000); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x0000); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x0000); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x0000); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x0000); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x0000); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x0000); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x0000); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x0000); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x0000); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x0000); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x0000); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x0000); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x0000); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x0000); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x0000); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x0000); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x0000); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x0000); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x0000); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x0000); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x0000); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x0000); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x0000); + ocp_reg_write(tp, OCP_SRAM_DATA, 0x0000); + + sram_write(tp, 0x8257, 0x020f); + sram_write(tp, 0x80ea, 0x7843); + if (r8153_patch_request(tp, true)) { + netif_err(tp, drv, tp->netdev, + "patch request error\n"); + return; + } + + data = ocp_reg_read(tp, 0xb896); + data &= ~BIT(0); + ocp_reg_write(tp, 0xb896, data); + + data = ocp_reg_read(tp, 0xb892); + data &= 
~0xff00; + ocp_reg_write(tp, 0xb892, data); + + ocp_reg_write(tp, 0xB88E, 0xC091); + ocp_reg_write(tp, 0xB890, 0x6E12); + ocp_reg_write(tp, 0xB88E, 0xC092); + ocp_reg_write(tp, 0xB890, 0x1214); + ocp_reg_write(tp, 0xB88E, 0xC094); + ocp_reg_write(tp, 0xB890, 0x1516); + ocp_reg_write(tp, 0xB88E, 0xC096); + ocp_reg_write(tp, 0xB890, 0x171B); + ocp_reg_write(tp, 0xB88E, 0xC098); + ocp_reg_write(tp, 0xB890, 0x1B1C); + ocp_reg_write(tp, 0xB88E, 0xC09A); + ocp_reg_write(tp, 0xB890, 0x1F1F); + ocp_reg_write(tp, 0xB88E, 0xC09C); + ocp_reg_write(tp, 0xB890, 0x2021); + ocp_reg_write(tp, 0xB88E, 0xC09E); + ocp_reg_write(tp, 0xB890, 0x2224); + ocp_reg_write(tp, 0xB88E, 0xC0A0); + ocp_reg_write(tp, 0xB890, 0x2424); + ocp_reg_write(tp, 0xB88E, 0xC0A2); + ocp_reg_write(tp, 0xB890, 0x2424); + ocp_reg_write(tp, 0xB88E, 0xC0A4); + ocp_reg_write(tp, 0xB890, 0x2424); + ocp_reg_write(tp, 0xB88E, 0xC018); + ocp_reg_write(tp, 0xB890, 0x0AF2); + ocp_reg_write(tp, 0xB88E, 0xC01A); + ocp_reg_write(tp, 0xB890, 0x0D4A); + ocp_reg_write(tp, 0xB88E, 0xC01C); + ocp_reg_write(tp, 0xB890, 0x0F26); + ocp_reg_write(tp, 0xB88E, 0xC01E); + ocp_reg_write(tp, 0xB890, 0x118D); + ocp_reg_write(tp, 0xB88E, 0xC020); + ocp_reg_write(tp, 0xB890, 0x14F3); + ocp_reg_write(tp, 0xB88E, 0xC022); + ocp_reg_write(tp, 0xB890, 0x175A); + ocp_reg_write(tp, 0xB88E, 0xC024); + ocp_reg_write(tp, 0xB890, 0x19C0); + ocp_reg_write(tp, 0xB88E, 0xC026); + ocp_reg_write(tp, 0xB890, 0x1C26); + ocp_reg_write(tp, 0xB88E, 0xC089); + ocp_reg_write(tp, 0xB890, 0x6050); + ocp_reg_write(tp, 0xB88E, 0xC08A); + ocp_reg_write(tp, 0xB890, 0x5F6E); + ocp_reg_write(tp, 0xB88E, 0xC08C); + ocp_reg_write(tp, 0xB890, 0x6E6E); + ocp_reg_write(tp, 0xB88E, 0xC08E); + ocp_reg_write(tp, 0xB890, 0x6E6E); + ocp_reg_write(tp, 0xB88E, 0xC090); + ocp_reg_write(tp, 0xB890, 0x6E12); + + data = ocp_reg_read(tp, 0xb896); + data |= BIT(0); + ocp_reg_write(tp, 0xb896, data); + + ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL4); + ocp_data |= 
EEE_SPDWN_EN; + ocp_write_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL4, ocp_data); + + data = ocp_reg_read(tp, OCP_DOWN_SPEED); + data &= ~(EN_EEE_100 | EN_EEE_1000); + data |= EN_10M_CLKDIV; + ocp_reg_write(tp, OCP_DOWN_SPEED, data); + tp->ups_info._10m_ckdiv = true; + tp->ups_info.eee_plloff_100 = false; + tp->ups_info.eee_plloff_giga = false; + + data = ocp_reg_read(tp, OCP_POWER_CFG); + data &= ~EEE_CLKDIV_EN; + ocp_reg_write(tp, OCP_POWER_CFG, data); + tp->ups_info.eee_ckdiv = false; + + ocp_reg_write(tp, OCP_SYSCLK_CFG, 0); + ocp_reg_write(tp, OCP_SYSCLK_CFG, clk_div_expo(5)); + tp->ups_info._250m_ckdiv = false; + + r8153_patch_request(tp, false); + + data = ocp_reg_read(tp, 0xd068); + data |= BIT(13); + ocp_reg_write(tp, 0xd068, data); + + data = sram_read(tp, 0x81a2); + data &= ~BIT(8); + sram_write(tp, 0x81a2, data); + data = ocp_reg_read(tp, 0xb54c); + data &= ~0xff00; + data |= 0xdb00; + ocp_reg_write(tp, 0xb54c, data); + + data = ocp_reg_read(tp, 0xa454); + data &= ~BIT(0); + ocp_reg_write(tp, 0xa454, data); + + data = ocp_reg_read(tp, 0xa5d4); + data |= BIT(5); + ocp_reg_write(tp, 0xa5d4, data); + data = ocp_reg_read(tp, 0xad4e); + data &= ~BIT(4); + ocp_reg_write(tp, 0xad4e, data); + data = ocp_reg_read(tp, 0xa86a); + data &= ~BIT(0); + ocp_reg_write(tp, 0xa86a, data); + break; + default: + break; + } + + if (!pcut_entr) { + data = ocp_reg_read(tp, 0xa428); + data &= ~BIT(9); + ocp_reg_write(tp, 0xa428, data); + data = ocp_reg_read(tp, 0xa5ea); + data &= ~BIT(0); + ocp_reg_write(tp, 0xa5ea, data); + tp->ups_info.lite_mode = 0; + + if (tp->eee_en) { + r8156_eee_en(tp, true); + ocp_reg_write(tp, OCP_EEE_ADV, tp->eee_adv); + } + + r8153_aldps_en(tp, true); + r8152b_enable_fc(tp); + r8153_u2p3en(tp, true); + + set_bit(PHY_RESET, &tp->flags); + } +} + +static void r8156_hw_phy_cfg(struct r8152 *tp) +{ + u32 ocp_data; + u16 data; + + data = r8153_phy_status(tp, PHY_STAT_LAN_ON); + + /* disable ALDPS before updating the PHY parameters */ + r8153_aldps_en(tp, 
false); + + /* disable EEE before updating the PHY parameters */ + r8153_eee_en(tp, false); + ocp_reg_write(tp, OCP_EEE_ADV, 0); + + r8156_firmware(tp); + + data = ocp_reg_read(tp, 0xa5d4); + data |= BIT(7) | BIT(0); + ocp_reg_write(tp, 0xa5d4, data); + ocp_reg_write(tp, 0xa5e6, 0x6290); + + data = ocp_reg_read(tp, 0xa5e8); + data &= ~BIT(3); + ocp_reg_write(tp, 0xa5e8, data); + data = ocp_reg_read(tp, 0xa428); + data |= BIT(9); + ocp_reg_write(tp, 0xa428, data); + + ocp_reg_write(tp, 0xb636, 0x2c00); + data = ocp_reg_read(tp, 0xb460); + data &= ~BIT(13); + ocp_reg_write(tp, 0xb460, data); + ocp_reg_write(tp, 0xb83e, 0x00a9); + ocp_reg_write(tp, 0xb840, 0x0035); + ocp_reg_write(tp, 0xb680, 0x0022); + ocp_reg_write(tp, 0xb468, 0x10c0); + ocp_reg_write(tp, 0xb468, 0x90c0); + + data = ocp_reg_read(tp, 0xb60a); + data &= ~0xfff; + data |= 0xc0; + ocp_reg_write(tp, 0xb60a, data); + data = ocp_reg_read(tp, 0xb628); + data &= ~0xfff; + data |= 0xc0; + ocp_reg_write(tp, 0xb628, data); + data = ocp_reg_read(tp, 0xb62a); + data &= ~0xfff; + data |= 0xc0; + ocp_reg_write(tp, 0xb62a, data); + + data = ocp_reg_read(tp, 0xbc1e); + data &= 0xf; + data |= (data << 4) | (data << 8) | (data << 12); + ocp_reg_write(tp, 0xbce0, data); + data = ocp_reg_read(tp, 0xbd42); + data &= ~BIT(8); + ocp_reg_write(tp, 0xbd42, data); + + data = ocp_reg_read(tp, 0xbf90); + data &= ~0xf0; + data |= BIT(7); + ocp_reg_write(tp, 0xbf90, data); + data = ocp_reg_read(tp, 0xbf92); + data &= ~0x3f; + data |= 0x3fc0; + ocp_reg_write(tp, 0xbf92, data); + + data = ocp_reg_read(tp, 0xbf94); + data |= 0x3e00; + ocp_reg_write(tp, 0xbf94, data); + data = ocp_reg_read(tp, 0xbf88); + data &= ~0x3eff; + data |= 0x1e01; + ocp_reg_write(tp, 0xbf88, data); + + data = ocp_reg_read(tp, 0xbc58); + data &= ~BIT(1); + ocp_reg_write(tp, 0xbc58, data); + + data = ocp_reg_read(tp, 0xbd0c); + data &= ~0x3f; + ocp_reg_write(tp, 0xbd0c, data); + + data = ocp_reg_read(tp, 0xbcc2); + data &= ~BIT(14); + ocp_reg_write(tp, 0xbcc2, 
data); + + ocp_reg_write(tp, 0xd098, 0x0427); + + data = ocp_reg_read(tp, 0xa430); + data &= ~BIT(12); + ocp_reg_write(tp, 0xa430, data); + + ocp_data = ocp_read_dword(tp, MCU_TYPE_PLA, 0xe84c); + ocp_data |= BIT(6); + ocp_write_dword(tp, MCU_TYPE_PLA, 0xe84c, ocp_data); + + data = ocp_reg_read(tp, 0xbeb4); + data &= ~BIT(1); + ocp_reg_write(tp, 0xbeb4, data); + data = ocp_reg_read(tp, 0xbf0c); + data &= ~BIT(13); + data |= BIT(12); + ocp_reg_write(tp, 0xbf0c, data); + data = ocp_reg_read(tp, 0xbd44); + data &= ~BIT(2); + ocp_reg_write(tp, 0xbd44, data); + + data = ocp_reg_read(tp, 0xa442); + data |= BIT(11); + ocp_reg_write(tp, 0xa442, data); + ocp_data = ocp_read_dword(tp, MCU_TYPE_PLA, 0xe84c); + ocp_data |= BIT(7); + ocp_write_dword(tp, MCU_TYPE_PLA, 0xe84c, ocp_data); + + r8156_lock_mian(tp, true); + data = ocp_reg_read(tp, 0xcc46); + data &= ~0x700; + ocp_reg_write(tp, 0xcc46, data); + data = ocp_reg_read(tp, 0xcc46); + data &= ~0x70; + ocp_reg_write(tp, 0xcc46, data); + data = ocp_reg_read(tp, 0xcc46); + data &= ~0x70; + data |= BIT(6) | BIT(4); + ocp_reg_write(tp, 0xcc46, data); + r8156_lock_mian(tp, false); + + data = ocp_reg_read(tp, 0xbd38); + data &= ~BIT(13); + ocp_reg_write(tp, 0xbd38, data); + data = ocp_reg_read(tp, 0xbd38); + data |= BIT(12); + ocp_reg_write(tp, 0xbd38, data); + ocp_reg_write(tp, 0xbd36, 0x0fb4); + data = ocp_reg_read(tp, 0xbd38); + data |= BIT(13); + ocp_reg_write(tp, 0xbd38, data); + +// if (tp->eee_en) { +// r8153_eee_en(tp, true); +// ocp_reg_write(tp, OCP_EEE_ADV, tp->eee_adv); +// } + + r8153_aldps_en(tp, true); + r8152b_enable_fc(tp); + r8153_u2p3en(tp, true); +} + +static void r8156_init(struct r8152 *tp) +{ + u32 ocp_data; + u16 data; + int i; + + if (test_bit(RTL8152_UNPLUG, &tp->flags)) + return; + + ocp_data = ocp_read_byte(tp, MCU_TYPE_USB, 0xd26b); + ocp_data &= ~BIT(0); + ocp_write_byte(tp, MCU_TYPE_USB, 0xd26b, ocp_data); + + ocp_write_word(tp, MCU_TYPE_USB, 0xd32a, 0); + + ocp_data = ocp_read_word(tp, MCU_TYPE_USB, 
0xcfee); + ocp_data |= BIT(5); + ocp_write_word(tp, MCU_TYPE_USB, 0xcfee, ocp_data); + + r8153b_u1u2en(tp, false); + for (i = 0; i < 500; i++) { if (ocp_read_word(tp, MCU_TYPE_PLA, PLA_BOOT_CTRL) & AUTOLOAD_DONE) @@ -4182,6 +10853,11 @@ static void r8153b_init(struct r8152 *tp) } data = r8153_phy_status(tp, 0); + if (data == PHY_STAT_EXT_INIT) { + data = ocp_reg_read(tp, 0xa468); + data &= ~(BIT(3) | BIT(0)); + ocp_reg_write(tp, 0xa468, data); + } data = r8152_mdio_read(tp, MII_BMCR); if (data & BMCR_PDOWN) { @@ -4200,29 +10876,51 @@ static void r8153b_init(struct r8152 *tp) ocp_write_word(tp, MCU_TYPE_USB, USB_U1U2_TIMER, 500); r8153b_power_cut_en(tp, false); - r8153b_ups_en(tp, false); - r8153b_queue_wake(tp, false); + r8156_ups_en(tp, false); + r8153_queue_wake(tp, false); rtl_runtime_suspend_enable(tp, false); + r8153b_u1u2en(tp, true); usb_enable_lpm(tp->udev); - /* MAC clock speed down */ - ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL2); - ocp_data |= MAC_CLK_SPDWN_EN; - ocp_write_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL2, ocp_data); + r8156_mac_clk_spd(tp, true); - set_bit(GREEN_ETHERNET, &tp->flags); + ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL3); + ocp_data &= ~BIT(14); + ocp_write_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL3, ocp_data); + + ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, 0xd398); + if (rtl8152_get_speed(tp) & LINK_STATUS) + ocp_data |= BIT(15); + else + ocp_data &= ~BIT(15); + ocp_data &= ~BIT(8); + ocp_data |= BIT(0); + ocp_write_word(tp, MCU_TYPE_PLA, 0xd398, ocp_data); + +// set_bit(GREEN_ETHERNET, &tp->flags); /* rx aggregation */ ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_USB_CTRL); ocp_data &= ~(RX_AGG_DISABLE | RX_ZERO_EN); ocp_write_word(tp, MCU_TYPE_USB, USB_USB_CTRL, ocp_data); + ocp_data = ocp_read_byte(tp, MCU_TYPE_USB, 0xcfd9); + ocp_data |= BIT(2); + ocp_write_byte(tp, MCU_TYPE_USB, 0xcfd9, ocp_data); + rtl_tally_reset(tp); tp->coalesce = 15000; /* 15 us */ } +static bool rtl_vendor_mode(struct 
usb_interface *intf) +{ + struct usb_host_interface *alt = intf->cur_altsetting; + + return alt->desc.bInterfaceClass == USB_CLASS_VENDOR_SPEC; +} + static int rtl8152_pre_reset(struct usb_interface *intf) { struct r8152 *tp = usb_get_intfdata(intf); @@ -4236,8 +10934,11 @@ static int rtl8152_pre_reset(struct usb_interface *intf) return 0; netif_stop_queue(netdev); + tasklet_disable(&tp->tx_tl); napi_disable(&tp->napi); + smp_mb__before_atomic(); clear_bit(WORK_ENABLE, &tp->flags); + smp_mb__after_atomic(); usb_kill_urb(tp->intr_urb); cancel_delayed_work_sync(&tp->schedule); if (netif_carrier_ok(netdev)) { @@ -4261,7 +10962,9 @@ static int rtl8152_post_reset(struct usb_interface *intf) if (!netif_running(netdev)) return 0; + smp_mb__before_atomic(); set_bit(WORK_ENABLE, &tp->flags); + smp_mb__after_atomic(); if (netif_carrier_ok(netdev)) { mutex_lock(&tp->control); tp->rtl_ops.enable(tp); @@ -4271,6 +10974,7 @@ static int rtl8152_post_reset(struct usb_interface *intf) } napi_enable(&tp->napi); + tasklet_enable(&tp->tx_tl); netif_wake_queue(netdev); usb_submit_urb(tp->intr_urb, GFP_KERNEL); @@ -4312,7 +11016,9 @@ static int rtl8152_runtime_resume(struct r8152 *tp) tp->rtl_ops.autosuspend_en(tp, false); napi_disable(napi); + smp_mb__before_atomic(); set_bit(WORK_ENABLE, &tp->flags); + smp_mb__after_atomic(); if (netif_carrier_ok(netdev)) { if (rtl8152_get_speed(tp) & LINK_STATUS) { @@ -4328,8 +11034,11 @@ static int rtl8152_runtime_resume(struct r8152 *tp) clear_bit(SELECTIVE_SUSPEND, &tp->flags); smp_mb__after_atomic(); - if (!list_empty(&tp->rx_done)) + if (!list_empty(&tp->rx_done)) { + local_bh_disable(); napi_schedule(&tp->napi); + local_bh_enable(); + } usb_submit_urb(tp->intr_urb, GFP_NOIO); } else { @@ -4337,6 +11046,7 @@ static int rtl8152_runtime_resume(struct r8152 *tp) tp->rtl_ops.autosuspend_en(tp, false); clear_bit(SELECTIVE_SUSPEND, &tp->flags); + smp_mb__after_atomic(); } return 0; @@ -4348,10 +11058,15 @@ static int rtl8152_system_resume(struct r8152 
*tp) netif_device_attach(netdev); - if (netif_running(netdev) && netdev->flags & IFF_UP) { + if (netif_running(netdev) && (netdev->flags & IFF_UP)) { tp->rtl_ops.up(tp); netif_carrier_off(netdev); + smp_mb__before_atomic(); set_bit(WORK_ENABLE, &tp->flags); + smp_mb__after_atomic(); + if (test_and_clear_bit(RECOVER_SPEED, &tp->flags)) + rtl8152_set_speed(tp, tp->autoneg, tp->speed, + tp->duplex, tp->advertising); usb_submit_urb(tp->intr_urb, GFP_NOIO); } @@ -4363,6 +11078,9 @@ static int rtl8152_runtime_suspend(struct r8152 *tp) struct net_device *netdev = tp->netdev; int ret = 0; + if (!tp->rtl_ops.autosuspend_en) + return -EBUSY; + set_bit(SELECTIVE_SUSPEND, &tp->flags); smp_mb__after_atomic(); @@ -4388,7 +11106,9 @@ static int rtl8152_runtime_suspend(struct r8152 *tp) } } + smp_mb__before_atomic(); clear_bit(WORK_ENABLE, &tp->flags); + smp_mb__after_atomic(); usb_kill_urb(tp->intr_urb); tp->rtl_ops.autosuspend_en(tp, true); @@ -4416,22 +11136,25 @@ out1: static int rtl8152_system_suspend(struct r8152 *tp) { struct net_device *netdev = tp->netdev; - int ret = 0; netif_device_detach(netdev); if (netif_running(netdev) && test_bit(WORK_ENABLE, &tp->flags)) { struct napi_struct *napi = &tp->napi; + smp_mb__before_atomic(); clear_bit(WORK_ENABLE, &tp->flags); + smp_mb__after_atomic(); usb_kill_urb(tp->intr_urb); + tasklet_disable(&tp->tx_tl); napi_disable(napi); cancel_delayed_work_sync(&tp->schedule); tp->rtl_ops.down(tp); napi_enable(napi); + tasklet_enable(&tp->tx_tl); } - return ret; + return 0; } static int rtl8152_suspend(struct usb_interface *intf, pm_message_t message) @@ -4552,22 +11275,128 @@ static void rtl8152_get_drvinfo(struct net_device *netdev, } static -int rtl8152_get_link_ksettings(struct net_device *netdev, - struct ethtool_link_ksettings *cmd) +int rtl8152_get_settings(struct net_device *netdev, struct ethtool_cmd *cmd) { struct r8152 *tp = netdev_priv(netdev); - int ret; - - if (!tp->mii.mdio_read) - return -EOPNOTSUPP; + u16 bmcr, bmsr; + int 
ret, advert; ret = usb_autopm_get_interface(tp->intf); if (ret < 0) goto out; + cmd->supported = + (SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full | + SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full | + SUPPORTED_Autoneg | SUPPORTED_MII); + + /* only supports twisted-pair */ + cmd->port = PORT_MII; + + /* only supports internal transceiver */ + cmd->transceiver = XCVR_INTERNAL; + cmd->phy_address = 32; +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,31) + cmd->mdio_support = ETH_MDIO_SUPPORTS_C22; +#endif + cmd->advertising = ADVERTISED_MII; + mutex_lock(&tp->control); - mii_ethtool_get_link_ksettings(&tp->mii, cmd); + bmcr = r8152_mdio_read(tp, MII_BMCR); + bmsr = r8152_mdio_read(tp, MII_BMSR); + + advert = r8152_mdio_read(tp, MII_ADVERTISE); + if (advert & ADVERTISE_10HALF) + cmd->advertising |= ADVERTISED_10baseT_Half; + if (advert & ADVERTISE_10FULL) + cmd->advertising |= ADVERTISED_10baseT_Full; + if (advert & ADVERTISE_100HALF) + cmd->advertising |= ADVERTISED_100baseT_Half; + if (advert & ADVERTISE_100FULL) + cmd->advertising |= ADVERTISED_100baseT_Full; + if (advert & ADVERTISE_PAUSE_CAP) + cmd->advertising |= ADVERTISED_Pause; + if (advert & ADVERTISE_PAUSE_ASYM) + cmd->advertising |= ADVERTISED_Asym_Pause; + if (tp->mii.supports_gmii) { + u16 ctrl1000 = r8152_mdio_read(tp, MII_CTRL1000); + + cmd->supported |= SUPPORTED_1000baseT_Full; + + if (test_bit(SUPPORT_2500FULL, &tp->flags)) { + u16 data = ocp_reg_read(tp, 0xa5d4); + + cmd->supported |= SUPPORTED_2500baseX_Full; + if (data & BIT(7)) + cmd->advertising |= ADVERTISED_2500baseX_Full; + } + + if (ctrl1000 & ADVERTISE_1000HALF) + cmd->advertising |= ADVERTISED_1000baseT_Half; + if (ctrl1000 & ADVERTISE_1000FULL) + cmd->advertising |= ADVERTISED_1000baseT_Full; + } + +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,31) + if (bmsr & BMSR_ANEGCOMPLETE) { + advert = r8152_mdio_read(tp, MII_LPA); + if (advert & LPA_LPACK) + cmd->lp_advertising |= ADVERTISED_Autoneg; + if (advert & ADVERTISE_10HALF) + 
cmd->lp_advertising |= + ADVERTISED_10baseT_Half; + if (advert & ADVERTISE_10FULL) + cmd->lp_advertising |= + ADVERTISED_10baseT_Full; + if (advert & ADVERTISE_100HALF) + cmd->lp_advertising |= + ADVERTISED_100baseT_Half; + if (advert & ADVERTISE_100FULL) + cmd->lp_advertising |= + ADVERTISED_100baseT_Full; + + if (tp->mii.supports_gmii) { + u16 stat1000 = r8152_mdio_read(tp, MII_STAT1000); + + if (stat1000 & LPA_1000HALF) + cmd->lp_advertising |= + ADVERTISED_1000baseT_Half; + if (stat1000 & LPA_1000FULL) + cmd->lp_advertising |= + ADVERTISED_1000baseT_Full; + } + } else { + cmd->lp_advertising = 0; + } +#endif + + if (bmcr & BMCR_ANENABLE) { + cmd->advertising |= ADVERTISED_Autoneg; + cmd->autoneg = AUTONEG_ENABLE; + } else { + cmd->autoneg = AUTONEG_DISABLE; + } + + + if (netif_running(netdev) && netif_carrier_ok(netdev)) { + u16 speed = rtl8152_get_speed(tp); + + if (speed & _100bps) + cmd->speed = SPEED_100; + else if (speed & _10bps) + cmd->speed = SPEED_10; + else if (tp->mii.supports_gmii && (speed & _1000bps)) + cmd->speed = SPEED_1000; + else if (test_bit(SUPPORT_2500FULL, &tp->flags) && + (speed & _2500bps)) + cmd->speed = SPEED_2500; + + cmd->duplex = (speed & FULL_DUP) ? 
DUPLEX_FULL : DUPLEX_HALF; + } else { + cmd->speed = SPEED_UNKNOWN; + cmd->duplex = DUPLEX_UNKNOWN; + } mutex_unlock(&tp->control); @@ -4577,8 +11406,8 @@ out: return ret; } -static int rtl8152_set_link_ksettings(struct net_device *dev, - const struct ethtool_link_ksettings *cmd) +#if LINUX_VERSION_CODE < KERNEL_VERSION(4,20,0) +static int rtl8152_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) { struct r8152 *tp = netdev_priv(dev); int ret; @@ -4589,12 +11418,13 @@ static int rtl8152_set_link_ksettings(struct net_device *dev, mutex_lock(&tp->control); - ret = rtl8152_set_speed(tp, cmd->base.autoneg, cmd->base.speed, - cmd->base.duplex); + ret = rtl8152_set_speed(tp, cmd->autoneg, cmd->speed, cmd->duplex, + cmd->advertising); if (!ret) { - tp->autoneg = cmd->base.autoneg; - tp->speed = cmd->base.speed; - tp->duplex = cmd->base.duplex; + tp->autoneg = cmd->autoneg; + tp->speed = cmd->speed; + tp->duplex = cmd->duplex; + tp->advertising = cmd->advertising; } mutex_unlock(&tp->control); @@ -4604,6 +11434,73 @@ static int rtl8152_set_link_ksettings(struct net_device *dev, out: return ret; } +#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(4,20,0) */ + +#if LINUX_VERSION_CODE >= KERNEL_VERSION(4,6,0) +static int rtl8152_get_link_ksettings(struct net_device *netdev, + struct ethtool_link_ksettings *cmd) +{ + struct ethtool_cmd ecmd; + int ret; + + memset(&ecmd, 0, sizeof(ecmd)); + ret = rtl8152_get_settings(netdev, &ecmd); + if (ret < 0) + goto out; + + /* only supports twisted-pair */ + cmd->base.port = ecmd.port; + + cmd->base.phy_address = ecmd.phy_address; + cmd->base.mdio_support = ecmd.mdio_support; + cmd->base.autoneg = ecmd.autoneg; + cmd->base.speed = ecmd.speed; + cmd->base.duplex = ecmd.duplex; + + ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported, + ecmd.supported); + ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising, + ecmd.advertising); + ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.lp_advertising, + 
ecmd.lp_advertising); + +out: + return ret; +} + +static int rtl8152_set_link_ksettings(struct net_device *dev, + const struct ethtool_link_ksettings *cmd) +{ + struct r8152 *tp = netdev_priv(dev); + u32 advertising; + int ret; + + ret = usb_autopm_get_interface(tp->intf); + if (ret < 0) + goto out; + + mutex_lock(&tp->control); + + ethtool_convert_link_mode_to_legacy_u32(&advertising, + cmd->link_modes.advertising); + + ret = rtl8152_set_speed(tp, cmd->base.autoneg, cmd->base.speed, + cmd->base.duplex, advertising); + if (!ret) { + tp->autoneg = cmd->base.autoneg; + tp->speed = cmd->base.speed; + tp->duplex = cmd->base.duplex; + tp->advertising = advertising; + } + + mutex_unlock(&tp->control); + + usb_autopm_put_interface(tp->intf); + +out: + return ret; +} +#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4,6,0) */ static const char rtl8152_gstrings[][ETH_GSTRING_LEN] = { "tx_packets", @@ -4621,6 +11518,12 @@ static const char rtl8152_gstrings[][ETH_GSTRING_LEN] = { "tx_underrun", }; +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,33) +static int rtl8152_get_sset_count(struct net_device *dev) +{ + return ARRAY_SIZE(rtl8152_gstrings); +} +#else static int rtl8152_get_sset_count(struct net_device *dev, int sset) { switch (sset) { @@ -4630,6 +11533,7 @@ static int rtl8152_get_sset_count(struct net_device *dev, int sset) return -EOPNOTSUPP; } } +#endif static void rtl8152_get_ethtool_stats(struct net_device *dev, struct ethtool_stats *stats, u64 *data) @@ -4640,8 +11544,15 @@ static void rtl8152_get_ethtool_stats(struct net_device *dev, if (usb_autopm_get_interface(tp->intf) < 0) return; + if (mutex_lock_interruptible(&tp->control) < 0) { + usb_autopm_put_interface(tp->intf); + return; + } + generic_ocp_read(tp, PLA_TALLYCNT, sizeof(tally), &tally, MCU_TYPE_PLA); + mutex_unlock(&tp->control); + usb_autopm_put_interface(tp->intf); data[0] = le64_to_cpu(tally.tx_packets); @@ -4668,9 +11579,10 @@ static void rtl8152_get_strings(struct net_device *dev, u32 stringset, u8 
*data) } } +#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,6,0) static int r8152_get_eee(struct r8152 *tp, struct ethtool_eee *eee) { - u32 ocp_data, lp, adv, supported = 0; + u32 lp, adv, supported = 0; u16 val; val = r8152_mmd_read(tp, MDIO_MMD_PCS, MDIO_PCS_EEE_ABLE); @@ -4682,10 +11594,7 @@ static int r8152_get_eee(struct r8152 *tp, struct ethtool_eee *eee) val = r8152_mmd_read(tp, MDIO_MMD_AN, MDIO_AN_EEE_LPABLE); lp = mmd_eee_adv_to_ethtool_adv_t(val); - ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_EEE_CR); - ocp_data &= EEE_RX_EN | EEE_TX_EN; - - eee->eee_enabled = !!ocp_data; + eee->eee_enabled = tp->eee_en; eee->eee_active = !!(supported & adv & lp); eee->supported = supported; eee->advertised = adv; @@ -4699,18 +11608,21 @@ static int r8152_set_eee(struct r8152 *tp, struct ethtool_eee *eee) u16 val = ethtool_adv_to_mmd_eee_adv_t(eee->advertised); r8152_eee_en(tp, eee->eee_enabled); + tp->eee_en = eee->eee_enabled; - if (!eee->eee_enabled) - val = 0; - - r8152_mmd_write(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, val); + if (eee->eee_enabled) { + r8152_mmd_write(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, val); + tp->eee_adv = val; + } else { + r8152_mmd_write(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, 0); + } return 0; } static int r8153_get_eee(struct r8152 *tp, struct ethtool_eee *eee) { - u32 ocp_data, lp, adv, supported = 0; + u32 lp, adv, supported = 0; u16 val; val = ocp_reg_read(tp, OCP_EEE_ABLE); @@ -4722,10 +11634,7 @@ static int r8153_get_eee(struct r8152 *tp, struct ethtool_eee *eee) val = ocp_reg_read(tp, OCP_EEE_LPABLE); lp = mmd_eee_adv_to_ethtool_adv_t(val); - ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_EEE_CR); - ocp_data &= EEE_RX_EN | EEE_TX_EN; - - eee->eee_enabled = !!ocp_data; + eee->eee_enabled = tp->eee_en; eee->eee_active = !!(supported & adv & lp); eee->supported = supported; eee->advertised = adv; @@ -4739,11 +11648,14 @@ static int r8153_set_eee(struct r8152 *tp, struct ethtool_eee *eee) u16 val = ethtool_adv_to_mmd_eee_adv_t(eee->advertised); 
r8153_eee_en(tp, eee->eee_enabled); + tp->eee_en = eee->eee_enabled; - if (!eee->eee_enabled) - val = 0; - - ocp_reg_write(tp, OCP_EEE_ADV, val); + if (eee->eee_enabled) { + ocp_reg_write(tp, OCP_EEE_ADV, val); + tp->eee_adv = val; + } else { + ocp_reg_write(tp, OCP_EEE_ADV, 0); + } return 0; } @@ -4752,12 +11664,32 @@ static int r8153b_set_eee(struct r8152 *tp, struct ethtool_eee *eee) { u16 val = ethtool_adv_to_mmd_eee_adv_t(eee->advertised); - r8153b_eee_en(tp, eee->eee_enabled); + r8153_eee_en(tp, eee->eee_enabled); + tp->eee_en = eee->eee_enabled; - if (!eee->eee_enabled) - val = 0; + if (eee->eee_enabled) { + ocp_reg_write(tp, OCP_EEE_ADV, val); + tp->eee_adv = val; + } else { + ocp_reg_write(tp, OCP_EEE_ADV, 0); + } - ocp_reg_write(tp, OCP_EEE_ADV, val); + return 0; +} + +static int r8156_set_eee(struct r8152 *tp, struct ethtool_eee *eee) +{ + u16 val = ethtool_adv_to_mmd_eee_adv_t(eee->advertised); + + r8156_eee_en(tp, eee->eee_enabled); + tp->eee_en = eee->eee_enabled; + + if (eee->eee_enabled) { + ocp_reg_write(tp, OCP_EEE_ADV, val); + tp->eee_adv = val; + } else { + ocp_reg_write(tp, OCP_EEE_ADV, 0); + } return 0; } @@ -4768,6 +11700,11 @@ rtl_ethtool_get_eee(struct net_device *net, struct ethtool_eee *edata) struct r8152 *tp = netdev_priv(net); int ret; + if (!tp->rtl_ops.eee_get) { + ret = -EOPNOTSUPP; + goto out; + } + ret = usb_autopm_get_interface(tp->intf); if (ret < 0) goto out; @@ -4790,6 +11727,11 @@ rtl_ethtool_set_eee(struct net_device *net, struct ethtool_eee *edata) struct r8152 *tp = netdev_priv(net); int ret; + if (!tp->rtl_ops.eee_get) { + ret = -EOPNOTSUPP; + goto out; + } + ret = usb_autopm_get_interface(tp->intf); if (ret < 0) goto out; @@ -4798,7 +11740,7 @@ rtl_ethtool_set_eee(struct net_device *net, struct ethtool_eee *edata) ret = tp->rtl_ops.eee_set(tp, edata); if (!ret) - ret = mii_nway_restart(&tp->mii); + ret = rtl_nway_restart(tp); mutex_unlock(&tp->control); @@ -4807,6 +11749,7 @@ rtl_ethtool_set_eee(struct net_device *net, 
struct ethtool_eee *edata) out: return ret; } +#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(3,6,0) */ static int rtl8152_nway_reset(struct net_device *dev) { @@ -4819,7 +11762,7 @@ static int rtl8152_nway_reset(struct net_device *dev) mutex_lock(&tp->control); - ret = mii_nway_restart(&tp->mii); + ret = rtl_nway_restart(tp); mutex_unlock(&tp->control); @@ -4858,6 +11801,7 @@ static int rtl8152_set_coalesce(struct net_device *netdev, case RTL_VER_01: case RTL_VER_02: case RTL_VER_07: + case RTL_TEST_01: /* fix me */ return -EOPNOTSUPP; default: break; @@ -4886,8 +11830,22 @@ static int rtl8152_set_coalesce(struct net_device *netdev, return ret; } +static int rtl8152_ethtool_begin(struct net_device *netdev) +{ + struct r8152 *tp = netdev_priv(netdev); + + if (unlikely(tp->rtk_enable_diag)) + return -EBUSY; + + return 0; +} + static const struct ethtool_ops ops = { .get_drvinfo = rtl8152_get_drvinfo, +#if LINUX_VERSION_CODE < KERNEL_VERSION(4,20,0) + .get_settings = rtl8152_get_settings, + .set_settings = rtl8152_set_settings, +#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(4,20,0) */ .get_link = ethtool_op_get_link, .nway_reset = rtl8152_nway_reset, .get_msglevel = rtl8152_get_msglevel, @@ -4897,25 +11855,219 @@ static const struct ethtool_ops ops = { .get_strings = rtl8152_get_strings, .get_sset_count = rtl8152_get_sset_count, .get_ethtool_stats = rtl8152_get_ethtool_stats, +#if LINUX_VERSION_CODE < KERNEL_VERSION(3,3,0) + .get_tx_csum = ethtool_op_get_tx_csum, + .set_tx_csum = ethtool_op_set_tx_csum, + .get_sg = ethtool_op_get_sg, + .set_sg = ethtool_op_set_sg, +#ifdef NETIF_F_TSO + .get_tso = ethtool_op_get_tso, + .set_tso = ethtool_op_set_tso, +#endif +#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(3,3,0) */ .get_coalesce = rtl8152_get_coalesce, .set_coalesce = rtl8152_set_coalesce, +#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,6,0) .get_eee = rtl_ethtool_get_eee, .set_eee = rtl_ethtool_set_eee, +#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(3,6,0) */ +#if 
LINUX_VERSION_CODE >= KERNEL_VERSION(4,6,0) .get_link_ksettings = rtl8152_get_link_ksettings, .set_link_ksettings = rtl8152_set_link_ksettings, +#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4,6,0) */ + .begin = rtl8152_ethtool_begin, }; +static int rtltool_ioctl(struct r8152 *tp, struct ifreq *ifr) +{ + struct net_device *netdev = tp->netdev; + struct rtltool_cmd my_cmd, *myptr; + struct usb_device_info *uinfo; + struct usb_device *udev; + __le32 ocp_data; + void *buffer; + int ret; + + myptr = (struct rtltool_cmd *)ifr->ifr_data; + if (copy_from_user(&my_cmd, myptr, sizeof(my_cmd))) + return -EFAULT; + + ret = 0; + + switch (my_cmd.cmd) { + case RTLTOOL_PLA_OCP_READ_DWORD: + pla_ocp_read(tp, (u16)my_cmd.offset, sizeof(ocp_data), + &ocp_data); + my_cmd.data = __le32_to_cpu(ocp_data); + + if (copy_to_user(myptr, &my_cmd, sizeof(my_cmd))) { + ret = -EFAULT; + break; + } + break; + + case RTLTOOL_PLA_OCP_WRITE_DWORD: + if (!tp->rtk_enable_diag && net_ratelimit()) + netif_warn(tp, drv, netdev, + "rtk diag isn't enable\n"); + + ocp_data = __cpu_to_le32(my_cmd.data); + pla_ocp_write(tp, (u16)my_cmd.offset, (u16)my_cmd.byteen, + sizeof(ocp_data), &ocp_data); + break; + + case RTLTOOL_USB_OCP_READ_DWORD: + usb_ocp_read(tp, (u16)my_cmd.offset, sizeof(ocp_data), + &ocp_data); + my_cmd.data = __le32_to_cpu(ocp_data); + + if (copy_to_user(myptr, &my_cmd, sizeof(my_cmd))) { + ret = -EFAULT; + break; + } + break; + + + case RTLTOOL_USB_OCP_WRITE_DWORD: + if (!tp->rtk_enable_diag && net_ratelimit()) + netif_warn(tp, drv, netdev, + "rtk diag isn't enable\n"); + + ocp_data = __cpu_to_le32(my_cmd.data); + usb_ocp_write(tp, (u16)my_cmd.offset, (u16)my_cmd.byteen, + sizeof(ocp_data), &ocp_data); + break; + + case RTLTOOL_PLA_OCP_READ: + buffer = kmalloc(my_cmd.data, GFP_KERNEL); + if (!buffer) { + ret = -ENOMEM; + break; + } + + pla_ocp_read(tp, (u16)my_cmd.offset, my_cmd.data, buffer); + + if (copy_to_user(my_cmd.buf, buffer, my_cmd.data)) + ret = -EFAULT; + + kfree(buffer); + 
break; + + case RTLTOOL_PLA_OCP_WRITE: + if (!tp->rtk_enable_diag && net_ratelimit()) + netif_warn(tp, drv, netdev, + "rtk diag isn't enable\n"); + + buffer = kmalloc(my_cmd.data, GFP_KERNEL); + if (!buffer) { + ret = -ENOMEM; + break; + } + + if (copy_from_user(buffer, my_cmd.buf, my_cmd.data)) { + ret = -EFAULT; + kfree(buffer); + break; + } + + pla_ocp_write(tp, (u16)my_cmd.offset, (u16)my_cmd.byteen, + my_cmd.data, buffer); + kfree(buffer); + break; + + case RTLTOOL_USB_OCP_READ: + buffer = kmalloc(my_cmd.data, GFP_KERNEL); + if (!buffer) { + ret = -ENOMEM; + break; + } + + usb_ocp_read(tp, (u16)my_cmd.offset, my_cmd.data, buffer); + + if (copy_to_user(my_cmd.buf, buffer, my_cmd.data)) + ret = -EFAULT; + + kfree(buffer); + break; + + case RTLTOOL_USB_OCP_WRITE: + if (!tp->rtk_enable_diag && net_ratelimit()) + netif_warn(tp, drv, netdev, + "rtk diag isn't enable\n"); + + buffer = kmalloc(my_cmd.data, GFP_KERNEL); + if (!buffer) { + ret = -ENOMEM; + break; + } + + if (copy_from_user(buffer, my_cmd.buf, my_cmd.data)) { + ret = -EFAULT; + kfree(buffer); + break; + } + + usb_ocp_write(tp, (u16)my_cmd.offset, (u16)my_cmd.byteen, + my_cmd.data, buffer); + kfree(buffer); + break; + + case RTLTOOL_USB_INFO: + uinfo = (struct usb_device_info *)&my_cmd.nic_info; + udev = tp->udev; + uinfo->idVendor = __le16_to_cpu(udev->descriptor.idVendor); + uinfo->idProduct = __le16_to_cpu(udev->descriptor.idProduct); + uinfo->bcdDevice = __le16_to_cpu(udev->descriptor.bcdDevice); + strlcpy(uinfo->devpath, udev->devpath, sizeof(udev->devpath)); + pla_ocp_read(tp, PLA_IDR, sizeof(uinfo->dev_addr), + uinfo->dev_addr); + + if (copy_to_user(myptr, &my_cmd, sizeof(my_cmd))) + ret = -EFAULT; + + break; + + case RTL_ENABLE_USB_DIAG: + ret = usb_autopm_get_interface(tp->intf); + if (ret < 0) + break; + + mutex_lock(&tp->control); + tp->rtk_enable_diag++; + netif_info(tp, drv, netdev, "enable rtk diag %d\n", + tp->rtk_enable_diag); + break; + + case RTL_DISABLE_USB_DIAG: + if 
(!tp->rtk_enable_diag) { + netif_err(tp, drv, netdev, + "Invalid using rtk diag\n"); + ret = -EPERM; + break; + } + + rtk_disable_diag(tp); + break; + + default: + ret = -EOPNOTSUPP; + break; + } + + return ret; +} + static int rtl8152_ioctl(struct net_device *netdev, struct ifreq *rq, int cmd) { struct r8152 *tp = netdev_priv(netdev); struct mii_ioctl_data *data = if_mii(rq); - int res; + int ret; if (test_bit(RTL8152_UNPLUG, &tp->flags)) return -ENODEV; - res = usb_autopm_get_interface(tp->intf); - if (res < 0) + ret = usb_autopm_get_interface(tp->intf); + if (ret < 0) goto out; switch (cmd) { @@ -4924,6 +12076,11 @@ static int rtl8152_ioctl(struct net_device *netdev, struct ifreq *rq, int cmd) break; case SIOCGMIIREG: + if (unlikely(tp->rtk_enable_diag)) { + ret = -EBUSY; + break; + } + mutex_lock(&tp->control); data->val_out = r8152_mdio_read(tp, data->reg_num); mutex_unlock(&tp->control); @@ -4931,22 +12088,36 @@ static int rtl8152_ioctl(struct net_device *netdev, struct ifreq *rq, int cmd) case SIOCSMIIREG: if (!capable(CAP_NET_ADMIN)) { - res = -EPERM; + ret = -EPERM; break; } + + if (unlikely(tp->rtk_enable_diag)) { + ret = -EBUSY; + break; + } + mutex_lock(&tp->control); r8152_mdio_write(tp, data->reg_num, data->val_in); mutex_unlock(&tp->control); break; + case SIOCDEVPRIVATE: + if (!capable(CAP_NET_ADMIN)) { + ret = -EPERM; + break; + } + ret = rtltool_ioctl(tp, rq); + break; + default: - res = -EOPNOTSUPP; + ret = -EOPNOTSUPP; } usb_autopm_put_interface(tp->intf); out: - return res; + return ret; } static int rtl8152_change_mtu(struct net_device *dev, int new_mtu) @@ -4958,12 +12129,21 @@ static int rtl8152_change_mtu(struct net_device *dev, int new_mtu) case RTL_VER_01: case RTL_VER_02: case RTL_VER_07: +#if LINUX_VERSION_CODE < KERNEL_VERSION(4,10,0) + return eth_change_mtu(dev, new_mtu); +#else dev->mtu = new_mtu; return 0; +#endif default: break; } +#if LINUX_VERSION_CODE < KERNEL_VERSION(4,10,0) + if (new_mtu < 68 || new_mtu > RTL8153_MAX_MTU) + 
return -EINVAL; +#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(4,10,0) */ + ret = usb_autopm_get_interface(tp->intf); if (ret < 0) return ret; @@ -4988,19 +12168,27 @@ static int rtl8152_change_mtu(struct net_device *dev, int new_mtu) return ret; } +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,29) static const struct net_device_ops rtl8152_netdev_ops = { .ndo_open = rtl8152_open, .ndo_stop = rtl8152_close, .ndo_do_ioctl = rtl8152_ioctl, .ndo_start_xmit = rtl8152_start_xmit, .ndo_tx_timeout = rtl8152_tx_timeout, +#if LINUX_VERSION_CODE < KERNEL_VERSION(3,0,0) + .ndo_vlan_rx_register = rtl8152_vlan_rx_register, +#else .ndo_set_features = rtl8152_set_features, +#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(3,0,0) */ .ndo_set_rx_mode = rtl8152_set_rx_mode, .ndo_set_mac_address = rtl8152_set_mac_address, .ndo_change_mtu = rtl8152_change_mtu, .ndo_validate_addr = eth_validate_addr, +#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,18,4) .ndo_features_check = rtl8152_features_check, +#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(3,18,4) */ }; +#endif static void rtl8152_unload(struct r8152 *tp) { @@ -5042,11 +12230,16 @@ static int rtl_ops_init(struct r8152 *tp) ops->up = rtl8152_up; ops->down = rtl8152_down; ops->unload = rtl8152_unload; +#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,6,0) ops->eee_get = r8152_get_eee; ops->eee_set = r8152_set_eee; +#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(3,6,0) */ ops->in_nway = rtl8152_in_nway; ops->hw_phy_cfg = r8152b_hw_phy_cfg; ops->autosuspend_en = rtl_runtime_suspend_enable; + tp->rx_buf_sz = 16 * 1024; + tp->eee_en = true; + tp->eee_adv = MDIO_EEE_100TX; break; case RTL_VER_03: @@ -5059,26 +12252,75 @@ static int rtl_ops_init(struct r8152 *tp) ops->up = rtl8153_up; ops->down = rtl8153_down; ops->unload = rtl8153_unload; +#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,6,0) ops->eee_get = r8153_get_eee; ops->eee_set = r8153_set_eee; +#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(3,6,0) */ ops->in_nway = rtl8153_in_nway; ops->hw_phy_cfg 
= r8153_hw_phy_cfg; ops->autosuspend_en = rtl8153_runtime_enable; + tp->rx_buf_sz = 32 * 1024; + tp->eee_en = true; + tp->eee_adv = MDIO_EEE_1000T | MDIO_EEE_100TX; break; case RTL_VER_08: case RTL_VER_09: ops->init = r8153b_init; ops->enable = rtl8153_enable; - ops->disable = rtl8153b_disable; + ops->disable = rtl8153_disable; ops->up = rtl8153b_up; ops->down = rtl8153b_down; ops->unload = rtl8153b_unload; +#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,6,0) ops->eee_get = r8153_get_eee; ops->eee_set = r8153b_set_eee; +#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(3,6,0) */ ops->in_nway = rtl8153_in_nway; ops->hw_phy_cfg = r8153b_hw_phy_cfg; ops->autosuspend_en = rtl8153b_runtime_enable; + tp->rx_buf_sz = 32 * 1024; + tp->eee_en = true; + tp->eee_adv = MDIO_EEE_1000T | MDIO_EEE_100TX; + break; + + case RTL_TEST_01: + ops->init = r8156_init; + ops->enable = rtl8156_enable; + ops->disable = rtl8153_disable; + ops->up = rtl8156_up; + ops->down = rtl8156_down; + ops->unload = rtl8153_unload; +//#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,6,0) +// ops->eee_get = r8156_get_eee; +// ops->eee_set = r8156_set_eee; +//#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(3,6,0) */ + ops->in_nway = rtl8153_in_nway; + ops->hw_phy_cfg = r8156_hw_phy_cfg; + ops->autosuspend_en = rtl8156_runtime_enable; + tp->rx_buf_sz = 48 * 1024; + set_bit(SUPPORT_2500FULL, &tp->flags); + break; + + case RTL_VER_11: + tp->eee_en = true; + tp->eee_adv = MDIO_EEE_1000T | MDIO_EEE_100TX; + case RTL_VER_10: + ops->init = r8156_init; + ops->enable = rtl8156_enable; + ops->disable = rtl8153_disable; + ops->up = rtl8156_up; + ops->down = rtl8156_down; + ops->unload = rtl8153_unload; +#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,6,0) + ops->eee_get = r8153_get_eee; + ops->eee_set = r8156_set_eee; +#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(3,6,0) */ + ops->in_nway = rtl8153_in_nway; + ops->hw_phy_cfg = r8156_hw_phy_cfg2; + ops->autosuspend_en = rtl8156_runtime_enable; + tp->rx_buf_sz = 48 * 1024; + 
set_bit(SUPPORT_2500FULL, &tp->flags); break; default: @@ -5138,6 +12380,15 @@ static u8 rtl_get_version(struct usb_interface *intf) case 0x6010: version = RTL_VER_09; break; + case 0x7010: + version = RTL_TEST_01; + break; + case 0x7020: + version = RTL_VER_10; + break; + case 0x7030: + version = RTL_VER_11; + break; default: version = RTL_VER_UNKNOWN; dev_info(&intf->dev, "Unknown version 0x%04x\n", ocp_data); @@ -5149,6 +12400,331 @@ static u8 rtl_get_version(struct usb_interface *intf) return version; } +#ifdef RTL8152_DEBUG + +static ssize_t +ocp_show(struct device *dev, struct device_attribute *attr, char *buf) +{ + struct usb_interface *intf = to_usb_interface(dev); + struct r8152 *tp = usb_get_intfdata(intf); + char tmp[256]; + struct tally_counter tally; + int ret; + + strcpy(buf, dev_name(dev)); + strcat(buf, "\n"); + strcat(buf, DRIVER_VERSION); + strcat(buf, "\n"); + + switch (tp->version) { + case RTL_VER_11: + strcat(buf, "RTL_VER_11\n"); + strcat(buf, "nc_patch_181008_usb\n"); + strcat(buf, "nc1_patch_181029_usb\n"); + strcat(buf, "nc2_patch_180821_usb\n"); + strcat(buf, "uc2_patch_181018_usb\n"); + strcat(buf, "USB_patch_code_20180906_v2\n"); + strcat(buf, "PLA_patch_code_20180914_v3\n"); + break; + default: + strcat(buf, "\n\n\n\n\n\n\n"); + break; + } + + ret = usb_autopm_get_interface(intf); + if (ret < 0) + return ret; + + ret = mutex_lock_interruptible(&tp->control); + if (ret < 0) { + usb_autopm_put_interface(intf); + goto err1; + } + + generic_ocp_read(tp, PLA_TALLYCNT, sizeof(tally), &tally, MCU_TYPE_PLA); + + mutex_unlock(&tp->control); + + usb_autopm_put_interface(intf); + + sprintf(tmp, "tx_packets = %Lu\n", le64_to_cpu(tally.tx_packets)); + strcat(buf, tmp); + sprintf(tmp, "rx_packets = %Lu\n", le64_to_cpu(tally.rx_packets)); + strcat(buf, tmp); + sprintf(tmp, "tx_errors = %Lu\n", le64_to_cpu(tally.tx_errors)); + strcat(buf, tmp); + sprintf(tmp, "tx_errors = %u\n", le32_to_cpu(tally.rx_errors)); + strcat(buf, tmp); + sprintf(tmp, 
"rx_missed = %u\n", le16_to_cpu(tally.rx_missed)); + strcat(buf, tmp); + sprintf(tmp, "align_errors = %u\n", le16_to_cpu(tally.align_errors)); + strcat(buf, tmp); + sprintf(tmp, "tx_one_collision = %u\n", + le32_to_cpu(tally.tx_one_collision)); + strcat(buf, tmp); + sprintf(tmp, "tx_multi_collision = %u\n", + le32_to_cpu(tally.tx_multi_collision)); + strcat(buf, tmp); + sprintf(tmp, "rx_unicast = %Lu\n", le64_to_cpu(tally.rx_unicast)); + strcat(buf, tmp); + sprintf(tmp, "rx_broadcast = %Lu\n", le64_to_cpu(tally.rx_broadcast)); + strcat(buf, tmp); + sprintf(tmp, "rx_multicast = %u\n", le32_to_cpu(tally.rx_multicast)); + strcat(buf, tmp); + sprintf(tmp, "tx_aborted = %u\n", le16_to_cpu(tally.tx_aborted)); + strcat(buf, tmp); + sprintf(tmp, "tx_underrun = %u\n", le16_to_cpu(tally.tx_underrun)); + strcat(buf, tmp); + +err1: + if (ret < 0) + return ret; + else + return strlen(buf); +} + +static inline bool hex_value(char p) +{ + return (p >= '0' && p <= '9') || + (p >= 'a' && p <= 'f') || + (p >= 'A' && p <= 'F'); +} + +static int ocp_count(char *v1) +{ + int len = strlen(v1), count = 0; + char *v2 = strchr(v1, ' '); + bool is_vaild = false; + + if (len < 5 || !v2) + goto out1; +// else if (strncmp(v1, "pla ", 4) && strncmp(v1, "usb ", 4)) +// goto out1; + + v1 = v2; + len = strlen(v2); + while(len) { + if (*v1 != ' ') + break; + v1++; + len--; + } + + if (!len || *v1 == '\n') + goto out1; + +check: + v2 = strchr(v1, ' '); + + if (len > 2 && !strncasecmp(v1, "0x", 2)) { + v1 += 2; + len -= 2; + if (v1 == v2 || *v1 == '\n') + goto out1; + } + + if (v2) { + while (v1 < v2) { + if (!hex_value(*v1)) + goto out1; + v1++; + len--; + } + + count++; + + while(len) { + if (*v1 != ' ') + break; + v1++; + len--; + } + + if (len) + goto check; + + is_vaild = true; + } else { + int i; + + if (len && v1[len - 1] == '\n') + len--; + + for (i = 0; i < len; i++) { + if (!hex_value(*v1)) + goto out1; + v1++; + } + + if (len) + count++; + + is_vaild = true; + } + +out1: + if (is_vaild) + 
return count; + else + return 0; +} + +static ssize_t ocp_store(struct device *dev, struct device_attribute *attr, + const char *buf, size_t count) +{ + struct usb_interface *intf; + struct net_device *netdev; + u32 v1, v2, v3, v4; + struct r8152 *tp; + u16 type; + int num; + + intf = to_usb_interface(dev); + tp = usb_get_intfdata(intf); + netdev = tp->netdev; + + if (!strncmp(buf, "pla ", 4)) + type = MCU_TYPE_PLA; + else if (!strncmp(buf, "usb ", 4)) + type = MCU_TYPE_USB; + else + return -EINVAL; + + if (!ocp_count((char *)buf)) + return -EINVAL; + + num = sscanf(strchr(buf, ' '), "%x %x %x %x\n", &v1, &v2, &v3, &v4); + + if (num > 1) { + if ((v1 == 2 && (v2 & 1)) || + (v1 == 4 && (v2 & 3)) || + (type == MCU_TYPE_PLA && + (v2 < 0xc000 || (v2 & ~3) == PLA_OCP_GPHY_BASE))) + return -EINVAL; + } + + count = usb_autopm_get_interface(intf); + if (count < 0) + return count; + + count = mutex_lock_interruptible(&tp->control); + if (count < 0) + goto put; + + switch(num) { + case 2: + switch (v1) { + case 1: + netif_info(tp, drv, netdev, "%s read byte %x = %x\n", + type ? "PLA" : "USB", v2, + ocp_read_byte(tp, type, v2)); + break; + case 2: + netif_info(tp, drv, netdev, "%s read word %x = %x\n", + type ? "PLA" : "USB", v2, + ocp_read_word(tp, type, v2)); + break; + case 4: + netif_info(tp, drv, netdev, "%s read dword %x = %x\n", + type ? "PLA" : "USB", v2, + ocp_read_dword(tp, type, v2)); + break; + default: + count = -EINVAL; + break; + } + break; + case 3: + switch (v1) { + case 1: + netif_info(tp, drv, netdev, "%s write byte %x = %x\n", + type ? "PLA" : "USB", v2, v3); + ocp_write_byte(tp, type, v2, v3); + break; + case 2: + netif_info(tp, drv, netdev, "%s write word %x = %x\n", + type ? "PLA" : "USB", v2, v3); + ocp_write_word(tp, type, v2, v3); + break; + case 4: + netif_info(tp, drv, netdev, "%s write dword %x = %x\n", + type ? 
"PLA" : "USB", v2, v3); + ocp_write_dword(tp, type, v2, v3); + break; + default: + count = -EINVAL; + break; + } + break; + case 4: + case 1: + default: + count = -EINVAL; + break; + } + + mutex_unlock(&tp->control); + +put: + usb_autopm_put_interface(intf); + + return count; +} + +static DEVICE_ATTR_RW(ocp); + +static struct attribute *rtk_attrs[] = { + &dev_attr_ocp.attr, + NULL +}; + +#define ATTR_PLA_SIZE 0x3000 + +/* hexdump -e '"%04_ax\t" 16/1 "%02X " "\n"' pla */ +static ssize_t pla_read(struct file *fp, struct kobject *kobj, + struct bin_attribute *attr, char *buf, loff_t offset, + size_t size) +{ + struct device *dev = kobj_to_dev(kobj); + struct usb_interface *intf = to_usb_interface(dev); + struct r8152 *tp = usb_get_intfdata(intf); + struct net_device *netdev = tp->netdev; + + if (size <= ATTR_PLA_SIZE) + size = min(size, ATTR_PLA_SIZE - (size_t)offset); + else + return -EINVAL; + + /* rtnl_lock(); */ + if (mutex_lock_interruptible(&tp->control)) + return -EINTR; + + if (pla_ocp_read(tp, offset + 0xc000, (u16)size, buf) < 0) + netif_err(tp, drv, netdev, + "Read PLA offset 0x%Lx, len = %zd fail\n", + offset + 0xc000, size); + + mutex_unlock(&tp->control); + /* rtnl_unlock(); */ + + return size; +} + +static BIN_ATTR_RO(pla, ATTR_PLA_SIZE); + +static struct bin_attribute *rtk_bin_attrs[] = { + &bin_attr_pla, + NULL +}; + +static struct attribute_group rtk_attr_grp = { + .name = "nic_swsd", + .attrs = rtk_attrs, + .bin_attrs = rtk_bin_attrs, +}; + +#endif + static int rtl8152_probe(struct usb_interface *intf, const struct usb_device_id *id) { @@ -5161,8 +12737,12 @@ static int rtl8152_probe(struct usb_interface *intf, if (version == RTL_VER_UNKNOWN) return -ENODEV; - if (udev->actconfig->desc.bConfigurationValue != 1) { + if (!rtl_vendor_mode(intf)) { +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19) + dev_err(&intf->dev, "The kernel too old to set configuration\n"); +#else usb_driver_set_configuration(udev, 1); +#endif return -ENODEV; } @@ -5203,14 
+12783,35 @@ static int rtl8152_probe(struct usb_interface *intf, mutex_init(&tp->control); INIT_DELAYED_WORK(&tp->schedule, rtl_work_func_t); INIT_DELAYED_WORK(&tp->hw_phy_work, rtl_hw_phy_work_func_t); + tasklet_init(&tp->tx_tl, bottom_half, (unsigned long)tp); + tasklet_disable(&tp->tx_tl); +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,29) + netdev->open = rtl8152_open; + netdev->stop = rtl8152_close; + netdev->get_stats = rtl8152_get_stats; + netdev->hard_start_xmit = rtl8152_start_xmit; + netdev->tx_timeout = rtl8152_tx_timeout; + netdev->change_mtu = rtl8152_change_mtu; + netdev->set_mac_address = rtl8152_set_mac_address; + netdev->do_ioctl = rtl8152_ioctl; + netdev->set_multicast_list = rtl8152_set_rx_mode; + netdev->vlan_rx_register = rtl8152_vlan_rx_register; +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,22) + netdev->vlan_rx_kill_vid = rtl8152_vlan_rx_kill_vid; +#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(2,6,22) */ +#else netdev->netdev_ops = &rtl8152_netdev_ops; +#endif /* HAVE_NET_DEVICE_OPS */ + netdev->watchdog_timeo = RTL8152_TX_TIMEOUT; netdev->features |= NETIF_F_RXCSUM | NETIF_F_IP_CSUM | NETIF_F_SG | NETIF_F_TSO | NETIF_F_FRAGLIST | NETIF_F_IPV6_CSUM | NETIF_F_TSO6 | NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_TX; + +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,39) netdev->hw_features = NETIF_F_RXCSUM | NETIF_F_IP_CSUM | NETIF_F_SG | NETIF_F_TSO | NETIF_F_FRAGLIST | NETIF_F_IPV6_CSUM | NETIF_F_TSO6 | @@ -5218,15 +12819,23 @@ static int rtl8152_probe(struct usb_interface *intf, netdev->vlan_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO | NETIF_F_HIGHDMA | NETIF_F_FRAGLIST | NETIF_F_IPV6_CSUM | NETIF_F_TSO6; +#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,39) */ if (tp->version == RTL_VER_01) { netdev->features &= ~NETIF_F_RXCSUM; +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,39) netdev->hw_features &= ~NETIF_F_RXCSUM; +#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,39) */ } netdev->ethtool_ops = &ops; +#if LINUX_VERSION_CODE >= 
KERNEL_VERSION(2,6,26) netif_set_gso_max_size(netdev, RTL_LIMITED_TSO_SIZE); +#else + netdev->features &= ~(NETIF_F_TSO | NETIF_F_TSO6); +#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,26) */ +#if LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0) /* MTU range: 68 - 1500 or 9194 */ netdev->min_mtu = ETH_MIN_MTU; switch (tp->version) { @@ -5238,6 +12847,7 @@ static int rtl8152_probe(struct usb_interface *intf, netdev->max_mtu = RTL8153_MAX_MTU; break; } +#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0) */ tp->mii.dev = netdev; tp->mii.mdio_read = read_mii_word; @@ -5245,9 +12855,25 @@ static int rtl8152_probe(struct usb_interface *intf, tp->mii.phy_id_mask = 0x3f; tp->mii.reg_num_mask = 0x1f; tp->mii.phy_id = R8152_PHY_ID; + tp->mii.force_media = 0; + tp->mii.advertising = ADVERTISE_10HALF | ADVERTISE_10FULL | + ADVERTISE_100HALF | ADVERTISE_100FULL; tp->autoneg = AUTONEG_ENABLE; - tp->speed = tp->mii.supports_gmii ? SPEED_1000 : SPEED_100; + tp->speed = SPEED_100; + tp->advertising = ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full | + ADVERTISED_100baseT_Half | + ADVERTISED_100baseT_Full; + if (tp->mii.supports_gmii) { + if (test_bit(SUPPORT_2500FULL, &tp->flags) && + tp->udev->speed >= USB_SPEED_SUPER) { + tp->speed = SPEED_2500; + tp->advertising |= ADVERTISED_2500baseX_Full; + } else { + tp->speed = SPEED_1000; + } + tp->advertising |= ADVERTISED_1000baseT_Full; + } tp->duplex = DUPLEX_FULL; intf->needs_remote_wakeup = 1; @@ -5275,12 +12901,21 @@ static int rtl8152_probe(struct usb_interface *intf, else device_set_wakeup_enable(&udev->dev, false); + /* usb_enable_autosuspend(udev); */ + netif_info(tp, probe, netdev, "%s\n", DRIVER_VERSION); + netif_info(tp, probe, netdev, "%s\n", PATENTS); + +#ifdef RTL8152_DEBUG + if (sysfs_create_group(&intf->dev.kobj, &rtk_attr_grp) < 0) + netif_err(tp, probe, netdev, "creat rtk_attr_grp fail\n"); +#endif return 0; out1: netif_napi_del(&tp->napi); + tasklet_kill(&tp->tx_tl); usb_set_intfdata(intf, NULL); out: 
free_netdev(netdev); @@ -5291,56 +12926,74 @@ static void rtl8152_disconnect(struct usb_interface *intf) { struct r8152 *tp = usb_get_intfdata(intf); +#ifdef RTL8152_DEBUG + sysfs_remove_group(&intf->dev.kobj, &rtk_attr_grp); +#endif + usb_set_intfdata(intf, NULL); if (tp) { - struct usb_device *udev = tp->udev; - - if (udev->state == USB_STATE_NOTATTACHED) - set_bit(RTL8152_UNPLUG, &tp->flags); - - netif_napi_del(&tp->napi); + rtl_set_unplug(tp); unregister_netdev(tp->netdev); + netif_napi_del(&tp->napi); + tasklet_kill(&tp->tx_tl); cancel_delayed_work_sync(&tp->hw_phy_work); - tp->rtl_ops.unload(tp); + if (tp->rtl_ops.unload) + tp->rtl_ops.unload(tp); free_netdev(tp->netdev); } } #define REALTEK_USB_DEVICE(vend, prod) \ - .match_flags = USB_DEVICE_ID_MATCH_DEVICE | \ - USB_DEVICE_ID_MATCH_INT_CLASS, \ - .idVendor = (vend), \ - .idProduct = (prod), \ - .bInterfaceClass = USB_CLASS_VENDOR_SPEC \ + USB_DEVICE_INTERFACE_CLASS(vend, prod, USB_CLASS_VENDOR_SPEC) \ }, \ { \ - .match_flags = USB_DEVICE_ID_MATCH_INT_INFO | \ - USB_DEVICE_ID_MATCH_DEVICE, \ - .idVendor = (vend), \ - .idProduct = (prod), \ - .bInterfaceClass = USB_CLASS_COMM, \ - .bInterfaceSubClass = USB_CDC_SUBCLASS_ETHERNET, \ - .bInterfaceProtocol = USB_CDC_PROTO_NONE + USB_DEVICE_AND_INTERFACE_INFO(vend, prod, USB_CLASS_COMM, \ + USB_CDC_SUBCLASS_ETHERNET, \ + USB_CDC_PROTO_NONE) \ +}, \ +{ \ + USB_DEVICE_AND_INTERFACE_INFO(vend, prod, USB_CLASS_COMM, \ + USB_CDC_SUBCLASS_NCM, \ + USB_CDC_PROTO_NONE) /* table of devices that work with this driver */ static const struct usb_device_id rtl8152_table[] = { + /* Realtek */ {REALTEK_USB_DEVICE(VENDOR_ID_REALTEK, 0x8050)}, {REALTEK_USB_DEVICE(VENDOR_ID_REALTEK, 0x8152)}, {REALTEK_USB_DEVICE(VENDOR_ID_REALTEK, 0x8153)}, + {REALTEK_USB_DEVICE(VENDOR_ID_REALTEK, 0x8156)}, + + /* Microsoft */ {REALTEK_USB_DEVICE(VENDOR_ID_MICROSOFT, 0x07ab)}, {REALTEK_USB_DEVICE(VENDOR_ID_MICROSOFT, 0x07c6)}, {REALTEK_USB_DEVICE(VENDOR_ID_MICROSOFT, 0x0927)}, + + /* Samsung */ 
{REALTEK_USB_DEVICE(VENDOR_ID_SAMSUNG, 0xa101)}, - {REALTEK_USB_DEVICE(VENDOR_ID_LENOVO, 0x304f)}, - {REALTEK_USB_DEVICE(VENDOR_ID_LENOVO, 0x3062)}, - {REALTEK_USB_DEVICE(VENDOR_ID_LENOVO, 0x3069)}, - {REALTEK_USB_DEVICE(VENDOR_ID_LENOVO, 0x7205)}, - {REALTEK_USB_DEVICE(VENDOR_ID_LENOVO, 0x720c)}, - {REALTEK_USB_DEVICE(VENDOR_ID_LENOVO, 0x7214)}, - {REALTEK_USB_DEVICE(VENDOR_ID_LENOVO, 0xa387)}, - {REALTEK_USB_DEVICE(VENDOR_ID_LINKSYS, 0x0041)}, + + /* Lenovo */ + {REALTEK_USB_DEVICE(VENDOR_ID_LENOVO, 0x304f)}, + {REALTEK_USB_DEVICE(VENDOR_ID_LENOVO, 0x3052)}, + {REALTEK_USB_DEVICE(VENDOR_ID_LENOVO, 0x3054)}, + {REALTEK_USB_DEVICE(VENDOR_ID_LENOVO, 0x3057)}, + {REALTEK_USB_DEVICE(VENDOR_ID_LENOVO, 0x3062)}, + {REALTEK_USB_DEVICE(VENDOR_ID_LENOVO, 0x3069)}, + {REALTEK_USB_DEVICE(VENDOR_ID_LENOVO, 0x3082)}, + {REALTEK_USB_DEVICE(VENDOR_ID_LENOVO, 0x7205)}, + {REALTEK_USB_DEVICE(VENDOR_ID_LENOVO, 0x720a)}, + {REALTEK_USB_DEVICE(VENDOR_ID_LENOVO, 0x720b)}, + {REALTEK_USB_DEVICE(VENDOR_ID_LENOVO, 0x720c)}, + {REALTEK_USB_DEVICE(VENDOR_ID_LENOVO, 0x7214)}, + {REALTEK_USB_DEVICE(VENDOR_ID_LENOVO, 0x721e)}, + {REALTEK_USB_DEVICE(VENDOR_ID_LENOVO, 0xa359)}, + {REALTEK_USB_DEVICE(VENDOR_ID_LENOVO, 0xa387)}, + + /* TP-LINK */ + {REALTEK_USB_DEVICE(VENDOR_ID_TPLINK, 0x0601)}, + + /* Nvidia */ {REALTEK_USB_DEVICE(VENDOR_ID_NVIDIA, 0x09ff)}, - {REALTEK_USB_DEVICE(VENDOR_ID_TPLINK, 0x0601)}, {} }; @@ -5357,7 +13010,9 @@ static struct usb_driver rtl8152_driver = { .pre_reset = rtl8152_pre_reset, .post_reset = rtl8152_post_reset, .supports_autosuspend = 1, +#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,5,0) .disable_hub_initiated_lpm = 1, +#endif }; module_usb_driver(rtl8152_driver); diff --git a/drivers/net/wireless/cnss2/debug.h b/drivers/net/wireless/cnss2/debug.h index f0bf2700df5e..ea6a886e7263 100644 --- a/drivers/net/wireless/cnss2/debug.h +++ b/drivers/net/wireless/cnss2/debug.h @@ -20,10 +20,7 @@ extern void *cnss_ipc_log_context; -#define cnss_ipc_log_string(_x...) 
do { \ - if (cnss_ipc_log_context) \ - ipc_log_string(cnss_ipc_log_context, _x); \ - } while (0) +#define cnss_ipc_log_string(_x...) ((void)0) #define cnss_pr_err(_fmt, ...) do { \ printk("%scnss: " _fmt, KERN_ERR, ##__VA_ARGS__); \ diff --git a/drivers/net/wireless/cnss_prealloc/cnss_prealloc.c b/drivers/net/wireless/cnss_prealloc/cnss_prealloc.c index 3a7b2b787e7a..2b57b28e560b 100644 --- a/drivers/net/wireless/cnss_prealloc/cnss_prealloc.c +++ b/drivers/net/wireless/cnss_prealloc/cnss_prealloc.c @@ -362,7 +362,7 @@ static int __init wcnss_pre_alloc_init(void) debug_base = debugfs_create_dir(PRE_ALLOC_DEBUGFS_DIR, NULL); if (IS_ERR_OR_NULL(debug_base)) { - pr_err("%s: Failed to create debugfs dir\n", __func__); + pr_debug("%s: Failed to create debugfs dir\n", __func__); } else if (IS_ERR_OR_NULL(debugfs_create_file( PRE_ALLOC_DEBUGFS_FILE_OBJ, 0644, debug_base, NULL, diff --git a/drivers/net/wireless/cnss_utils/cnss_utils.c b/drivers/net/wireless/cnss_utils/cnss_utils.c index e1d69e1e6dec..b3dfbb698019 100644 --- a/drivers/net/wireless/cnss_utils/cnss_utils.c +++ b/drivers/net/wireless/cnss_utils/cnss_utils.c @@ -595,7 +595,7 @@ static int cnss_utils_debugfs_create(struct cnss_utils_priv *priv) if (IS_ERR(root_dentry)) { ret = PTR_ERR(root_dentry); - pr_err("Unable to create debugfs %d\n", ret); + pr_debug("Unable to create debugfs %d\n", ret); goto out; } priv->root_dentry = root_dentry; diff --git a/drivers/pci/host/pci-msm.c b/drivers/pci/host/pci-msm.c index 30c419d22ef8..0015d0010f7d 100644 --- a/drivers/pci/host/pci-msm.c +++ b/drivers/pci/host/pci-msm.c @@ -246,57 +246,31 @@ } while (0) #define PCIE_DBG(dev, fmt, arg...) do { \ - if ((dev) && (dev)->ipc_log_long) \ - ipc_log_string((dev)->ipc_log_long, \ - "DBG1:%s: " fmt, __func__, arg); \ - if ((dev) && (dev)->ipc_log) \ - ipc_log_string((dev)->ipc_log, "%s: " fmt, __func__, arg); \ if (msm_pcie_debug_mask) \ pr_alert("%s: " fmt, __func__, arg); \ } while (0) #define PCIE_DBG2(dev, fmt, arg...) 
do { \ - if ((dev) && (dev)->ipc_log) \ - ipc_log_string((dev)->ipc_log, "DBG2:%s: " fmt, __func__, arg);\ if (msm_pcie_debug_mask) \ pr_alert("%s: " fmt, __func__, arg); \ } while (0) #define PCIE_DBG3(dev, fmt, arg...) do { \ - if ((dev) && (dev)->ipc_log) \ - ipc_log_string((dev)->ipc_log, "DBG3:%s: " fmt, __func__, arg);\ if (msm_pcie_debug_mask) \ pr_alert("%s: " fmt, __func__, arg); \ } while (0) -#define PCIE_DUMP(dev, fmt, arg...) do { \ - if ((dev) && (dev)->ipc_log_dump) \ - ipc_log_string((dev)->ipc_log_dump, \ - "DUMP:%s: " fmt, __func__, arg); \ - } while (0) +#define PCIE_DUMP(dev, fmt, arg...) ((void)0) #define PCIE_DBG_FS(dev, fmt, arg...) do { \ - if ((dev) && (dev)->ipc_log_dump) \ - ipc_log_string((dev)->ipc_log_dump, \ - "DBG_FS:%s: " fmt, __func__, arg); \ pr_alert("%s: " fmt, __func__, arg); \ } while (0) #define PCIE_INFO(dev, fmt, arg...) do { \ - if ((dev) && (dev)->ipc_log_long) \ - ipc_log_string((dev)->ipc_log_long, \ - "INFO:%s: " fmt, __func__, arg); \ - if ((dev) && (dev)->ipc_log) \ - ipc_log_string((dev)->ipc_log, "%s: " fmt, __func__, arg); \ pr_info("%s: " fmt, __func__, arg); \ } while (0) #define PCIE_ERR(dev, fmt, arg...) 
do { \ - if ((dev) && (dev)->ipc_log_long) \ - ipc_log_string((dev)->ipc_log_long, \ - "ERR:%s: " fmt, __func__, arg); \ - if ((dev) && (dev)->ipc_log) \ - ipc_log_string((dev)->ipc_log, "%s: " fmt, __func__, arg); \ pr_err("%s: " fmt, __func__, arg); \ } while (0) @@ -6758,7 +6732,7 @@ static int __init pcie_init(void) msm_pcie_dev[i].ipc_log = ipc_log_context_create(PCIE_LOG_PAGES, rc_name, 0); if (msm_pcie_dev[i].ipc_log == NULL) - pr_err("%s: unable to create IPC log context for %s\n", + pr_dbg("%s: unable to create IPC log context for %s\n", __func__, rc_name); else PCIE_DBG(&msm_pcie_dev[i], @@ -6768,7 +6742,7 @@ static int __init pcie_init(void) msm_pcie_dev[i].ipc_log_long = ipc_log_context_create(PCIE_LOG_PAGES, rc_name, 0); if (msm_pcie_dev[i].ipc_log_long == NULL) - pr_err("%s: unable to create IPC log context for %s\n", + pr_dbg("%s: unable to create IPC log context for %s\n", __func__, rc_name); else PCIE_DBG(&msm_pcie_dev[i], @@ -6778,7 +6752,7 @@ static int __init pcie_init(void) msm_pcie_dev[i].ipc_log_dump = ipc_log_context_create(PCIE_LOG_PAGES, rc_name, 0); if (msm_pcie_dev[i].ipc_log_dump == NULL) - pr_err("%s: unable to create IPC log context for %s\n", + pr_dbg("%s: unable to create IPC log context for %s\n", __func__, rc_name); else PCIE_DBG(&msm_pcie_dev[i], diff --git a/drivers/pinctrl/qcom/pinctrl-msm.c b/drivers/pinctrl/qcom/pinctrl-msm.c index 0f98a750c708..dc2e2f6cf90f 100644 --- a/drivers/pinctrl/qcom/pinctrl-msm.c +++ b/drivers/pinctrl/qcom/pinctrl-msm.c @@ -224,14 +224,14 @@ static int msm_pinmux_set_mux(struct pinctrl_dev *pctldev, raw_spin_lock_irqsave(&pctrl->lock, flags); - val = readl(base + g->ctl_reg); + val = readl_relaxed(base + g->ctl_reg); val &= ~mask; val |= i << g->mux_bit; /* Check if egpio present and enable that feature */ if (val & BIT(g->egpio_present)) val |= BIT(g->egpio_enable); - writel(val, base + g->ctl_reg); + writel_relaxed(val, base + g->ctl_reg); raw_spin_unlock_irqrestore(&pctrl->lock, flags); @@ -307,7 
+307,7 @@ static int msm_config_group_get(struct pinctrl_dev *pctldev, if (ret < 0) return ret; - val = readl(base + g->ctl_reg); + val = readl_relaxed(base + g->ctl_reg); arg = (val >> bit) & mask; /* Convert register value to pinconf value */ @@ -346,7 +346,7 @@ static int msm_config_group_get(struct pinctrl_dev *pctldev, if (!arg) return -EINVAL; - val = readl(base + g->io_reg); + val = readl_relaxed(base + g->io_reg); arg = !!(val & BIT(g->in_bit)); break; case PIN_CONFIG_INPUT_ENABLE: @@ -422,12 +422,12 @@ static int msm_config_group_set(struct pinctrl_dev *pctldev, case PIN_CONFIG_OUTPUT: /* set output value */ raw_spin_lock_irqsave(&pctrl->lock, flags); - val = readl(base + g->io_reg); + val = readl_relaxed(base + g->io_reg); if (arg) val |= BIT(g->out_bit); else val &= ~BIT(g->out_bit); - writel(val, base + g->io_reg); + writel_relaxed(val, base + g->io_reg); raw_spin_unlock_irqrestore(&pctrl->lock, flags); /* enable output */ @@ -450,10 +450,10 @@ static int msm_config_group_set(struct pinctrl_dev *pctldev, } raw_spin_lock_irqsave(&pctrl->lock, flags); - val = readl(base + g->ctl_reg); + val = readl_relaxed(base + g->ctl_reg); val &= ~(mask << bit); val |= arg << bit; - writel(val, base + g->ctl_reg); + writel_relaxed(val, base + g->ctl_reg); raw_spin_unlock_irqrestore(&pctrl->lock, flags); } @@ -486,9 +486,9 @@ static int msm_gpio_direction_input(struct gpio_chip *chip, unsigned offset) base = reassign_pctrl_reg(pctrl->soc, offset); raw_spin_lock_irqsave(&pctrl->lock, flags); - val = readl(base + g->ctl_reg); + val = readl_relaxed(base + g->ctl_reg); val &= ~BIT(g->oe_bit); - writel(val, base + g->ctl_reg); + writel_relaxed(val, base + g->ctl_reg); raw_spin_unlock_irqrestore(&pctrl->lock, flags); @@ -508,16 +508,16 @@ static int msm_gpio_direction_output(struct gpio_chip *chip, unsigned offset, in base = reassign_pctrl_reg(pctrl->soc, offset); raw_spin_lock_irqsave(&pctrl->lock, flags); - val = readl(base + g->io_reg); + val = readl_relaxed(base + 
g->io_reg); if (value) val |= BIT(g->out_bit); else val &= ~BIT(g->out_bit); - writel(val, base + g->io_reg); + writel_relaxed(val, base + g->io_reg); - val = readl(base + g->ctl_reg); + val = readl_relaxed(base + g->ctl_reg); val |= BIT(g->oe_bit); - writel(val, base + g->ctl_reg); + writel_relaxed(val, base + g->ctl_reg); raw_spin_unlock_irqrestore(&pctrl->lock, flags); @@ -534,7 +534,7 @@ static int msm_gpio_get_direction(struct gpio_chip *chip, unsigned int offset) g = &pctrl->soc->groups[offset]; base = reassign_pctrl_reg(pctrl->soc, offset); - val = readl(base + g->ctl_reg); + val = readl_relaxed(base + g->ctl_reg); /* 0 = output, 1 = input */ return val & BIT(g->oe_bit) ? 0 : 1; @@ -567,12 +567,12 @@ static void msm_gpio_set(struct gpio_chip *chip, unsigned offset, int value) raw_spin_lock_irqsave(&pctrl->lock, flags); - val = readl(base + g->io_reg); + val = readl_relaxed(base + g->io_reg); if (value) val |= BIT(g->out_bit); else val &= ~BIT(g->out_bit); - writel(val, base + g->io_reg); + writel_relaxed(val, base + g->io_reg); raw_spin_unlock_irqrestore(&pctrl->lock, flags); } @@ -604,7 +604,7 @@ static void msm_gpio_dbg_show_one(struct seq_file *s, g = &pctrl->soc->groups[offset]; base = reassign_pctrl_reg(pctrl->soc, offset); - ctl_reg = readl(base + g->ctl_reg); + ctl_reg = readl_relaxed(base + g->ctl_reg); is_out = !!(ctl_reg & BIT(g->oe_bit)); func = (ctl_reg >> g->mux_bit) & 7; @@ -676,14 +676,14 @@ static void msm_gpio_update_dual_edge_pos(struct msm_pinctrl *pctrl, base = reassign_pctrl_reg(pctrl->soc, d->hwirq); do { - val = readl(base + g->io_reg) & BIT(g->in_bit); + val = readl_relaxed(base + g->io_reg) & BIT(g->in_bit); - pol = readl(base + g->intr_cfg_reg); + pol = readl_relaxed(base + g->intr_cfg_reg); pol ^= BIT(g->intr_polarity_bit); - writel(pol, base + g->intr_cfg_reg); + writel_relaxed(pol, base + g->intr_cfg_reg); - val2 = readl(base + g->io_reg) & BIT(g->in_bit); - intstat = readl(base + g->intr_status_reg); + val2 = readl_relaxed(base 
+ g->io_reg) & BIT(g->in_bit); + intstat = readl_relaxed(base + g->intr_status_reg); if (intstat || (val == val2)) return; } while (loop_limit-- > 0); @@ -705,9 +705,9 @@ static void msm_gpio_irq_mask(struct irq_data *d) raw_spin_lock_irqsave(&pctrl->lock, flags); - val = readl(base + g->intr_cfg_reg); + val = readl_relaxed(base + g->intr_cfg_reg); val &= ~BIT(g->intr_enable_bit); - writel(val, base + g->intr_cfg_reg); + writel_relaxed(val, base + g->intr_cfg_reg); clear_bit(d->hwirq, pctrl->enabled_irqs); @@ -734,13 +734,13 @@ static void msm_gpio_irq_enable(struct irq_data *d) * any erraneous interrupts that would have got latched * when the intterupt is not in use. */ - val = readl(base + g->intr_status_reg); + val = readl_relaxed(base + g->intr_status_reg); val &= ~BIT(g->intr_status_bit); - writel(val, base + g->intr_status_reg); + writel_relaxed(val, base + g->intr_status_reg); - val = readl(base + g->intr_cfg_reg); + val = readl_relaxed(base + g->intr_cfg_reg); val |= BIT(g->intr_enable_bit); - writel(val, base + g->intr_cfg_reg); + writel_relaxed(val, base + g->intr_cfg_reg); set_bit(d->hwirq, pctrl->enabled_irqs); @@ -764,9 +764,9 @@ static void msm_gpio_irq_unmask(struct irq_data *d) raw_spin_lock_irqsave(&pctrl->lock, flags); - val = readl(base + g->intr_cfg_reg); + val = readl_relaxed(base + g->intr_cfg_reg); val |= BIT(g->intr_enable_bit); - writel(val, base + g->intr_cfg_reg); + writel_relaxed(val, base + g->intr_cfg_reg); set_bit(d->hwirq, pctrl->enabled_irqs); @@ -790,12 +790,12 @@ static void msm_gpio_irq_ack(struct irq_data *d) raw_spin_lock_irqsave(&pctrl->lock, flags); - val = readl(base + g->intr_status_reg); + val = readl_relaxed(base + g->intr_status_reg); if (g->intr_ack_high) val |= BIT(g->intr_status_bit); else val &= ~BIT(g->intr_status_bit); - writel(val, base + g->intr_status_reg); + writel_relaxed(val, base + g->intr_status_reg); if (test_bit(d->hwirq, pctrl->dual_edge_irqs)) msm_gpio_update_dual_edge_pos(pctrl, g, d); @@ -826,17 
+826,17 @@ static int msm_gpio_irq_set_type(struct irq_data *d, unsigned int type) clear_bit(d->hwirq, pctrl->dual_edge_irqs); /* Route interrupts to application cpu */ - val = readl(base + g->intr_target_reg); + val = readl_relaxed(base + g->intr_target_reg); val &= ~(7 << g->intr_target_bit); val |= g->intr_target_kpss_val << g->intr_target_bit; - writel(val, base + g->intr_target_reg); + writel_relaxed(val, base + g->intr_target_reg); /* Update configuration for gpio. * RAW_STATUS_EN is left on for all gpio irqs. Due to the * internal circuitry of TLMM, toggling the RAW_STATUS * could cause the INTR_STATUS to be set for EDGE interrupts. */ - val = readl(base + g->intr_cfg_reg); + val = readl_relaxed(base + g->intr_cfg_reg); val |= BIT(g->intr_raw_status_bit); if (g->intr_detection_width == 2) { val &= ~(3 << g->intr_detection_bit); @@ -884,7 +884,7 @@ static int msm_gpio_irq_set_type(struct irq_data *d, unsigned int type) } else { BUG(); } - writel(val, base + g->intr_cfg_reg); + writel_relaxed(val, base + g->intr_cfg_reg); if (test_bit(d->hwirq, pctrl->dual_edge_irqs)) msm_gpio_update_dual_edge_pos(pctrl, g, d); @@ -1596,7 +1596,7 @@ static void msm_gpio_irq_handler(struct irq_desc *desc) for_each_set_bit(i, pctrl->enabled_irqs, pctrl->chip.ngpio) { g = &pctrl->soc->groups[i]; base = reassign_pctrl_reg(pctrl->soc, i); - val = readl(base + g->intr_status_reg); + val = readl_relaxed(base + g->intr_status_reg); if (val & BIT(g->intr_status_bit)) { irq_pin = irq_find_mapping(gc->irqdomain, i); generic_handle_irq(irq_pin); diff --git a/drivers/platform/msm/ep_pcie/ep_pcie_com.h b/drivers/platform/msm/ep_pcie/ep_pcie_com.h index a6044f85c39d..014bb9a72055 100644 --- a/drivers/platform/msm/ep_pcie/ep_pcie_com.h +++ b/drivers/platform/msm/ep_pcie/ep_pcie_com.h @@ -207,19 +207,11 @@ } while (0) #define EP_PCIE_DBG(dev, fmt, arg...) 
do { \ - if ((dev)->ipc_log_ful) \ - ipc_log_string((dev)->ipc_log_ful, "%s: " fmt, __func__, arg); \ if (ep_pcie_get_debug_mask()) \ pr_alert("%s: " fmt, __func__, arg); \ } while (0) #define EP_PCIE_DBG2(dev, fmt, arg...) do { \ - if ((dev)->ipc_log_sel) \ - ipc_log_string((dev)->ipc_log_sel, \ - "DBG1:%s: " fmt, __func__, arg); \ - if ((dev)->ipc_log_ful) \ - ipc_log_string((dev)->ipc_log_ful, \ - "DBG2:%s: " fmt, __func__, arg); \ if (ep_pcie_get_debug_mask()) \ pr_alert("%s: " fmt, __func__, arg); \ } while (0) @@ -227,28 +219,15 @@ #define EP_PCIE_DBG_FS(fmt, arg...) pr_alert("%s: " fmt, __func__, arg) #define EP_PCIE_DUMP(dev, fmt, arg...) do { \ - if ((dev)->ipc_log_dump) \ - ipc_log_string((dev)->ipc_log_dump, \ - "DUMP:%s: " fmt, __func__, arg); \ if (ep_pcie_get_debug_mask()) \ pr_alert("%s: " fmt, __func__, arg); \ } while (0) #define EP_PCIE_INFO(dev, fmt, arg...) do { \ - if ((dev)->ipc_log_sel) \ - ipc_log_string((dev)->ipc_log_sel, \ - "INFO:%s: " fmt, __func__, arg); \ - if ((dev)->ipc_log_ful) \ - ipc_log_string((dev)->ipc_log_ful, "%s: " fmt, __func__, arg); \ pr_info("%s: " fmt, __func__, arg); \ } while (0) #define EP_PCIE_ERR(dev, fmt, arg...) do { \ - if ((dev)->ipc_log_sel) \ - ipc_log_string((dev)->ipc_log_sel, \ - "ERR:%s: " fmt, __func__, arg); \ - if ((dev)->ipc_log_ful) \ - ipc_log_string((dev)->ipc_log_ful, "%s: " fmt, __func__, arg); \ pr_err("%s: " fmt, __func__, arg); \ } while (0) diff --git a/drivers/platform/msm/gsi/gsi.h b/drivers/platform/msm/gsi/gsi.h index 59d16f86b34b..2f044f2b0d0a 100644 --- a/drivers/platform/msm/gsi/gsi.h +++ b/drivers/platform/msm/gsi/gsi.h @@ -35,12 +35,7 @@ #define gsi_readl(c) (readl(c)) #define gsi_writel(v, c) ({ __iowmb(); writel_relaxed((v), (c)); }) -#define GSI_IPC_LOGGING(buf, fmt, args...) \ - do { \ - if (buf) \ - ipc_log_string((buf), fmt, __func__, __LINE__, \ - ## args); \ - } while (0) +#define GSI_IPC_LOGGING(buf, fmt, args...) ((void)0) #define GSIDBG(fmt, args...) 
\ do { \ diff --git a/drivers/platform/msm/ipa/ipa_clients/Makefile b/drivers/platform/msm/ipa/ipa_clients/Makefile index bdd86a9af797..8553e8dbe74b 100644 --- a/drivers/platform/msm/ipa/ipa_clients/Makefile +++ b/drivers/platform/msm/ipa/ipa_clients/Makefile @@ -2,3 +2,4 @@ obj-$(CONFIG_IPA3) += ipa_usb.o odu_bridge.o ipa_mhi_client.o ipa_uc_offload.o i obj-$(CONFIG_IPA) += odu_bridge.o ipa_mhi_client.o ipa_uc_offload.o ipa_wdi3.o obj-$(CONFIG_ECM_IPA) += ecm_ipa.o obj-$(CONFIG_RNDIS_IPA) += rndis_ipa.o +CFLAGS_ipa_mhi_client.o := -DCONFIG_DEBUG_FS diff --git a/drivers/platform/msm/ipa/ipa_clients/ecm_ipa.c b/drivers/platform/msm/ipa/ipa_clients/ecm_ipa.c index d1378d8f7501..edcdd624fe62 100644 --- a/drivers/platform/msm/ipa/ipa_clients/ecm_ipa.c +++ b/drivers/platform/msm/ipa/ipa_clients/ecm_ipa.c @@ -34,23 +34,13 @@ #define IPA_ECM_IPC_LOG_PAGES 50 -#define IPA_ECM_IPC_LOGGING(buf, fmt, args...) \ - do { \ - if (buf) \ - ipc_log_string((buf), fmt, __func__, __LINE__, \ - ## args); \ - } while (0) - +#define IPA_ECM_IPC_LOGGING(buf, fmt, args...) ((void)0) static void *ipa_ecm_logbuf; #define ECM_IPA_DEBUG(fmt, args...) \ do { \ pr_debug(DRIVER_NAME " %s:%d "\ fmt, __func__, __LINE__, ## args);\ - if (ipa_ecm_logbuf) { \ - IPA_ECM_IPC_LOGGING(ipa_ecm_logbuf, \ - DRIVER_NAME " %s:%d " fmt, ## args); \ - } \ } while (0) #define ECM_IPA_DEBUG_XMIT(fmt, args...) \ @@ -60,20 +50,12 @@ static void *ipa_ecm_logbuf; do { \ pr_info(DRIVER_NAME "@%s@%d@ctx:%s: "\ fmt, __func__, __LINE__, current->comm, ## args);\ - if (ipa_ecm_logbuf) { \ - IPA_ECM_IPC_LOGGING(ipa_ecm_logbuf, \ - DRIVER_NAME " %s:%d " fmt, ## args); \ - } \ } while (0) #define ECM_IPA_ERROR(fmt, args...) 
\ do { \ pr_err(DRIVER_NAME "@%s@%d@ctx:%s: "\ fmt, __func__, __LINE__, current->comm, ## args);\ - if (ipa_ecm_logbuf) { \ - IPA_ECM_IPC_LOGGING(ipa_ecm_logbuf, \ - DRIVER_NAME " %s:%d " fmt, ## args); \ - } \ } while (0) #define NULL_CHECK(ptr) \ diff --git a/drivers/platform/msm/ipa/ipa_clients/ipa_gsb.c b/drivers/platform/msm/ipa/ipa_clients/ipa_gsb.c index 044d512a0b80..f8252e4ba7ee 100644 --- a/drivers/platform/msm/ipa/ipa_clients/ipa_gsb.c +++ b/drivers/platform/msm/ipa/ipa_clients/ipa_gsb.c @@ -76,7 +76,6 @@ static struct dentry *dfile_stats; #define IPA_GSB_AGGR_BYTE_LIMIT 14 #define IPA_GSB_AGGR_TIME_LIMIT 1000 /* 1000 us */ - /** * struct stats - driver statistics, * @num_ul_packets: number of uplink packets diff --git a/drivers/platform/msm/ipa/ipa_clients/rndis_ipa.c b/drivers/platform/msm/ipa/ipa_clients/rndis_ipa.c index 634b2a42ab98..6a75e31bae28 100644 --- a/drivers/platform/msm/ipa/ipa_clients/rndis_ipa.c +++ b/drivers/platform/msm/ipa/ipa_clients/rndis_ipa.c @@ -62,22 +62,13 @@ #define IPA_RNDIS_IPC_LOG_PAGES 50 -#define IPA_RNDIS_IPC_LOGGING(buf, fmt, args...) \ - do { \ - if (buf) \ - ipc_log_string((buf), fmt, __func__, __LINE__, \ - ## args); \ - } while (0) +#define IPA_RNDIS_IPC_LOGGING(buf, fmt, args...) ((void)0) static void *ipa_rndis_logbuf; #define RNDIS_IPA_DEBUG(fmt, args...) \ do { \ pr_debug(DRV_NAME " %s:%d " fmt, __func__, __LINE__, ## args);\ - if (ipa_rndis_logbuf) { \ - IPA_RNDIS_IPC_LOGGING(ipa_rndis_logbuf, \ - DRV_NAME " %s:%d " fmt, ## args); \ - } \ } while (0) #define RNDIS_IPA_DEBUG_XMIT(fmt, args...) 
\ @@ -87,10 +78,6 @@ static void *ipa_rndis_logbuf; do { \ pr_err(DRV_NAME "@%s@%d@ctx:%s: "\ fmt, __func__, __LINE__, current->comm, ## args);\ - if (ipa_rndis_logbuf) { \ - IPA_RNDIS_IPC_LOGGING(ipa_rndis_logbuf, \ - DRV_NAME " %s:%d " fmt, ## args); \ - } \ } while (0) #define NULL_CHECK_RETVAL(ptr) \ diff --git a/drivers/platform/msm/ipa/ipa_common_i.h b/drivers/platform/msm/ipa/ipa_common_i.h index 7412855449fb..7f75aa447689 100644 --- a/drivers/platform/msm/ipa/ipa_common_i.h +++ b/drivers/platform/msm/ipa/ipa_common_i.h @@ -346,12 +346,7 @@ struct ipa_hdr_offset_entry { extern const char *ipa_clients_strings[]; -#define IPA_IPC_LOGGING(buf, fmt, args...) \ - do { \ - if (buf) \ - ipc_log_string((buf), fmt, __func__, __LINE__, \ - ## args); \ - } while (0) +#define IPA_IPC_LOGGING(buf, fmt, args...) ((void)0) void ipa_inc_client_enable_clks(struct ipa_active_client_logging_info *id); void ipa_dec_client_disable_clks(struct ipa_active_client_logging_info *id); diff --git a/drivers/platform/msm/ipa/ipa_v3/Makefile b/drivers/platform/msm/ipa/ipa_v3/Makefile index a7b53937ac1e..c3f0782afa31 100644 --- a/drivers/platform/msm/ipa/ipa_v3/Makefile +++ b/drivers/platform/msm/ipa/ipa_v3/Makefile @@ -1,3 +1,4 @@ +ccflags-y += -DCONFIG_DEBUG_FS obj-$(CONFIG_IPA3) += ipahal/ obj-$(CONFIG_IPA3) += ipat.o diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_utils.c b/drivers/platform/msm/ipa/ipa_v3/ipa_utils.c index ac42a899b617..710b64fcc37e 100644 --- a/drivers/platform/msm/ipa/ipa_v3/ipa_utils.c +++ b/drivers/platform/msm/ipa/ipa_v3/ipa_utils.c @@ -6761,7 +6761,7 @@ static int ipa3_tag_generate_force_close_desc(struct ipa3_desc desc[], int res; struct ipahal_imm_cmd_register_write reg_write_agg_close; struct ipahal_imm_cmd_pyld *cmd_pyld; - struct ipahal_reg_valmask valmask; + struct ipahal_reg_valmask valmask = {0}; for (i = start_pipe; i < end_pipe; i++) { ipahal_read_reg_n_fields(IPA_ENDP_INIT_AGGR_n, i, &ep_aggr); diff --git a/drivers/platform/msm/ipa/ipa_v3/rmnet_ipa.c 
b/drivers/platform/msm/ipa/ipa_v3/rmnet_ipa.c index 61116e5f05f4..bb91aceb65b0 100644 --- a/drivers/platform/msm/ipa/ipa_v3/rmnet_ipa.c +++ b/drivers/platform/msm/ipa/ipa_v3/rmnet_ipa.c @@ -1235,9 +1235,9 @@ static int ipa3_wwan_change_mtu(struct net_device *dev, int new_mtu) * later * -EFAULT: Error while transmitting the skb */ -static int ipa3_wwan_xmit(struct sk_buff *skb, struct net_device *dev) +static netdev_tx_t ipa3_wwan_xmit(struct sk_buff *skb, struct net_device *dev) { - int ret = 0; + netdev_tx_t ret = NETDEV_TX_OK; bool qmap_check; struct ipa3_wwan_private *wwan_ptr = netdev_priv(dev); unsigned long flags; diff --git a/drivers/platform/msm/ipa/ipa_v3/rmnet_ipa_fd_ioctl.c b/drivers/platform/msm/ipa/ipa_v3/rmnet_ipa_fd_ioctl.c index 8c53d5df1e6f..4442a519e8d8 100644 --- a/drivers/platform/msm/ipa/ipa_v3/rmnet_ipa_fd_ioctl.c +++ b/drivers/platform/msm/ipa/ipa_v3/rmnet_ipa_fd_ioctl.c @@ -103,7 +103,7 @@ static long ipa3_wan_ioctl(struct file *filp, IPAWANDBG("device %s got WAN_IOC_ADD_FLT_RULE :>>>\n", DRIVER_NAME); pyld_sz = sizeof(struct ipa_install_fltr_rule_req_msg_v01); - param = kzalloc(pyld_sz, GFP_KERNEL); + param = kzalloc(pyld_sz, GFP_KERNEL | __GFP_NOWARN); if (!param) { retval = -ENOMEM; break; @@ -128,7 +128,7 @@ static long ipa3_wan_ioctl(struct file *filp, IPAWANDBG("device %s got WAN_IOC_ADD_FLT_RULE_EX :>>>\n", DRIVER_NAME); pyld_sz = sizeof(struct ipa_install_fltr_rule_req_ex_msg_v01); - param = kzalloc(pyld_sz, GFP_KERNEL); + param = kzalloc(pyld_sz, GFP_KERNEL | __GFP_NOWARN); if (!param) { retval = -ENOMEM; break; @@ -153,7 +153,7 @@ static long ipa3_wan_ioctl(struct file *filp, IPAWANDBG("device %s got WAN_IOC_ADD_OFFLOAD_CONNECTION :>>>\n", DRIVER_NAME); pyld_sz = sizeof(struct ipa_add_offload_connection_req_msg_v01); - param = kzalloc(pyld_sz, GFP_KERNEL); + param = kzalloc(pyld_sz, GFP_KERNEL | __GFP_NOWARN); if (!param) { retval = -ENOMEM; break; @@ -180,7 +180,7 @@ static long ipa3_wan_ioctl(struct file *filp, DRIVER_NAME); 
pyld_sz = rmv_offload_req__msg_size; - param = kzalloc(pyld_sz, GFP_KERNEL); + param = kzalloc(pyld_sz, GFP_KERNEL | __GFP_NOWARN); if (!param) { retval = -ENOMEM; break; @@ -207,7 +207,7 @@ static long ipa3_wan_ioctl(struct file *filp, DRIVER_NAME); pyld_sz = sizeof(struct ipa_configure_ul_firewall_rules_req_msg_v01); - param = kzalloc(pyld_sz, GFP_KERNEL); + param = kzalloc(pyld_sz, GFP_KERNEL | __GFP_NOWARN); if (!param) { retval = -ENOMEM; break; @@ -233,7 +233,7 @@ static long ipa3_wan_ioctl(struct file *filp, IPAWANDBG("device %s got WAN_IOC_ADD_FLT_RULE_INDEX :>>>\n", DRIVER_NAME); pyld_sz = sizeof(struct ipa_fltr_installed_notif_req_msg_v01); - param = kzalloc(pyld_sz, GFP_KERNEL); + param = kzalloc(pyld_sz, GFP_KERNEL | __GFP_NOWARN); if (!param) { retval = -ENOMEM; break; @@ -258,7 +258,7 @@ static long ipa3_wan_ioctl(struct file *filp, IPAWANDBG("device %s got WAN_IOC_VOTE_FOR_BW_MBPS :>>>\n", DRIVER_NAME); pyld_sz = sizeof(uint32_t); - param = kzalloc(pyld_sz, GFP_KERNEL); + param = kzalloc(pyld_sz, GFP_KERNEL | __GFP_NOWARN); if (!param) { retval = -ENOMEM; break; @@ -281,7 +281,7 @@ static long ipa3_wan_ioctl(struct file *filp, case WAN_IOC_POLL_TETHERING_STATS: IPAWANDBG_LOW("got WAN_IOCTL_POLL_TETHERING_STATS :>>>\n"); pyld_sz = sizeof(struct wan_ioctl_poll_tethering_stats); - param = kzalloc(pyld_sz, GFP_KERNEL); + param = kzalloc(pyld_sz, GFP_KERNEL | __GFP_NOWARN); if (!param) { retval = -ENOMEM; break; @@ -305,7 +305,7 @@ static long ipa3_wan_ioctl(struct file *filp, case WAN_IOC_SET_DATA_QUOTA: IPAWANDBG_LOW("got WAN_IOCTL_SET_DATA_QUOTA :>>>\n"); pyld_sz = sizeof(struct wan_ioctl_set_data_quota); - param = kzalloc(pyld_sz, GFP_KERNEL); + param = kzalloc(pyld_sz, GFP_KERNEL | __GFP_NOWARN); if (!param) { retval = -ENOMEM; break; @@ -333,7 +333,7 @@ static long ipa3_wan_ioctl(struct file *filp, case WAN_IOC_SET_TETHER_CLIENT_PIPE: IPAWANDBG_LOW("got WAN_IOC_SET_TETHER_CLIENT_PIPE :>>>\n"); pyld_sz = sizeof(struct 
wan_ioctl_set_tether_client_pipe); - param = kzalloc(pyld_sz, GFP_KERNEL); + param = kzalloc(pyld_sz, GFP_KERNEL | __GFP_NOWARN); if (!param) { retval = -ENOMEM; break; @@ -353,7 +353,7 @@ static long ipa3_wan_ioctl(struct file *filp, case WAN_IOC_QUERY_TETHER_STATS: IPAWANDBG_LOW("got WAN_IOC_QUERY_TETHER_STATS :>>>\n"); pyld_sz = sizeof(struct wan_ioctl_query_tether_stats); - param = kzalloc(pyld_sz, GFP_KERNEL); + param = kzalloc(pyld_sz, GFP_KERNEL | __GFP_NOWARN); if (!param) { retval = -ENOMEM; break; @@ -379,7 +379,7 @@ static long ipa3_wan_ioctl(struct file *filp, case WAN_IOC_QUERY_TETHER_STATS_ALL: IPAWANDBG_LOW("got WAN_IOC_QUERY_TETHER_STATS_ALL :>>>\n"); pyld_sz = sizeof(struct wan_ioctl_query_tether_stats_all); - param = kzalloc(pyld_sz, GFP_KERNEL); + param = kzalloc(pyld_sz, GFP_KERNEL | __GFP_NOWARN); if (!param) { retval = -ENOMEM; break; @@ -406,7 +406,7 @@ static long ipa3_wan_ioctl(struct file *filp, IPAWANDBG_LOW("device %s got WAN_IOC_RESET_TETHER_STATS :>>>\n", DRIVER_NAME); pyld_sz = sizeof(struct wan_ioctl_reset_tether_stats); - param = kzalloc(pyld_sz, GFP_KERNEL); + param = kzalloc(pyld_sz, GFP_KERNEL | __GFP_NOWARN); if (!param) { retval = -ENOMEM; break; @@ -428,7 +428,7 @@ static long ipa3_wan_ioctl(struct file *filp, IPAWANDBG_LOW("device %s got WAN_IOC_NOTIFY_WAN_STATE :>>>\n", DRIVER_NAME); pyld_sz = sizeof(struct wan_ioctl_notify_wan_state); - param = kzalloc(pyld_sz, GFP_KERNEL); + param = kzalloc(pyld_sz, GFP_KERNEL | __GFP_NOWARN); if (!param) { retval = -ENOMEM; break; @@ -454,7 +454,7 @@ static long ipa3_wan_ioctl(struct file *filp, case WAN_IOC_ENABLE_PER_CLIENT_STATS: IPAWANDBG_LOW("got WAN_IOC_ENABLE_PER_CLIENT_STATS :>>>\n"); pyld_sz = sizeof(bool); - param = kzalloc(pyld_sz, GFP_KERNEL); + param = kzalloc(pyld_sz, GFP_KERNEL | __GFP_NOWARN); if (!param) { retval = -ENOMEM; break; @@ -473,7 +473,7 @@ static long ipa3_wan_ioctl(struct file *filp, case WAN_IOC_QUERY_PER_CLIENT_STATS: IPAWANDBG_LOW("got 
WAN_IOC_QUERY_PER_CLIENT_STATS :>>>\n"); pyld_sz = sizeof(struct wan_ioctl_query_per_client_stats); - param = kzalloc(pyld_sz, GFP_KERNEL); + param = kzalloc(pyld_sz, GFP_KERNEL | __GFP_NOWARN); if (!param) { retval = -ENOMEM; break; @@ -502,7 +502,7 @@ static long ipa3_wan_ioctl(struct file *filp, case WAN_IOC_SET_LAN_CLIENT_INFO: IPAWANDBG_LOW("got WAN_IOC_SET_LAN_CLIENT_INFO :>>>\n"); pyld_sz = sizeof(struct wan_ioctl_lan_client_info); - param = kzalloc(pyld_sz, GFP_KERNEL); + param = kzalloc(pyld_sz, GFP_KERNEL | __GFP_NOWARN); if (!param) { retval = -ENOMEM; break; @@ -522,7 +522,7 @@ static long ipa3_wan_ioctl(struct file *filp, case WAN_IOC_CLEAR_LAN_CLIENT_INFO: IPAWANDBG_LOW("got WAN_IOC_CLEAR_LAN_CLIENT_INFO :>>>\n"); pyld_sz = sizeof(struct wan_ioctl_lan_client_info); - param = kzalloc(pyld_sz, GFP_KERNEL); + param = kzalloc(pyld_sz, GFP_KERNEL | __GFP_NOWARN); if (!param) { retval = -ENOMEM; break; @@ -543,7 +543,7 @@ static long ipa3_wan_ioctl(struct file *filp, case WAN_IOC_SEND_LAN_CLIENT_MSG: IPAWANDBG_LOW("got WAN_IOC_SEND_LAN_CLIENT_MSG :>>>\n"); pyld_sz = sizeof(struct wan_ioctl_send_lan_client_msg); - param = kzalloc(pyld_sz, GFP_KERNEL); + param = kzalloc(pyld_sz, GFP_KERNEL | __GFP_NOWARN); if (!param) { retval = -ENOMEM; break; diff --git a/drivers/platform/msm/mhi_dev/mhi.h b/drivers/platform/msm/mhi_dev/mhi.h index 5dd8559dcaa4..4d1b6706602d 100644 --- a/drivers/platform/msm/mhi_dev/mhi.h +++ b/drivers/platform/msm/mhi_dev/mhi.h @@ -648,10 +648,6 @@ extern void *mhi_ipc_log; if (_msg_lvl >= mhi_msg_lvl) { \ pr_err("[%s] "_msg, __func__, ##__VA_ARGS__); \ } \ - if (mhi_ipc_log && (_msg_lvl >= mhi_ipc_msg_lvl)) { \ - ipc_log_string(mhi_ipc_log, \ - "[0x%x %s] " _msg, bhi_imgtxdb, __func__, ##__VA_ARGS__); \ - } \ } while (0) diff --git a/drivers/platform/msm/mhi_dev/mhi_dev_net.c b/drivers/platform/msm/mhi_dev/mhi_dev_net.c index d01e15711e18..5b19d7b53c0a 100644 --- a/drivers/platform/msm/mhi_dev/mhi_dev_net.c +++ 
b/drivers/platform/msm/mhi_dev/mhi_dev_net.c @@ -76,10 +76,6 @@ struct mhi_dev_net_chan_attr { if (_msg_lvl >= mhi_net_msg_lvl) { \ pr_err("[%s] "_msg, __func__, ##__VA_ARGS__); \ } \ - if (mhi_net_ipc_log && (_msg_lvl >= mhi_net_ipc_log_lvl)) { \ - ipc_log_string(mhi_net_ipc_log, \ - "[%s] " _msg, __func__, ##__VA_ARGS__); \ - } \ } while (0) module_param(mhi_net_msg_lvl, uint, 0644); @@ -612,7 +608,9 @@ static int mhi_dev_net_close(void) } /* freeing mhi client and IPC context */ kfree(client); - kfree(mhi_net_ipc_log); + if (mhi_net_ipc_log) + kfree(mhi_net_ipc_log); + return 0; } diff --git a/drivers/platform/msm/mhi_dev/mhi_uci.c b/drivers/platform/msm/mhi_dev/mhi_uci.c index 0f80be4fbd9a..73ac3bc1e527 100644 --- a/drivers/platform/msm/mhi_dev/mhi_uci.c +++ b/drivers/platform/msm/mhi_dev/mhi_uci.c @@ -393,10 +393,6 @@ struct mhi_uci_ctxt_t { if (_msg_lvl >= mhi_uci_msg_lvl) { \ pr_err("[%s] "_msg, __func__, ##__VA_ARGS__); \ } \ - if (mhi_uci_ipc_log && (_msg_lvl >= mhi_uci_ipc_log_lvl)) { \ - ipc_log_string(mhi_uci_ipc_log, \ - "[%s] " _msg, __func__, ##__VA_ARGS__); \ - } \ } while (0) diff --git a/drivers/platform/msm/qcom-geni-se.c b/drivers/platform/msm/qcom-geni-se.c index 4b13c802d10a..e4371e67ff4e 100644 --- a/drivers/platform/msm/qcom-geni-se.c +++ b/drivers/platform/msm/qcom-geni-se.c @@ -152,7 +152,7 @@ static int geni_se_iommu_map_and_attach(struct geni_se_device *geni_se_dev); */ unsigned int geni_read_reg_nolog(void __iomem *base, int offset) { - return readl_relaxed_no_log(base + offset); + return readl_relaxed(base + offset); } EXPORT_SYMBOL(geni_read_reg_nolog); @@ -164,7 +164,7 @@ EXPORT_SYMBOL(geni_read_reg_nolog); */ void geni_write_reg_nolog(unsigned int value, void __iomem *base, int offset) { - return writel_relaxed_no_log(value, (base + offset)); + return writel_relaxed(value, (base + offset)); } EXPORT_SYMBOL(geni_write_reg_nolog); @@ -1962,7 +1962,7 @@ static int geni_se_probe(struct platform_device *pdev) geni_se_dev->log_ctx = 
ipc_log_context_create(NUM_LOG_PAGES, dev_name(geni_se_dev->dev), 0); if (!geni_se_dev->log_ctx) - dev_err(dev, "%s Failed to allocate log context\n", __func__); + dev_dbg(dev, "%s Failed to allocate log context\n", __func__); dev_set_drvdata(dev, geni_se_dev); ret = of_platform_populate(dev->of_node, geni_se_dt_match, NULL, dev); diff --git a/drivers/platform/msm/sps/sps.c b/drivers/platform/msm/sps/sps.c index c9bb3e7586e2..0cc52d8c1226 100644 --- a/drivers/platform/msm/sps/sps.c +++ b/drivers/platform/msm/sps/sps.c @@ -2205,7 +2205,7 @@ int sps_register_bam_device(const struct sps_bam_props *bam_props, bam->ipc_log0 = ipc_log_context_create(SPS_IPC_LOGPAGES, bam_name, 0); if (!bam->ipc_log0) - SPS_ERR(sps, "%s : unable to create IPC Logging 0 for bam %pa", + SPS_DBG(sps, "%s : unable to create IPC Logging 0 for bam %pa", __func__, &bam->props.phys_addr); snprintf(bam_name, sizeof(bam_name), "sps_bam_%pa_1", @@ -2213,7 +2213,7 @@ int sps_register_bam_device(const struct sps_bam_props *bam_props, bam->ipc_log1 = ipc_log_context_create(SPS_IPC_LOGPAGES, bam_name, 0); if (!bam->ipc_log1) - SPS_ERR(sps, "%s : unable to create IPC Logging 1 for bam %pa", + SPS_DBG(sps, "%s : unable to create IPC Logging 1 for bam %pa", __func__, &bam->props.phys_addr); snprintf(bam_name, sizeof(bam_name), "sps_bam_%pa_2", @@ -2221,7 +2221,7 @@ int sps_register_bam_device(const struct sps_bam_props *bam_props, bam->ipc_log2 = ipc_log_context_create(SPS_IPC_LOGPAGES, bam_name, 0); if (!bam->ipc_log2) - SPS_ERR(sps, "%s : unable to create IPC Logging 2 for bam %pa", + SPS_DBG(sps, "%s : unable to create IPC Logging 2 for bam %pa", __func__, &bam->props.phys_addr); snprintf(bam_name, sizeof(bam_name), "sps_bam_%pa_3", @@ -2229,7 +2229,7 @@ int sps_register_bam_device(const struct sps_bam_props *bam_props, bam->ipc_log3 = ipc_log_context_create(SPS_IPC_LOGPAGES, bam_name, 0); if (!bam->ipc_log3) - SPS_ERR(sps, "%s : unable to create IPC Logging 3 for bam %pa", + SPS_DBG(sps, "%s : unable 
to create IPC Logging 3 for bam %pa", __func__, &bam->props.phys_addr); snprintf(bam_name, sizeof(bam_name), "sps_bam_%pa_4", @@ -2237,7 +2237,7 @@ int sps_register_bam_device(const struct sps_bam_props *bam_props, bam->ipc_log4 = ipc_log_context_create(SPS_IPC_LOGPAGES, bam_name, 0); if (!bam->ipc_log4) - SPS_ERR(sps, "%s : unable to create IPC Logging 4 for bam %pa", + SPS_DBG(sps, "%s : unable to create IPC Logging 4 for bam %pa", __func__, &bam->props.phys_addr); if (bam_props->ipc_loglevel) diff --git a/drivers/platform/msm/sps/spsi.h b/drivers/platform/msm/sps/spsi.h index 690cd38ff8ca..200309cc398c 100644 --- a/drivers/platform/msm/sps/spsi.h +++ b/drivers/platform/msm/sps/spsi.h @@ -117,33 +117,8 @@ extern u8 logging_option; extern u8 debug_level_option; extern u8 print_limit_option; -#define SPS_IPC(idx, dev, msg, args...) do { \ - if (dev) { \ - if ((idx == 0) && (dev)->ipc_log0) \ - ipc_log_string((dev)->ipc_log0, \ - "%s: " msg, __func__, args); \ - else if ((idx == 1) && (dev)->ipc_log1) \ - ipc_log_string((dev)->ipc_log1, \ - "%s: " msg, __func__, args); \ - else if ((idx == 2) && (dev)->ipc_log2) \ - ipc_log_string((dev)->ipc_log2, \ - "%s: " msg, __func__, args); \ - else if ((idx == 3) && (dev)->ipc_log3) \ - ipc_log_string((dev)->ipc_log3, \ - "%s: " msg, __func__, args); \ - else if ((idx == 4) && (dev)->ipc_log4) \ - ipc_log_string((dev)->ipc_log4, \ - "%s: " msg, __func__, args); \ - else \ - pr_debug("sps: no such IPC logging index!\n"); \ - } \ - } while (0) #define SPS_DUMP(msg, args...) do { \ - SPS_IPC(4, sps, msg, args); \ - if (sps) { \ - if (sps->ipc_log4 == NULL) \ - pr_info(msg, ##args); \ - } \ + pr_info(msg, ##args); \ } while (0) #define SPS_ERR(dev, msg, args...) do { \ if (logging_option != 1) { \ @@ -152,7 +127,6 @@ extern u8 print_limit_option; else \ pr_err(msg, ##args); \ } \ - SPS_IPC(3, dev, msg, args); \ } while (0) #define SPS_INFO(dev, msg, args...) 
do { \ if (logging_option != 1) { \ @@ -161,7 +135,6 @@ extern u8 print_limit_option; else \ pr_info(msg, ##args); \ } \ - SPS_IPC(3, dev, msg, args); \ } while (0) #define SPS_DBG(dev, msg, args...) do { \ if ((unlikely(logging_option > 1)) \ @@ -172,10 +145,6 @@ extern u8 print_limit_option; pr_info(msg, ##args); \ } else \ pr_debug(msg, ##args); \ - if (dev) { \ - if ((dev)->ipc_loglevel <= 0) \ - SPS_IPC(0, dev, msg, args); \ - } \ } while (0) #define SPS_DBG1(dev, msg, args...) do { \ if ((unlikely(logging_option > 1)) \ @@ -186,10 +155,6 @@ extern u8 print_limit_option; pr_info(msg, ##args); \ } else \ pr_debug(msg, ##args); \ - if (dev) { \ - if ((dev)->ipc_loglevel <= 1) \ - SPS_IPC(1, dev, msg, args); \ - } \ } while (0) #define SPS_DBG2(dev, msg, args...) do { \ if ((unlikely(logging_option > 1)) \ @@ -200,10 +165,6 @@ extern u8 print_limit_option; pr_info(msg, ##args); \ } else \ pr_debug(msg, ##args); \ - if (dev) { \ - if ((dev)->ipc_loglevel <= 2) \ - SPS_IPC(2, dev, msg, args); \ - } \ } while (0) #define SPS_DBG3(dev, msg, args...) do { \ if ((unlikely(logging_option > 1)) \ @@ -214,10 +175,6 @@ extern u8 print_limit_option; pr_info(msg, ##args); \ } else \ pr_debug(msg, ##args); \ - if (dev) { \ - if ((dev)->ipc_loglevel <= 3) \ - SPS_IPC(3, dev, msg, args); \ - } \ } while (0) #else #define SPS_DBG3(dev, msg, args...) 
pr_debug(msg, ##args) diff --git a/drivers/power/supply/power_supply_sysfs.c b/drivers/power/supply/power_supply_sysfs.c index b2433184d5cb..87efb10f910d 100644 --- a/drivers/power/supply/power_supply_sysfs.c +++ b/drivers/power/supply/power_supply_sysfs.c @@ -111,7 +111,7 @@ static ssize_t power_supply_show_property(struct device *dev, dev_dbg(dev, "driver has no data for `%s' property\n", attr->attr.name); else if (ret != -ENODEV && ret != -EAGAIN) - dev_err_ratelimited(dev, + dev_dbg_ratelimited(dev, "driver failed to report `%s' property: %zd\n", attr->attr.name, ret); return ret; @@ -510,29 +510,12 @@ void power_supply_init_attrs(struct device_type *dev_type) __power_supply_attrs[i] = &power_supply_attrs[i].attr; } -static char *kstruprdup(const char *str, gfp_t gfp) -{ - char *ret, *ustr; - - ustr = ret = kmalloc(strlen(str) + 1, gfp); - - if (!ret) - return NULL; - - while (*str) - *ustr++ = toupper(*str++); - - *ustr = 0; - - return ret; -} - int power_supply_uevent(struct device *dev, struct kobj_uevent_env *env) { struct power_supply *psy = dev_get_drvdata(dev); int ret = 0, j; char *prop_buf; - char *attrname; + char attrname[64]; if (!psy || !psy->desc) { dev_dbg(dev, "No power supply yet\n"); @@ -549,7 +532,8 @@ int power_supply_uevent(struct device *dev, struct kobj_uevent_env *env) for (j = 0; j < psy->desc->num_properties; j++) { struct device_attribute *attr; - char *line; + const char *str; + char *line, *ustr; attr = &power_supply_attrs[psy->desc->properties[j]]; @@ -568,14 +552,14 @@ int power_supply_uevent(struct device *dev, struct kobj_uevent_env *env) if (line) *line = 0; - attrname = kstruprdup(attr->attr.name, GFP_KERNEL); - if (!attrname) { - ret = -ENOMEM; - goto out; - } + str = attr->attr.name; + ustr = attrname; + while (*str) + *ustr++ = toupper(*str++); + + *ustr = 0; ret = add_uevent_var(env, "POWER_SUPPLY_%s=%s", attrname, prop_buf); - kfree(attrname); if (ret) goto out; } diff --git a/drivers/power/supply/qcom/pmic-voter.c 
b/drivers/power/supply/qcom/pmic-voter.c index a97a1773b728..59d9da5f493f 100644 --- a/drivers/power/supply/qcom/pmic-voter.c +++ b/drivers/power/supply/qcom/pmic-voter.c @@ -489,7 +489,7 @@ int vote(struct votable *votable, const char *client_str, bool enabled, int val) if (!votable->voted_on || (effective_result != votable->effective_result)) { if (strcmp(votable->name, "FG_WS") != 0) { - pr_info("%s: current vote is now %d voted by %s,%d, previous voted %d\n", + pr_debug("%s: current vote is now %d voted by %s,%d, previous voted %d\n", votable->name, effective_result, get_client_str(votable, effective_id), effective_id, votable->effective_result); diff --git a/drivers/power/supply/qcom/qpnp-fg-gen4.c b/drivers/power/supply/qcom/qpnp-fg-gen4.c index 53fcac9d6639..14d43984c6b5 100644 --- a/drivers/power/supply/qcom/qpnp-fg-gen4.c +++ b/drivers/power/supply/qcom/qpnp-fg-gen4.c @@ -6057,11 +6057,7 @@ static void soc_work_fn(struct work_struct *work) { struct fg_dev *fg = container_of(work, struct fg_dev, soc_work.work); - struct fg_gen4_chip *chip = container_of(fg, - struct fg_gen4_chip, fg); - int msoc = 0, soc = 0, curr_ua = 0, volt_uv = 0, temp = 0; - int esr_uohms = 0; - int cycle_count; + int soc = 0, temp = 0; int rc; static int prev_soc = -EINVAL; @@ -6069,39 +6065,10 @@ static void soc_work_fn(struct work_struct *work) if (rc < 0) pr_err("Error in getting capacity, rc=%d\n", rc); - rc = fg_get_msoc_raw(fg, &msoc); - if (rc < 0) - pr_err("Error in getting msoc, rc=%d\n", rc); - - rc = fg_get_battery_resistance(fg, &esr_uohms); - if (rc < 0) - pr_err("Error in getting esr_uohms, rc=%d\n", rc); - - fg_get_battery_current(fg, &curr_ua); - if (rc < 0) - pr_err("failed to get current, rc=%d\n", rc); - - rc = fg_get_battery_voltage(fg, &volt_uv); - if (rc < 0) - pr_err("failed to get voltage, rc=%d\n", rc); - rc = fg_gen4_get_battery_temp(fg, &temp); if (rc < 0) pr_err("Error in getting batt_temp, rc=%d\n", rc); - rc = get_cycle_count(chip->counter, &cycle_count); 
- if (rc < 0) - pr_err("failed to get cycle count, rc=%d\n", rc); - - pr_info("adjust_soc: s %d r %d i %d v %d t %d cc %d m 0x%02x\n", - soc, - esr_uohms, - curr_ua/1000, - volt_uv/1000, - temp, - cycle_count, - msoc); - if (temp < 450 && fg->last_batt_temp >= 450) { /* follow the way that fg_notifier_cb use wake lock */ pm_stay_awake(fg->dev); diff --git a/drivers/power/supply/qcom/qpnp-smb5.c b/drivers/power/supply/qcom/qpnp-smb5.c index cd4428b95fcc..48836fd7a455 100644 --- a/drivers/power/supply/qcom/qpnp-smb5.c +++ b/drivers/power/supply/qcom/qpnp-smb5.c @@ -234,6 +234,9 @@ struct smb_dt_props { int term_current_thresh_hi_ma; int term_current_thresh_lo_ma; int disable_suspend_on_collapse; + int wdog_snarl_disable; + const char *batt_psy_name; + }; struct smb5 { @@ -971,6 +974,10 @@ static int smb5_parse_dt(struct smb5 *chip) return rc; rc = smblib_get_iio_channel(chg, "project_gpio6", &chg->iio.project_gpio6); + + chip->dt.wdog_snarl_disable = of_property_read_bool(node, + "google,wdog_snarl_disable"); + if (rc < 0) return rc; @@ -4017,6 +4024,12 @@ static int smb5_request_interrupts(struct smb5 *chip) enable_irq_wake(chg->irq_info[BAT_TEMP_IRQ].irq); chg->batt_temp_irq_enabled = true; } + + if (chg->irq_info[WDOG_SNARL_IRQ].irq && chip->dt.wdog_snarl_disable) { + disable_irq_wake(chg->irq_info[WDOG_SNARL_IRQ].irq); + disable_irq_nosync(chg->irq_info[WDOG_SNARL_IRQ].irq); + } + vote(chg->limited_irq_disable_votable, CHARGER_TYPE_VOTER, true, 0); vote(chg->hdc_irq_disable_votable, CHARGER_TYPE_VOTER, true, 0); diff --git a/drivers/power/supply/qcom/smb-lib.c b/drivers/power/supply/qcom/smb-lib.c index fc087b100301..92438aa27b99 100644 --- a/drivers/power/supply/qcom/smb-lib.c +++ b/drivers/power/supply/qcom/smb-lib.c @@ -866,6 +866,8 @@ static int smblib_get_pulse_cnt(struct smb_charger *chg, int *count) #define USBIN_500MA 500000 #define USBIN_900MA 900000 +#define SUSPEND_ICL_MAX USBIN_25MA + static int set_sdp_current(struct smb_charger *chg, int icl_ua) { 
int rc; @@ -947,8 +949,8 @@ int smblib_set_icl_current(struct smb_charger *chg, int icl_ua) int rc = 0; bool override; - /* suspend and return if 25mA or less is requested */ - if (icl_ua <= USBIN_25MA) + /* suspend and return if below ICL min is requested */ + if (icl_ua <= SUSPEND_ICL_MAX) return smblib_set_usb_suspend(chg, true); if (icl_ua == INT_MAX) @@ -1218,7 +1220,7 @@ static int smblib_dc_icl_vote_callback(struct votable *votable, void *data, icl_ua = 0; } - suspend = (icl_ua <= USBIN_25MA); + suspend = (icl_ua <= SUSPEND_ICL_MAX); if (suspend) goto suspend; @@ -2823,7 +2825,7 @@ int smblib_set_prop_sdp_current_max(struct smb_charger *chg, if (!chg->pd_active) { rc = smblib_handle_usb_current(chg, val->intval); } else if (chg->system_suspend_supported) { - if (val->intval <= USBIN_25MA) + if (val->intval <= SUSPEND_ICL_MAX) rc = vote(chg->usb_icl_votable, PD_SUSPEND_SUPPORTED_VOTER, true, val->intval); else @@ -4631,7 +4633,8 @@ irqreturn_t smblib_handle_switcher_power_ok(int irq, void *data) /* skip suspending input if its already suspended by some other voter */ usb_icl = get_effective_result(chg->usb_icl_votable); - if ((stat & USE_USBIN_BIT) && usb_icl >= 0 && usb_icl <= USBIN_25MA) + if ((stat & USE_USBIN_BIT) && usb_icl >= 0 && + usb_icl <= SUSPEND_ICL_MAX) return IRQ_HANDLED; if (stat & USE_DCIN_BIT) diff --git a/drivers/power/supply/qcom/smb1390-charger.c b/drivers/power/supply/qcom/smb1390-charger.c index b5acbdd8e85d..af2d0378f78a 100644 --- a/drivers/power/supply/qcom/smb1390-charger.c +++ b/drivers/power/supply/qcom/smb1390-charger.c @@ -1095,6 +1095,7 @@ static struct platform_driver smb1390_driver = { .name = "qcom,smb1390-charger", .owner = THIS_MODULE, .of_match_table = match_table, + .probe_type = PROBE_FORCE_SYNCHRONOUS, }, .probe = smb1390_probe, .remove = smb1390_remove, diff --git a/drivers/power/supply/qcom/smb5-lib.c b/drivers/power/supply/qcom/smb5-lib.c index 85c5021bc104..70b12f600046 100644 --- 
a/drivers/power/supply/qcom/smb5-lib.c +++ b/drivers/power/supply/qcom/smb5-lib.c @@ -570,7 +570,7 @@ static const struct apsd_result *smblib_get_apsd_result(struct smb_charger *chg) rc = smblib_read(chg, APSD_STATUS_REG, &apsd_stat); if (rc < 0) { - smblib_err(chg, "Couldn't read APSD_STATUS rc=%d\n", rc); + smblib_dbg(chg, PR_REGISTER, "Couldn't read APSD_STATUS rc=%d\n", rc); return result; } smblib_dbg(chg, PR_REGISTER, "APSD_STATUS = 0x%02x\n", apsd_stat); @@ -580,7 +580,7 @@ static const struct apsd_result *smblib_get_apsd_result(struct smb_charger *chg) rc = smblib_read(chg, APSD_RESULT_STATUS_REG, &stat); if (rc < 0) { - smblib_err(chg, "Couldn't read APSD_RESULT_STATUS rc=%d\n", + smblib_dbg(chg, PR_REGISTER, "Couldn't read APSD_RESULT_STATUS rc=%d\n", rc); return result; } @@ -1563,7 +1563,7 @@ int smblib_set_icl_current(struct smb_charger *chg, int icl_ua) int rc = 0; enum icl_override_mode icl_override = HW_AUTO_MODE; /* suspend if 25mA or less is requested */ - bool suspend = (icl_ua <= USBIN_25MA); + bool suspend = (icl_ua <= SUSPEND_ICL_MAX); /* Do not configure ICL from SW for DAM cables */ if (smblib_get_prop_typec_mode(chg) == @@ -1828,7 +1828,7 @@ static int smblib_dc_icl_vote_callback(struct votable *votable, void *data, icl_ua = 0; } - suspend = (icl_ua <= USBIN_25MA); + suspend = (icl_ua <= SUSPEND_ICL_MAX); if (suspend) goto suspend; @@ -2833,7 +2833,7 @@ static int smblib_dc_therm_charging(struct smb_charger *chg, int temp_level) { int thermal_icl_ua = 0; - int rc; + int rc = 0; union power_supply_propval pval = {0, }; union power_supply_propval val = {0, }; @@ -2884,7 +2884,16 @@ static int smblib_dc_therm_charging(struct smb_charger *chg, thermal_icl_ua = chg->thermal_mitigation_bpp[temp_level]; break; } - vote(chg->dc_icl_votable, THERMAL_DAEMON_VOTER, true, thermal_icl_ua); + + if (temp_level == 0) { + /* if therm_lvl_sel is 0, clear thermal voter */ + rc = vote(chg->usb_icl_votable, THERMAL_DAEMON_VOTER, false, 0); + rc = 
vote(chg->fcc_votable, THERMAL_DAEMON_VOTER, false, 0); + } else { + if (thermal_icl_ua > 0) + rc = vote(chg->usb_icl_votable, THERMAL_DAEMON_VOTER, + true, thermal_icl_ua); + } return rc; } @@ -3014,9 +3023,9 @@ static int smblib_therm_charging(struct smb_charger *chg) pr_err("Couldn't disable USB thermal ICL vote rc=%d\n", rc); } else { - pr_info("thermal_icl_ua is %d, chg->system_temp_level: %d\n", + pr_debug("thermal_icl_ua is %d, chg->system_temp_level: %d\n", thermal_icl_ua, chg->system_temp_level); - pr_info("thermal_fcc_ua is %d\n", thermal_fcc_ua); + pr_debug("thermal_fcc_ua is %d\n", thermal_fcc_ua); if (chg->real_charger_type == POWER_SUPPLY_TYPE_USB_HVDCP_3 || (chg->cp_reason == POWER_SUPPLY_CP_PPS @@ -3916,11 +3925,9 @@ int smblib_get_prop_dc_voltage_now(struct smb_charger *chg, rc = power_supply_get_property(chg->wls_psy, POWER_SUPPLY_PROP_INPUT_VOLTAGE_REGULATION, val); - if (rc < 0) { + if (rc < 0) dev_err(chg->dev, "Couldn't get POWER_SUPPLY_PROP_INPUT_VOLTAGE_REGULATION, rc=%d\n", rc); - return rc; - } return rc; } @@ -5135,7 +5142,7 @@ int smblib_set_prop_sdp_current_max(struct smb_charger *chg, if (pval.intval && (val->intval != 0)) rc = smblib_handle_usb_current(chg, val->intval); } else if (chg->system_suspend_supported) { - if (val->intval <= USBIN_25MA) + if (val->intval <= SUSPEND_ICL_MAX) rc = vote(chg->usb_icl_votable, PD_SUSPEND_SUPPORTED_VOTER, true, val->intval); else @@ -7996,7 +8003,8 @@ irqreturn_t switcher_power_ok_irq_handler(int irq, void *data) /* skip suspending input if its already suspended by some other voter */ usb_icl = get_effective_result(chg->usb_icl_votable); - if ((stat & USE_USBIN_BIT) && usb_icl >= 0 && usb_icl <= USBIN_25MA) + if ((stat & USE_USBIN_BIT) && usb_icl >= 0 && + usb_icl <= SUSPEND_ICL_MAX) return IRQ_HANDLED; if (stat & USE_DCIN_BIT) diff --git a/drivers/power/supply/qcom/smb5-lib.h b/drivers/power/supply/qcom/smb5-lib.h index d5a5889abcaf..3d255548473d 100644 --- a/drivers/power/supply/qcom/smb5-lib.h 
+++ b/drivers/power/supply/qcom/smb5-lib.h @@ -179,6 +179,8 @@ enum hvdcp3_type { #define ROLE_REVERSAL_DELAY_MS 2000 +#define SUSPEND_ICL_MAX USBIN_25MA + enum smb_mode { PARALLEL_MASTER = 0, PARALLEL_SLAVE, diff --git a/drivers/pwm/core.c b/drivers/pwm/core.c index 4b6c0622cf34..fc2fec8d72b6 100644 --- a/drivers/pwm/core.c +++ b/drivers/pwm/core.c @@ -903,7 +903,7 @@ void pwm_put(struct pwm_device *pwm) mutex_lock(&pwm_lock); if (!test_and_clear_bit(PWMF_REQUESTED, &pwm->flags)) { - pr_warn("PWM device already freed\n"); + pr_debug("PWM device already freed\n"); goto out; } diff --git a/drivers/rpmsg/qcom_glink_native.c b/drivers/rpmsg/qcom_glink_native.c index 4fd97e56f621..f7d63e39aadb 100644 --- a/drivers/rpmsg/qcom_glink_native.c +++ b/drivers/rpmsg/qcom_glink_native.c @@ -36,25 +36,13 @@ #include "qcom_glink_native.h" #define GLINK_LOG_PAGE_CNT 2 -#define GLINK_INFO(ctxt, x, ...) \ -do { \ - if (ctxt) \ - ipc_log_string(ctxt, "[%s]: "x, __func__, ##__VA_ARGS__); \ -} while (0) - -#define CH_INFO(ch, x, ...) \ -do { \ - if (ch->glink && ch->glink->ilc) \ - ipc_log_string(ch->glink->ilc, "%s[%d:%d] %s: "x, ch->name, \ - ch->lcid, ch->rcid, __func__, ##__VA_ARGS__); \ -} while (0) +#define GLINK_INFO(ctxt, x, ...) ((void)0) +#define CH_INFO(ch, x, ...) ((void)0) #define GLINK_ERR(ctxt, x, ...) \ do { \ pr_err_ratelimited("[%s]: "x, __func__, ##__VA_ARGS__); \ - if (ctxt) \ - ipc_log_string(ctxt, "[%s]: "x, __func__, ##__VA_ARGS__); \ } while (0) #define GLINK_NAME_SIZE 32 diff --git a/drivers/rpmsg/qcom_glink_spi.c b/drivers/rpmsg/qcom_glink_spi.c index 96f100745065..178505d5677b 100644 --- a/drivers/rpmsg/qcom_glink_spi.c +++ b/drivers/rpmsg/qcom_glink_spi.c @@ -35,25 +35,13 @@ #include "qcom_glink_native.h" #define GLINK_LOG_PAGE_CNT 2 -#define GLINK_INFO(ctxt, x, ...) \ -do { \ - if (ctxt->ilc) \ - ipc_log_string(ctxt->ilc, "[%s]: "x, __func__, ##__VA_ARGS__); \ -} while (0) - -#define CH_INFO(ch, x, ...) 
\ -do { \ - if (ch->glink && ch->glink->ilc) \ - ipc_log_string(ch->glink->ilc, "%s[%d:%d] %s: "x, ch->name, \ - ch->lcid, ch->rcid, __func__, ##__VA_ARGS__); \ -} while (0) +#define GLINK_INFO(ctxt, x, ...) ((void)0) +#define CH_INFO(ch, x, ...) ((void)0) #define GLINK_ERR(ctxt, x, ...) \ do { \ pr_err_ratelimited("[%s]: "x, __func__, ##__VA_ARGS__); \ - if (ctxt->ilc) \ - ipc_log_string(ctxt->ilc, "[%s]: "x, __func__, ##__VA_ARGS__); \ } while (0) #define SPI_ALIGNMENT 16 diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c index 356729c9544b..fe3fff2ebbfe 100644 --- a/drivers/scsi/ufs/ufshcd.c +++ b/drivers/scsi/ufs/ufshcd.c @@ -764,6 +764,7 @@ static void ufshcd_cmd_log_init(struct ufs_hba *hba) { } +#ifdef CONFIG_TRACEPOINTS static void __ufshcd_cmd_log(struct ufs_hba *hba, char *str, char *cmd_type, unsigned int tag, u8 cmd_id, u8 idn, u8 lun, sector_t lba, int transfer_len) @@ -781,6 +782,7 @@ static void __ufshcd_cmd_log(struct ufs_hba *hba, char *str, char *cmd_type, ufshcd_add_command_trace(hba, &entry, strcmp(cmd_type, "clk-gating")); } +#endif static void ufshcd_dme_cmd_log(struct ufs_hba *hba, char *str, u8 cmd_id) { @@ -2057,6 +2059,26 @@ static void ufshcd_resume_clkscaling(struct ufs_hba *hba) devfreq_resume_device(hba->devfreq); } +static int bogus_clkscale_enable = 1; +static ssize_t ufshcd_bogus_clkscale_enable_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + return snprintf(buf, PAGE_SIZE, "%d\n", bogus_clkscale_enable); +} + +static ssize_t ufshcd_bogus_clkscale_enable_store(struct device *dev, + struct device_attribute *attr, const char *buf, size_t count) +{ + u32 value; + + if (kstrtou32(buf, 0, &value)) + return -EINVAL; + + bogus_clkscale_enable = !!value; + + return count; +} + static ssize_t ufshcd_clkscale_enable_show(struct device *dev, struct device_attribute *attr, char *buf) { @@ -2103,10 +2125,15 @@ out: return count; } -static void ufshcd_clkscaling_init_sysfs(struct ufs_hba *hba) +static 
void ufshcd_clkscaling_init_sysfs(struct ufs_hba *hba, bool bogus) { - hba->clk_scaling.enable_attr.show = ufshcd_clkscale_enable_show; - hba->clk_scaling.enable_attr.store = ufshcd_clkscale_enable_store; + if (bogus) { + hba->clk_scaling.enable_attr.show = ufshcd_bogus_clkscale_enable_show; + hba->clk_scaling.enable_attr.store = ufshcd_bogus_clkscale_enable_store; + } else { + hba->clk_scaling.enable_attr.show = ufshcd_clkscale_enable_show; + hba->clk_scaling.enable_attr.store = ufshcd_clkscale_enable_store; + } sysfs_attr_init(&hba->clk_scaling.enable_attr.attr); hba->clk_scaling.enable_attr.attr.name = "clkscale_enable"; hba->clk_scaling.enable_attr.attr.mode = 0644; @@ -2970,6 +2997,7 @@ static void ufshcd_init_hibern8_on_idle(struct ufs_hba *hba) * auto hibern8 is supported */ hba->caps &= ~UFSHCD_CAP_HIBERN8_ENTER_ON_IDLE; + hba->hibern8_on_idle.is_enabled = true; return; } else { hba->hibern8_on_idle.delay_ms = 10; @@ -11262,7 +11290,7 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq) mb(); /* IRQ registration */ - err = devm_request_irq(dev, irq, ufshcd_intr, IRQF_SHARED, + err = devm_request_irq(dev, irq, ufshcd_intr, IRQF_SHARED | IRQF_PERF_CRITICAL, dev_name(dev), hba); if (err) { dev_err(hba->dev, "request irq failed\n"); @@ -11310,7 +11338,9 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq) host->host_no); hba->clk_scaling.workq = create_singlethread_workqueue(wq_name); - ufshcd_clkscaling_init_sysfs(hba); + ufshcd_clkscaling_init_sysfs(hba, false); + } else { + ufshcd_clkscaling_init_sysfs(hba, true); } /* diff --git a/drivers/slimbus/slim-msm-ngd.c b/drivers/slimbus/slim-msm-ngd.c index 9ae3626f30a6..681e244def0d 100644 --- a/drivers/slimbus/slim-msm-ngd.c +++ b/drivers/slimbus/slim-msm-ngd.c @@ -1061,7 +1061,7 @@ static int ngd_allocbw(struct slim_device *sb, int *subfrmc, int *clkgear) struct slim_controller *ctrl = sb->ctrl; DECLARE_COMPLETION_ONSTACK(done); u8 
wbuf[SLIM_MSGQ_BUF_LEN]; - struct msm_slim_ctrl *dev = slim_get_ctrldata(ctrl); + struct msm_slim_ctrl __maybe_unused *dev = slim_get_ctrldata(ctrl); *clkgear = ctrl->clkgear; *subfrmc = 0; @@ -1817,7 +1817,7 @@ static int ngd_slim_probe(struct platform_device *pdev) dev->ipc_slimbus_log = ipc_log_context_create(IPC_SLIMBUS_LOG_PAGES, dev_name(dev->dev), 0); if (!dev->ipc_slimbus_log) - dev_err(&pdev->dev, "error creating ipc_logging context\n"); + dev_dbg(&pdev->dev, "error creating ipc_logging context\n"); else { /* Initialize the log mask */ dev->ipc_log_mask = INFO_LEV; @@ -1834,7 +1834,7 @@ static int ngd_slim_probe(struct platform_device *pdev) ipc_log_context_create(IPC_SLIMBUS_LOG_PAGES, ipc_err_log_name, 0); if (!dev->ipc_slimbus_log_err) - dev_err(&pdev->dev, + dev_dbg(&pdev->dev, "error creating ipc_error_logging context\n"); else SLIM_INFO(dev, "start error logging for slim dev %s\n", diff --git a/drivers/slimbus/slim-msm.h b/drivers/slimbus/slim-msm.h index 7530c53e1e46..86cb927d4484 100644 --- a/drivers/slimbus/slim-msm.h +++ b/drivers/slimbus/slim-msm.h @@ -375,33 +375,15 @@ enum { /* Default IPC log level INFO */ #define SLIM_DBG(dev, x...) do { \ pr_debug(x); \ - if (dev->ipc_slimbus_log && dev->ipc_log_mask >= DBG_LEV) { \ - ipc_log_string(dev->ipc_slimbus_log, x); \ - } \ - if (dev->ipc_slimbus_log_err && dev->ipc_log_mask == FATAL_LEV) { \ - ipc_log_string(dev->ipc_slimbus_log_err, x); \ - } \ } while (0) #define SLIM_INFO(dev, x...) do { \ pr_debug(x); \ - if (dev->ipc_slimbus_log && dev->ipc_log_mask >= INFO_LEV) {\ - ipc_log_string(dev->ipc_slimbus_log, x); \ - } \ - if (dev->ipc_slimbus_log_err && dev->ipc_log_mask == FATAL_LEV) { \ - ipc_log_string(dev->ipc_slimbus_log_err, x); \ - } \ } while (0) /* warnings and errors show up on console always */ #define SLIM_WARN(dev, x...) 
do { \ - if (dev->ipc_slimbus_log && dev->ipc_log_mask >= WARN_LEV) { \ - pr_warn(x); \ - ipc_log_string(dev->ipc_slimbus_log, x); \ - } \ - if (dev->ipc_slimbus_log_err && dev->ipc_log_mask == FATAL_LEV) { \ - ipc_log_string(dev->ipc_slimbus_log_err, x); \ - } \ + pr_warn(x); \ } while (0) /* ERROR condition in the driver sets the hs_serial_debug_mask @@ -409,15 +391,7 @@ enum { * in IPC logging. Further errors continue to log on the error IPC logging. */ #define SLIM_ERR(dev, x...) do { \ - if (dev->ipc_slimbus_log && dev->ipc_log_mask >= ERR_LEV) { \ - pr_err(x); \ - ipc_log_string(dev->ipc_slimbus_log, x); \ - dev->default_ipc_log_mask = dev->ipc_log_mask; \ - dev->ipc_log_mask = FATAL_LEV; \ - } \ - if (dev->ipc_slimbus_log_err && dev->ipc_log_mask == FATAL_LEV) { \ - ipc_log_string(dev->ipc_slimbus_log_err, x); \ - } \ + pr_err(x); \ } while (0) #define SLIM_RST_LOGLVL(dev) { \ diff --git a/drivers/soc/qcom/Kconfig b/drivers/soc/qcom/Kconfig index dbd5759e94de..a756f38b2bd2 100644 --- a/drivers/soc/qcom/Kconfig +++ b/drivers/soc/qcom/Kconfig @@ -751,7 +751,7 @@ config QCOM_QDSS_BRIDGE config QTI_RPM_STATS_LOG bool "Qualcomm Technologies RPM Stats Driver" - depends on DEBUG_FS + depends on DEBUG_FS || ANDROID default n help This option enables a driver which reads RPM messages from a shared @@ -794,7 +794,7 @@ config MSM_AVTIMER config MSM_PM depends on PM - select MSM_IDLE_STATS if DEBUG_FS + select MSM_IDLE_STATS select CPU_IDLE_MULTIPLE_DRIVERS bool "Qualcomm Techonolgies Inc (QTI) platform specific PM driver" help diff --git a/drivers/soc/qcom/Makefile b/drivers/soc/qcom/Makefile index 53f7674bab11..fc6e841b9790 100644 --- a/drivers/soc/qcom/Makefile +++ b/drivers/soc/qcom/Makefile @@ -114,3 +114,7 @@ obj-$(CONFIG_QCOM_ADSP_MANUAL_VOTE) += adsp_vote_qmi.o adsp_lpm_voting_v01.o obj-$(CONFIG_CPU_V7) += idle-v7.o obj-$(CONFIG_MSM_BAM_DMUX) += bam_dmux.o obj-$(CONFIG_WCNSS_CORE) += wcnss/ + +CFLAGS_rpm_stats.o += -DCONFIG_DEBUG_FS +CFLAGS_rpm_master_stat.o += 
-DCONFIG_DEBUG_FS +CFLAGS_rpmh_master_stat.o += -DCONFIG_DEBUG_FS diff --git a/drivers/soc/qcom/boot_stats.c b/drivers/soc/qcom/boot_stats.c index 3b0eda02d690..57b3bc94f068 100644 --- a/drivers/soc/qcom/boot_stats.c +++ b/drivers/soc/qcom/boot_stats.c @@ -96,11 +96,11 @@ unsigned long long int msm_timer_get_sclk_ticks(void) if (!sclk_tick) return -EINVAL; while (loop_zero_count--) { - t1 = __raw_readl_no_log(sclk_tick); + t1 = __raw_readl(sclk_tick); do { udelay(1); t2 = t1; - t1 = __raw_readl_no_log(sclk_tick); + t1 = __raw_readl(sclk_tick); } while ((t2 != t1) && --loop_count); if (!loop_count) { pr_err("boot_stats: SCLK did not stabilize\n"); diff --git a/drivers/soc/qcom/dcc_v2.c b/drivers/soc/qcom/dcc_v2.c index 1a8add7559f7..b27f7f78d076 100644 --- a/drivers/soc/qcom/dcc_v2.c +++ b/drivers/soc/qcom/dcc_v2.c @@ -179,7 +179,7 @@ static void dcc_sram_memset(const struct device *dev, void __iomem *dst, } while (count >= 4) { - __raw_writel_no_log(qc, dst); + __raw_writel(qc, dst); dst += 4; count -= 4; } @@ -195,7 +195,7 @@ static int dcc_sram_memcpy(void *to, const void __iomem *from, } while (count >= 4) { - *(unsigned int *)to = __raw_readl_no_log(from); + *(unsigned int *)to = __raw_readl(from); to += 4; from += 4; count -= 4; diff --git a/drivers/soc/qcom/glink_pkt.c b/drivers/soc/qcom/glink_pkt.c index 1ebaf5ea459b..fe2b3289916d 100644 --- a/drivers/soc/qcom/glink_pkt.c +++ b/drivers/soc/qcom/glink_pkt.c @@ -38,18 +38,11 @@ enum { GLINK_PKT_INFO = 1U << 0, }; -#define GLINK_PKT_INFO(x, ...) \ -do { \ - if (glink_pkt_debug_mask & GLINK_PKT_INFO) { \ - ipc_log_string(glink_pkt_ilctxt, \ - "[%s]: "x, __func__, ##__VA_ARGS__); \ - } \ -} while (0) +#define GLINK_PKT_INFO(x, ...) ((void)0) #define GLINK_PKT_ERR(x, ...) 
\ do { \ pr_err_ratelimited("[%s]: "x, __func__, ##__VA_ARGS__); \ - ipc_log_string(glink_pkt_ilctxt, "[%s]: "x, __func__, ##__VA_ARGS__); \ } while (0) #define SMD_DTR_SIG BIT(31) diff --git a/drivers/soc/qcom/glink_probe.c b/drivers/soc/qcom/glink_probe.c index 7b4d99d2ef6d..2d094a15f780 100644 --- a/drivers/soc/qcom/glink_probe.c +++ b/drivers/soc/qcom/glink_probe.c @@ -26,17 +26,11 @@ static void *glink_ilc; static DEFINE_MUTEX(ssr_lock); -#define GLINK_INFO(x, ...) \ -do { \ - if (glink_ilc) \ - ipc_log_string(glink_ilc, "[%s]: "x, __func__, ##__VA_ARGS__); \ -} while (0) +#define GLINK_INFO(x, ...) ((void)0) #define GLINK_ERR(dev, x, ...) \ do { \ dev_err(dev, "[%s]: "x, __func__, ##__VA_ARGS__); \ - if (glink_ilc) \ - ipc_log_string(glink_ilc, "[%s]: "x, __func__, ##__VA_ARGS__); \ } while (0) diff --git a/drivers/soc/qcom/icnss.c b/drivers/soc/qcom/icnss.c index 76a00a74ee1e..abf0aa87eddd 100644 --- a/drivers/soc/qcom/icnss.c +++ b/drivers/soc/qcom/icnss.c @@ -3582,7 +3582,7 @@ static int icnss_debugfs_create(struct icnss_priv *priv) if (IS_ERR(root_dentry)) { ret = PTR_ERR(root_dentry); - icnss_pr_err("Unable to create debugfs %d\n", ret); + icnss_pr_dbg("Unable to create debugfs %d\n", ret); return ret; } diff --git a/drivers/soc/qcom/jtagv8-etm.c b/drivers/soc/qcom/jtagv8-etm.c index e118e51e624e..a3080251aaf8 100644 --- a/drivers/soc/qcom/jtagv8-etm.c +++ b/drivers/soc/qcom/jtagv8-etm.c @@ -186,7 +186,7 @@ /* spread out etm register write */ #define etm_writel(etm, val, off) \ do { \ - writel_relaxed_no_log(val, etm->base + off); \ + writel_relaxed(val, etm->base + off); \ udelay(20); \ } while (0) @@ -194,13 +194,13 @@ do { \ __raw_writel(val, etm->base + off) #define etm_readl(etm, off) \ - readl_relaxed_no_log(etm->base + off) + readl_relaxed(etm->base + off) #define etm_writeq(etm, val, off) \ - writeq_relaxed_no_log(val, etm->base + off) + writeq_relaxed(val, etm->base + off) #define etm_readq(etm, off) \ - readq_relaxed_no_log(etm->base + off) + 
readq_relaxed(etm->base + off) #define ETM_LOCK(base) \ do { \ diff --git a/drivers/soc/qcom/msm_bus/msm_bus_arb_adhoc.c b/drivers/soc/qcom/msm_bus/msm_bus_arb_adhoc.c index 05227358b072..a96b55cab9fb 100644 --- a/drivers/soc/qcom/msm_bus/msm_bus_arb_adhoc.c +++ b/drivers/soc/qcom/msm_bus/msm_bus_arb_adhoc.c @@ -25,6 +25,8 @@ #define NUM_LNODES 3 #define MAX_STR_CL 50 +#define DEBUG_REC_TRANSACTION 0 + struct bus_search_type { struct list_head link; struct list_head node_list; @@ -1263,7 +1265,8 @@ static int update_bw_adhoc(struct msm_bus_client_handle *cl, u64 ab, u64 ib) if (!strcmp(test_cl, cl->name)) log_transaction = true; - msm_bus_dbg_rec_transaction(cl, ab, ib); + if (DEBUG_REC_TRANSACTION) + msm_bus_dbg_rec_transaction(cl, ab, ib); if ((cl->cur_act_ib == ib) && (cl->cur_act_ab == ab)) { MSM_BUS_DBG("%s:no change in request", cl->name); @@ -1324,7 +1327,9 @@ static int update_bw_context(struct msm_bus_client_handle *cl, u64 act_ab, if (!slp_ab && !slp_ib) cl->active_only = true; - msm_bus_dbg_rec_transaction(cl, cl->cur_act_ab, cl->cur_dual_ib); + if (DEBUG_REC_TRANSACTION) + msm_bus_dbg_rec_transaction(cl, cl->cur_act_ab, + cl->cur_dual_ib); ret = update_path(cl->mas_dev, cl->slv, act_ib, act_ab, slp_ib, slp_ab, cl->cur_act_ab, cl->cur_act_ab, cl->first_hop, cl->active_only); diff --git a/drivers/soc/qcom/msm_bus/msm_bus_arb_rpmh.c b/drivers/soc/qcom/msm_bus/msm_bus_arb_rpmh.c index a9e4bfa1657a..d376f14e7dd4 100644 --- a/drivers/soc/qcom/msm_bus/msm_bus_arb_rpmh.c +++ b/drivers/soc/qcom/msm_bus/msm_bus_arb_rpmh.c @@ -1338,8 +1338,7 @@ exit_register_client: return handle; } -static int update_client_paths(struct msm_bus_client *client, bool log_trns, - unsigned int idx) +static int update_client_paths(struct msm_bus_client *client, unsigned int idx) { int lnode, src, dest, cur_idx; uint64_t req_clk, req_bw, curr_clk, curr_bw, slp_clk, slp_bw; @@ -1409,17 +1408,13 @@ static int update_client_paths(struct msm_bus_client *client, bool log_trns, if (dev) 
msm_bus_commit_single(dev); } - - if (log_trns) - getpath_debug(src, lnode, pdata->active_only); } commit_data(); exit_update_client_paths: return ret; } -static int update_client_alc(struct msm_bus_client *client, bool log_trns, - unsigned int idx) +static int update_client_alc(struct msm_bus_client *client, unsigned int idx) { int lnode, cur_idx; uint64_t req_idle_time, req_fal, dual_idle_time, dual_fal, @@ -1598,7 +1593,7 @@ static int update_context(uint32_t cl, bool active_only, pdata->active_only = active_only; msm_bus_dbg_client_data(client->pdata, ctx_idx, cl); - ret = update_client_paths(client, false, ctx_idx); + ret = update_client_paths(client, ctx_idx); if (ret) { pr_err("%s: Err updating path\n", __func__); goto exit_update_context; @@ -1616,8 +1611,6 @@ static int update_request_adhoc(uint32_t cl, unsigned int index) int ret = 0; struct msm_bus_scale_pdata *pdata; struct msm_bus_client *client; - const char *test_cl = "Null"; - bool log_transaction = false; rt_mutex_lock(&msm_bus_adhoc_lock); @@ -1655,17 +1648,14 @@ static int update_request_adhoc(uint32_t cl, unsigned int index) goto exit_update_request; } - if (!strcmp(test_cl, pdata->name)) - log_transaction = true; - MSM_BUS_DBG("%s: cl: %u index: %d curr: %d num_paths: %d\n", __func__, cl, index, client->curr, client->pdata->usecase->num_paths); if (pdata->alc) - ret = update_client_alc(client, log_transaction, index); + ret = update_client_alc(client, index); else { msm_bus_dbg_client_data(client->pdata, index, cl); - ret = update_client_paths(client, log_transaction, index); + ret = update_client_paths(client, index); } if (ret) { pr_err("%s: Err updating path\n", __func__); diff --git a/drivers/soc/qcom/msm_bus/msm_bus_core.h b/drivers/soc/qcom/msm_bus/msm_bus_core.h index b34c22961baa..a1d197bbf98b 100644 --- a/drivers/soc/qcom/msm_bus/msm_bus_core.h +++ b/drivers/soc/qcom/msm_bus/msm_bus_core.h @@ -328,7 +328,7 @@ int msm_bus_noc_hw_init(struct msm_bus_fabric_registration *pdata, struct 
msm_bus_hw_algorithm *hw_algo); int msm_bus_bimc_hw_init(struct msm_bus_fabric_registration *pdata, struct msm_bus_hw_algorithm *hw_algo); -#if defined(CONFIG_DEBUG_FS) && defined(CONFIG_QCOM_BUS_SCALING) +#if 0 void msm_bus_dbg_client_data(struct msm_bus_scale_pdata *pdata, int index, uint32_t cl); void msm_bus_dbg_commit_data(const char *fabname, void *cdata, diff --git a/drivers/soc/qcom/msm_bus/msm_bus_rules.c b/drivers/soc/qcom/msm_bus/msm_bus_rules.c index e01ccb3f4f1e..3f8c259edb81 100644 --- a/drivers/soc/qcom/msm_bus/msm_bus_rules.c +++ b/drivers/soc/qcom/msm_bus/msm_bus_rules.c @@ -16,8 +16,6 @@ #include #include #include -#define CREATE_TRACE_POINTS -#include struct node_vote_info { int id; @@ -215,11 +213,6 @@ static void match_rule(struct rule_update_path_info *inp_node, continue; if (check_rule(rule, inp_node)) { - trace_bus_rules_matches( - (node->cur_rule ? - node->cur_rule->rule_id : -1), - inp_node->id, inp_node->ab, - inp_node->ib, inp_node->clk); if (rule->state == RULE_STATE_NOT_APPLIED) rule->state_change = true; diff --git a/drivers/soc/qcom/peripheral-loader.c b/drivers/soc/qcom/peripheral-loader.c index ab6792932e2e..cb4052770d99 100644 --- a/drivers/soc/qcom/peripheral-loader.c +++ b/drivers/soc/qcom/peripheral-loader.c @@ -1749,7 +1749,7 @@ static int __init msm_pil_init(void) pil_ipc_log = ipc_log_context_create(2, "PIL-IPC", 0); if (!pil_ipc_log) - pr_warn("Failed to setup PIL ipc logging\n"); + pr_debug("Failed to setup PIL ipc logging\n"); out: return register_pm_notifier(&pil_pm_notifier); } diff --git a/drivers/soc/qcom/qsee_ipc_irq_bridge.c b/drivers/soc/qcom/qsee_ipc_irq_bridge.c index 0c11c9a0b97b..7d215dbc2efc 100644 --- a/drivers/soc/qcom/qsee_ipc_irq_bridge.c +++ b/drivers/soc/qcom/qsee_ipc_irq_bridge.c @@ -27,16 +27,11 @@ #define NUM_LOG_PAGES 4 #define QIIB_DBG(x...) do { \ - if (qiib_info->log_ctx) \ - ipc_log_string(qiib_info->log_ctx, x); \ - else \ - pr_debug(x); \ + pr_debug(x); \ } while (0) #define QIIB_ERR(x...) 
do { \ pr_err(x); \ - if (qiib_info->log_ctx) \ - ipc_log_string(qiib_info->log_ctx, x); \ } while (0) static void qiib_cleanup(void); diff --git a/drivers/soc/qcom/watchdog_v2.c b/drivers/soc/qcom/watchdog_v2.c index 6cc4f35ff614..4d67392017ad 100644 --- a/drivers/soc/qcom/watchdog_v2.c +++ b/drivers/soc/qcom/watchdog_v2.c @@ -564,6 +564,9 @@ static void configure_bark_dump(struct msm_watchdog_data *wdog_dd) int cpu; void *cpu_buf; + if (!IS_ENABLED(CONFIG_QCOM_MEMORY_DUMP_V2)) + return; + cpu_data = kcalloc(num_present_cpus(), sizeof(struct msm_dump_data), GFP_KERNEL); if (!cpu_data) diff --git a/drivers/staging/android/ion/ion_system_heap.c b/drivers/staging/android/ion/ion_system_heap.c index 723d49bbc5bb..8f57416b930f 100644 --- a/drivers/staging/android/ion/ion_system_heap.c +++ b/drivers/staging/android/ion/ion_system_heap.c @@ -56,6 +56,8 @@ struct pages_mem { u32 size; }; +static struct kmem_cache *ion_page_info_pool; + int ion_heap_is_system_heap_type(enum ion_heap_type type) { return type == ((enum ion_heap_type)ION_HEAP_TYPE_SYSTEM); @@ -126,20 +128,16 @@ void free_buffer_page(struct ion_system_heap *heap, } } -static struct page_info *alloc_largest_available(struct ion_system_heap *heap, - struct ion_buffer *buffer, - unsigned long size, - unsigned int max_order) +static int alloc_largest_available(struct page_info *info, + struct ion_system_heap *heap, + struct ion_buffer *buffer, + unsigned long size, + unsigned int max_order) { struct page *page; - struct page_info *info; int i; bool from_pool; - info = kmalloc(sizeof(*info), GFP_KERNEL); - if (!info) - return ERR_PTR(-ENOMEM); - for (i = 0; i < NUM_ORDERS; i++) { if (size < order_to_size(orders[i])) continue; @@ -154,28 +152,22 @@ static struct page_info *alloc_largest_available(struct ion_system_heap *heap, info->order = orders[i]; info->from_pool = from_pool; INIT_LIST_HEAD(&info->list); - return info; + return 0; } - kfree(info); - return ERR_PTR(-ENOMEM); + return -ENOMEM; } -static struct 
page_info *alloc_from_pool_preferred( +static int alloc_from_pool_preferred(struct page_info *info, struct ion_system_heap *heap, struct ion_buffer *buffer, unsigned long size, unsigned int max_order) { struct page *page; - struct page_info *info; int i; if (buffer->flags & ION_FLAG_POOL_FORCE_ALLOC) goto force_alloc; - info = kmalloc(sizeof(*info), GFP_KERNEL); - if (!info) - return ERR_PTR(-ENOMEM); - for (i = 0; i < NUM_ORDERS; i++) { if (size < order_to_size(orders[i])) continue; @@ -190,7 +182,7 @@ static struct page_info *alloc_from_pool_preferred( info->order = orders[i]; info->from_pool = true; INIT_LIST_HEAD(&info->list); - return info; + return 0; } page = split_page_from_secure_pool(heap, buffer); @@ -199,12 +191,11 @@ static struct page_info *alloc_from_pool_preferred( info->order = 0; info->from_pool = true; INIT_LIST_HEAD(&info->list); - return info; + return 0; } - kfree(info); force_alloc: - return alloc_largest_available(heap, buffer, size, max_order); + return alloc_largest_available(info, heap, buffer, size, max_order); } static unsigned int process_info(struct page_info *info, @@ -231,10 +222,16 @@ static unsigned int process_info(struct page_info *info, data->pages[i++] = nth_page(page, j); } list_del(&info->list); - kfree(info); return i; } +static void free_info(struct page_info *info, struct page_info *info_onstack, + size_t onstack_len) +{ + if (info < info_onstack || info > &info_onstack[onstack_len - 1]) + kmem_cache_free(ion_page_info_pool, info); +} + static int ion_heap_alloc_pages_mem(struct pages_mem *pages_mem) { struct page **pages; @@ -290,6 +287,7 @@ static int ion_system_heap_allocate(struct ion_heap *heap, struct pages_mem data; unsigned int sz; int vmid = get_secure_vmid(buffer->flags); + struct page_info info_onstack[SZ_4K / sizeof(struct page_info)]; if (size / PAGE_SIZE > totalram_pages / 2) return -ENOMEM; @@ -306,16 +304,25 @@ static int ion_system_heap_allocate(struct ion_heap *heap, INIT_LIST_HEAD(&pages_from_pool); 
while (size_remaining > 0) { + if (i >= ARRAY_SIZE(info_onstack)) { + info = kmem_cache_alloc(ion_page_info_pool, GFP_KERNEL); + if (!info) + goto err; + } else { + info = &info_onstack[i]; + } + if (is_secure_vmid_valid(vmid)) - info = alloc_from_pool_preferred( + ret = alloc_from_pool_preferred(info, sys_heap, buffer, size_remaining, max_order); else - info = alloc_largest_available( + ret = alloc_largest_available(info, sys_heap, buffer, size_remaining, max_order); - if (IS_ERR(info)) { + if (ret) { + free_info(info, info_onstack, ARRAY_SIZE(info_onstack)); ret = PTR_ERR(info); goto err; } @@ -343,7 +350,7 @@ static int ion_system_heap_allocate(struct ion_heap *heap, if (ret) goto err; - table = kzalloc(sizeof(*table), GFP_KERNEL); + table = kmalloc(sizeof(*table), GFP_KERNEL); if (!table) { ret = -ENOMEM; goto err_free_data_pages; @@ -376,15 +383,22 @@ static int ion_system_heap_allocate(struct ion_heap *heap, if (info && tmp_info) { if (info->order >= tmp_info->order) { i = process_info(info, sg, sg_sync, &data, i); + free_info(info, info_onstack, + ARRAY_SIZE(info_onstack)); sg_sync = sg_next(sg_sync); } else { i = process_info(tmp_info, sg, 0, 0, i); + free_info(tmp_info, info_onstack, + ARRAY_SIZE(info_onstack)); } } else if (info) { i = process_info(info, sg, sg_sync, &data, i); + free_info(info, info_onstack, ARRAY_SIZE(info_onstack)); sg_sync = sg_next(sg_sync); } else if (tmp_info) { i = process_info(tmp_info, sg, 0, 0, i); + free_info(tmp_info, info_onstack, + ARRAY_SIZE(info_onstack)); } sg = sg_next(sg); @@ -425,11 +439,11 @@ err_free_data_pages: err: list_for_each_entry_safe(info, tmp_info, &pages, list) { free_buffer_page(sys_heap, buffer, info->page, info->order); - kfree(info); + free_info(info, info_onstack, ARRAY_SIZE(info_onstack)); } list_for_each_entry_safe(info, tmp_info, &pages_from_pool, list) { free_buffer_page(sys_heap, buffer, info->page, info->order); - kfree(info); + free_info(info, info_onstack, ARRAY_SIZE(info_onstack)); } return 
ret; } @@ -655,9 +669,14 @@ struct ion_heap *ion_system_heap_create(struct ion_platform_heap *data) struct ion_system_heap *heap; int i; + ion_page_info_pool = KMEM_CACHE(page_info, 0); + if (!ion_page_info_pool) + return ERR_PTR(-ENOMEM); + heap = kzalloc(sizeof(*heap), GFP_KERNEL); if (!heap) - return ERR_PTR(-ENOMEM); + goto err_free_page_info_pool; + heap->heap.ops = &system_heap_ops; heap->heap.type = ION_HEAP_TYPE_SYSTEM; heap->heap.flags = ION_HEAP_FLAG_DEFER_FREE; @@ -687,6 +706,8 @@ destroy_secure_pools: ion_system_heap_destroy_pools(heap->secure_pools[i]); } kfree(heap); +err_free_page_info_pool: + kmem_cache_destroy(ion_page_info_pool); return ERR_PTR(-ENOMEM); } @@ -711,7 +732,7 @@ static int ion_system_contig_heap_allocate(struct ion_heap *heap, for (i = len >> PAGE_SHIFT; i < (1 << order); i++) __free_page(page + i); - table = kmalloc(sizeof(struct sg_table), GFP_KERNEL); + table = kmalloc(sizeof(*table), GFP_KERNEL); if (!table) { ret = -ENOMEM; goto free_pages; diff --git a/drivers/staging/qcacld-3.0/Kbuild b/drivers/staging/qcacld-3.0/Kbuild index ed745f0d7761..7515219db76c 100644 --- a/drivers/staging/qcacld-3.0/Kbuild +++ b/drivers/staging/qcacld-3.0/Kbuild @@ -2241,16 +2241,6 @@ ccflags-y += -DWLAN_MAX_VDEVS=$(CONFIG_WLAN_MAX_VDEVS) KBUILD_CPPFLAGS += $(cppflags-y) -# Currently, for versions of gcc which support it, the kernel Makefile -# is disabling the maybe-uninitialized warning. Re-enable it for the -# WLAN driver. Note that we must use ccflags-y here so that it -# will override the kernel settings. 
-ifeq ($(call cc-option-yn, -Wmaybe-uninitialized), y) -ccflags-y += -Wmaybe-uninitialized -ifneq (y,$(CONFIG_ARCH_MSM)) -ccflags-y += -Wframe-larger-than=4096 -endif -endif ccflags-y += -Wmissing-prototypes ifeq ($(call cc-option-yn, -Wheader-guard), y) diff --git a/drivers/staging/qcacld-3.0/configs/default_defconfig b/drivers/staging/qcacld-3.0/configs/default_defconfig index 8b38b2c77987..e1e0925657c7 100644 --- a/drivers/staging/qcacld-3.0/configs/default_defconfig +++ b/drivers/staging/qcacld-3.0/configs/default_defconfig @@ -301,9 +301,7 @@ CONFIG_TX_TID_OVERRIDE := y endif #Enable WLAN/Power debugfs feature only if debug_fs is enabled -ifeq ($(CONFIG_DEBUG_FS), y) - # Flag to enable debugfs. Depends on CONFIG_DEBUG_FS in kernel - # configuration. +ifeq ($(CONFIG_ANDROID), y) CONFIG_WLAN_DEBUGFS := y CONFIG_WLAN_POWER_DEBUGFS := y diff --git a/drivers/staging/qcacld-3.0/core/hdd/src/wlan_hdd_debugfs.c b/drivers/staging/qcacld-3.0/core/hdd/src/wlan_hdd_debugfs.c index eeb2bf44d95d..338f99ad731c 100644 --- a/drivers/staging/qcacld-3.0/core/hdd/src/wlan_hdd_debugfs.c +++ b/drivers/staging/qcacld-3.0/core/hdd/src/wlan_hdd_debugfs.c @@ -26,6 +26,9 @@ */ #ifdef WLAN_OPEN_SOURCE +#if defined(CONFIG_ANDROID) && !defined(CONFIG_DEBUG_FS) +#define CONFIG_DEBUG_FS +#endif #include #include #include diff --git a/drivers/thermal/msm-tsens.c b/drivers/thermal/msm-tsens.c index 7db6a4b9aa2d..be0740a5e5d9 100644 --- a/drivers/thermal/msm-tsens.c +++ b/drivers/thermal/msm-tsens.c @@ -316,7 +316,7 @@ int tsens_tm_probe(struct platform_device *pdev) tmdev->ipc_log0 = ipc_log_context_create(IPC_LOGPAGES, tsens_name, 0); if (!tmdev->ipc_log0) - pr_err("%s : unable to create IPC Logging 0 for tsens %pa", + pr_debug("%s : unable to create IPC Logging 0 for tsens %pa", __func__, &tmdev->phys_addr_tm); snprintf(tsens_name, sizeof(tsens_name), "tsens_%pa_1", @@ -325,7 +325,7 @@ int tsens_tm_probe(struct platform_device *pdev) tmdev->ipc_log1 = ipc_log_context_create(IPC_LOGPAGES, 
tsens_name, 0); if (!tmdev->ipc_log1) - pr_err("%s : unable to create IPC Logging 1 for tsens %pa", + pr_debug("%s : unable to create IPC Logging 1 for tsens %pa", __func__, &tmdev->phys_addr_tm); snprintf(tsens_name, sizeof(tsens_name), "tsens_%pa_2", @@ -334,7 +334,7 @@ int tsens_tm_probe(struct platform_device *pdev) tmdev->ipc_log2 = ipc_log_context_create(IPC_LOGPAGES, tsens_name, 0); if (!tmdev->ipc_log2) - pr_err("%s : unable to create IPC Logging 2 for tsens %pa", + pr_debug("%s : unable to create IPC Logging 2 for tsens %pa", __func__, &tmdev->phys_addr_tm); list_add_tail(&tmdev->list, &tsens_device_list); diff --git a/drivers/thermal/qcom/qti_virtual_sensor.c b/drivers/thermal/qcom/qti_virtual_sensor.c index 46bfd0d5c2d2..747c59c9858f 100644 --- a/drivers/thermal/qcom/qti_virtual_sensor.c +++ b/drivers/thermal/qcom/qti_virtual_sensor.c @@ -105,9 +105,8 @@ static const struct virtual_sensor_data qti_virtual_sensors[] = { }, { .virt_zone_name = "gpuss-max-step", - .num_sensors = 2, - .sensor_names = {"gpuss-0-usr", - "gpuss-1-usr"}, + .num_sensors = 1, + .sensor_names = {"gpuss-0-usr"}, .logic = VIRT_MAXIMUM, }, { diff --git a/drivers/thermal/thermal_core.c b/drivers/thermal/thermal_core.c index b0cfb8a7fd2d..81c770df7a95 100644 --- a/drivers/thermal/thermal_core.c +++ b/drivers/thermal/thermal_core.c @@ -1806,11 +1806,9 @@ static int screen_state_for_thermal_callback(struct notifier_block *nb, unsigned switch (blank) { case MSM_DRM_BLANK_POWERDOWN: sm.screen_state = 0; - pr_warn("%s: MSM_DRM_BLANK_POWERDOWN\n", __func__); break; case MSM_DRM_BLANK_UNBLANK: sm.screen_state = 1; - pr_warn("%s: MSM_DRM_BLANK_UNBLANK\n", __func__); break; default: break; diff --git a/drivers/thermal/tsens.h b/drivers/thermal/tsens.h index fc6e7f38b965..4cfb2fd9e250 100644 --- a/drivers/thermal/tsens.h +++ b/drivers/thermal/tsens.h @@ -65,43 +65,22 @@ struct tsens_device; #ifdef CONFIG_DEBUG_FS #define TSENS_IPC(idx, dev, msg, args...) 
do { \ - if (dev) { \ - if ((idx == 0) && (dev)->ipc_log0) \ - ipc_log_string((dev)->ipc_log0, \ - "%s: " msg, __func__, args); \ - else if ((idx == 1) && (dev)->ipc_log1) \ - ipc_log_string((dev)->ipc_log1, \ - "%s: " msg, __func__, args); \ - else if ((idx == 2) && (dev)->ipc_log2) \ - ipc_log_string((dev)->ipc_log2, \ - "%s: " msg, __func__, args); \ - else \ - pr_debug("tsens: invalid logging index\n"); \ - } \ + pr_debug("tsens: invalid logging index\n"); \ } while (0) #define TSENS_DUMP(dev, msg, args...) do { \ - TSENS_IPC(2, dev, msg, args); \ pr_info(msg, ##args); \ } while (0) #define TSENS_ERR(dev, msg, args...) do { \ pr_err(msg, ##args); \ - TSENS_IPC(1, dev, msg, args); \ } while (0) #define TSENS_INFO(dev, msg, args...) do { \ pr_info(msg, ##args); \ - TSENS_IPC(1, dev, msg, args); \ } while (0) #define TSENS_DBG(dev, msg, args...) do { \ pr_debug(msg, ##args); \ - if (dev) { \ - TSENS_IPC(0, dev, msg, args); \ - } \ } while (0) #define TSENS_DBG1(dev, msg, args...) do { \ pr_debug(msg, ##args); \ - if (dev) { \ - TSENS_IPC(1, dev, msg, args); \ - } \ } while (0) #else #define TSENS_DBG1(dev, msg, x...) 
pr_debug(msg, ##x) diff --git a/drivers/thermal/tsens2xxx.c b/drivers/thermal/tsens2xxx.c index 2ad2cfe49e4b..baf053810999 100644 --- a/drivers/thermal/tsens2xxx.c +++ b/drivers/thermal/tsens2xxx.c @@ -96,8 +96,7 @@ static int tsens2xxx_get_temp(struct tsens_sensor *sensor, int *temp) sensor_addr = TSENS_TM_SN_STATUS(tmdev->tsens_tm_addr); trdy = TSENS_TM_TRDY(tmdev->tsens_tm_addr); - code = readl_relaxed_no_log(trdy); - + code = readl_relaxed(trdy); if (!((code & TSENS_TM_TRDY_FIRST_ROUND_COMPLETE) >> TSENS_TM_TRDY_FIRST_ROUND_COMPLETE_SHIFT)) { pr_err("%s: tsens device first round not complete0x%x\n", @@ -105,7 +104,7 @@ static int tsens2xxx_get_temp(struct tsens_sensor *sensor, int *temp) /* Wait for 2.5 ms for tsens controller to recover */ do { udelay(500); - code = readl_relaxed_no_log(trdy); + code = readl_relaxed(trdy); if (code & TSENS_TM_TRDY_FIRST_ROUND_COMPLETE) { TSENS_DUMP(tmdev, "%s", "tsens controller recovered\n"); @@ -160,7 +159,7 @@ sensor_read: tmdev->trdy_fail_ctr = 0; - code = readl_relaxed_no_log(sensor_addr + + code = readl_relaxed(sensor_addr + (sensor->hw_id << TSENS_STATUS_ADDR_OFFSET)); last_temp = code & TSENS_TM_SN_LAST_TEMP_MASK; @@ -169,7 +168,7 @@ sensor_read: goto dbg; } - code = readl_relaxed_no_log(sensor_addr + + code = readl_relaxed(sensor_addr + (sensor->hw_id << TSENS_STATUS_ADDR_OFFSET)); last_temp2 = code & TSENS_TM_SN_LAST_TEMP_MASK; if (code & TSENS_TM_SN_STATUS_VALID_BIT) { @@ -178,7 +177,7 @@ sensor_read: goto dbg; } - code = readl_relaxed_no_log(sensor_addr + + code = readl_relaxed(sensor_addr + (sensor->hw_id << TSENS_STATUS_ADDR_OFFSET)); last_temp3 = code & TSENS_TM_SN_LAST_TEMP_MASK; diff --git a/drivers/tty/serial/msm_serial_hs.c b/drivers/tty/serial/msm_serial_hs.c index 4346ed2c1c4a..45ebc031b189 100644 --- a/drivers/tty/serial/msm_serial_hs.c +++ b/drivers/tty/serial/msm_serial_hs.c @@ -88,26 +88,13 @@ enum { DBG_LEV = 4U, }; -#define MSM_HS_DBG(x...) 
do { \ - if (msm_uport->ipc_debug_mask >= DBG_LEV) { \ - if (msm_uport->ipc_msm_hs_log_ctxt) \ - ipc_log_string(msm_uport->ipc_msm_hs_log_ctxt, x); \ - } \ -} while (0) +#define MSM_HS_DBG(x...) ((void)0) -#define MSM_HS_INFO(x...) do { \ - if (msm_uport->ipc_debug_mask >= INFO_LEV) {\ - if (msm_uport->ipc_msm_hs_log_ctxt) \ - ipc_log_string(msm_uport->ipc_msm_hs_log_ctxt, x); \ - } \ -} while (0) +#define MSM_HS_INFO(x...) ((void)0) /* warnings and errors show up on console always */ #define MSM_HS_WARN(x...) do { \ pr_warn(x); \ - if (msm_uport->ipc_msm_hs_log_ctxt && \ - msm_uport->ipc_debug_mask >= WARN_LEV) \ - ipc_log_string(msm_uport->ipc_msm_hs_log_ctxt, x); \ } while (0) /* ERROR condition in the driver sets the hs_serial_debug_mask @@ -116,17 +103,9 @@ enum { */ #define MSM_HS_ERR(x...) do { \ pr_err(x); \ - if (msm_uport->ipc_msm_hs_log_ctxt && \ - msm_uport->ipc_debug_mask >= ERR_LEV) { \ - ipc_log_string(msm_uport->ipc_msm_hs_log_ctxt, x); \ - msm_uport->ipc_debug_mask = FATAL_LEV; \ - } \ } while (0) -#define LOG_USR_MSG(ctx, x...) do { \ - if (ctx) \ - ipc_log_string(ctx, x); \ -} while (0) +#define LOG_USR_MSG(ctx, x...) ((void)0) /* * There are 3 different kind of UART Core available on MSM. 
diff --git a/drivers/usb/dwc3/core.c b/drivers/usb/dwc3/core.c index cfcc9534df34..a0ff5fb6edd8 100644 --- a/drivers/usb/dwc3/core.c +++ b/drivers/usb/dwc3/core.c @@ -1372,14 +1372,14 @@ static int dwc3_probe(struct platform_device *pdev) dwc->dwc_ipc_log_ctxt = ipc_log_context_create(NUM_LOG_PAGES, dev_name(dwc->dev), 0); if (!dwc->dwc_ipc_log_ctxt) - dev_err(dwc->dev, "Error getting ipc_log_ctxt\n"); + dev_dbg(dwc->dev, "Error getting ipc_log_ctxt\n"); snprintf(dma_ipc_log_ctx_name, sizeof(dma_ipc_log_ctx_name), "%s.ep_events", dev_name(dwc->dev)); dwc->dwc_dma_ipc_log_ctxt = ipc_log_context_create(NUM_LOG_PAGES, dma_ipc_log_ctx_name, 0); if (!dwc->dwc_dma_ipc_log_ctxt) - dev_err(dwc->dev, "Error getting ipc_log_ctxt for ep_events\n"); + dev_dbg(dwc->dev, "Error getting ipc_log_ctxt for ep_events\n"); dwc3_instance[count] = dwc; dwc->index = count; diff --git a/drivers/usb/gadget/function/Makefile b/drivers/usb/gadget/function/Makefile index 853efff1ac49..7cd7afe1910e 100644 --- a/drivers/usb/gadget/function/Makefile +++ b/drivers/usb/gadget/function/Makefile @@ -33,6 +33,7 @@ obj-$(CONFIG_USB_F_RNDIS) += usb_f_rndis.o usb_f_mass_storage-y := f_mass_storage.o storage_common.o obj-$(CONFIG_USB_F_MASS_STORAGE)+= usb_f_mass_storage.o usb_f_fs-y := f_fs.o +CFLAGS_f_fs.o += $(call cc-disable-warning, unused-variable) obj-$(CONFIG_USB_F_FS) += usb_f_fs.o obj-$(CONFIG_USB_U_AUDIO) += u_audio.o usb_f_uac1-y := f_uac1.o diff --git a/drivers/usb/gadget/function/f_fs.c b/drivers/usb/gadget/function/f_fs.c index c46874d6f3b0..9dfcfb0ea32c 100644 --- a/drivers/usb/gadget/function/f_fs.c +++ b/drivers/usb/gadget/function/f_fs.c @@ -46,12 +46,10 @@ #ifdef CONFIG_DYNAMIC_DEBUG #define ffs_log(fmt, ...) do { \ - ipc_log_string(ffs->ipc_log, "%s: " fmt, __func__, ##__VA_ARGS__); \ dynamic_pr_debug("%s: " fmt, __func__, ##__VA_ARGS__); \ } while (0) #else -#define ffs_log(fmt, ...) \ - ipc_log_string(ffs->ipc_log, "%s: " fmt, __func__, ##__VA_ARGS__) +#define ffs_log(fmt, ...) 
((void)0) #endif /* Reference counter handling */ diff --git a/drivers/usb/gadget/function/f_gsi.c b/drivers/usb/gadget/function/f_gsi.c index 9f1658ea11a0..becc701f636d 100644 --- a/drivers/usb/gadget/function/f_gsi.c +++ b/drivers/usb/gadget/function/f_gsi.c @@ -3845,7 +3845,7 @@ static int fgsi_init(void) ipc_log_ctxt = ipc_log_context_create(NUM_LOG_PAGES, "usb_gsi", 0); if (!ipc_log_ctxt) - pr_err("%s: Err allocating ipc_log_ctxt\n", __func__); + pr_debug("%s: Err allocating ipc_log_ctxt\n", __func__); gsi_class = class_create(THIS_MODULE, "gsi_usb"); if (IS_ERR(gsi_class)) { diff --git a/drivers/usb/gadget/function/f_mtp.c b/drivers/usb/gadget/function/f_mtp.c index 2dba060ed7ee..4e477fef102c 100644 --- a/drivers/usb/gadget/function/f_mtp.c +++ b/drivers/usb/gadget/function/f_mtp.c @@ -47,12 +47,10 @@ #ifdef CONFIG_DYNAMIC_DEBUG #define mtp_log(fmt, ...) do { \ - ipc_log_string(_mtp_ipc_log, "%s: " fmt, __func__, ##__VA_ARGS__); \ dynamic_pr_debug("%s: " fmt, __func__, ##__VA_ARGS__); \ } while (0) #else -#define mtp_log(fmt, ...) \ - ipc_log_string(_mtp_ipc_log, "%s: " fmt, __func__, ##__VA_ARGS__) +#define mtp_log(fmt, ...) ((void)0) #endif #define MTP_BULK_BUFFER_SIZE 16384 diff --git a/drivers/usb/pd/policy_engine.c b/drivers/usb/pd/policy_engine.c index d5f067e8965f..b01e81df52f2 100644 --- a/drivers/usb/pd/policy_engine.c +++ b/drivers/usb/pd/policy_engine.c @@ -200,26 +200,18 @@ enum vdm_state { static void *usbpd_ipc_log; #define usbpd_dbg(dev, fmt, ...) do { \ - ipc_log_string(usbpd_ipc_log, "%s: %s: " fmt, dev_name(dev), __func__, \ - ##__VA_ARGS__); \ dev_dbg(dev, fmt, ##__VA_ARGS__); \ } while (0) #define usbpd_info(dev, fmt, ...) do { \ - ipc_log_string(usbpd_ipc_log, "%s: %s: " fmt, dev_name(dev), __func__, \ - ##__VA_ARGS__); \ dev_info(dev, fmt, ##__VA_ARGS__); \ } while (0) #define usbpd_warn(dev, fmt, ...) 
do { \ - ipc_log_string(usbpd_ipc_log, "%s: %s: " fmt, dev_name(dev), __func__, \ - ##__VA_ARGS__); \ dev_warn(dev, fmt, ##__VA_ARGS__); \ } while (0) #define usbpd_err(dev, fmt, ...) do { \ - ipc_log_string(usbpd_ipc_log, "%s: %s: " fmt, dev_name(dev), __func__, \ - ##__VA_ARGS__); \ dev_err(dev, fmt, ##__VA_ARGS__); \ } while (0) diff --git a/drivers/usb/phy/phy-msm-qusb-v2.c b/drivers/usb/phy/phy-msm-qusb-v2.c index 174dab19de25..058d32aa675f 100644 --- a/drivers/usb/phy/phy-msm-qusb-v2.c +++ b/drivers/usb/phy/phy-msm-qusb-v2.c @@ -921,7 +921,7 @@ static int qusb_phy_create_debugfs(struct qusb_phy *qphy) qphy->root = debugfs_create_dir(dev_name(qphy->phy.dev), NULL); if (IS_ERR_OR_NULL(qphy->root)) { - dev_err(qphy->phy.dev, + dev_dbg(qphy->phy.dev, "can't create debugfs root for %s\n", dev_name(qphy->phy.dev)); ret = -ENOMEM; diff --git a/drivers/video/backlight/backlight.c b/drivers/video/backlight/backlight.c index 1d7add9e1b7a..702baa3c5ab6 100644 --- a/drivers/video/backlight/backlight.c +++ b/drivers/video/backlight/backlight.c @@ -183,7 +183,7 @@ int backlight_device_set_brightness(struct backlight_device *bd, rc = -EINVAL; else { if ((!bd->use_count && brightness) || (bd->use_count && !brightness)) { - pr_info("%s: set brightness to %lu\n", __func__, brightness); + pr_debug("%s: set brightness to %lu\n", __func__, brightness); if (!bd->use_count) bd->use_count++; else diff --git a/firmware/goodix_gt9886_cfg_f11.bin.ihex b/firmware/goodix_gt9886_cfg_f11.bin.ihex index 131d645b3478..2e40ca776507 100644 --- a/firmware/goodix_gt9886_cfg_f11.bin.ihex +++ b/firmware/goodix_gt9886_cfg_f11.bin.ihex @@ -1,110 +1,116 @@ -:10000000C2060000C700000000020000000000005F -:1000100014006B03570300006E6F726D616E64799C -:1000200000000000000000010139383836000000EF -:1000300000000000000000000000000000000000C0 -:100040000000000000000000000000004245040025 -:100050002C454800354504003D45040041450F004E -:100060000000000000000000786F0000F330000086 
-:10007000686F000000410000000000000000000068 -:100080000000000000000000000000000019012B2B -:10009000BB011F34F188821E200401210117DE08F4 -:1000A000070400000F210000320000222112003F4F -:1000B000FFFF50025828244A270049030102040682 -:1000C00007080B090C0D1817191A1C1D1F212223D4 -:1000D000201E1B0A054BFFFFFF32343337364039F1 -:1000E0003E3A383F353B3C41FFFFFFFFFFFFFFFF3C -:1000F000FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF10 -:10010000FF00000000000000000000000042035853 -:100110000D11130E23142022211F1D1C1B181A174A -:10012000160B0C0A09070604020100030508191E34 -:1001300012FFFFFF1E1C1D191A0217121618031BAF -:10014000151401FFFFFFFFFFFFFFFFFFFFFFFFFF92 -:10015000FFFFFFFFFFFFFFFFFFFFFFFF00000000AB -:10016000000000000000000054040901020105190C -:100170002D17DE00AF050A04380924003C005A0A96 -:100180000FD906020100F7071414161A20141A22B8 -:10019000161A20161A20000047080C0C151508081E -:1001A0000040008000C002026C090919000A000A20 -:1001B000000B000AAC0A131F0503002808007894FE -:1001C0008C8783807E1D202326293D0B020A05E4AF -:1001D0000C050800000000E70D0D4138A50BB80D17 -:1001E0002D043C001E0028450E0C030A010EB43CF1 -:1001F000AA0000003250AE0F00F11000F0110B0009 -:100200000100208D4E2064230000411207000000F1 -:1002100001010000E51308025800C8000F0000B4F7 -:100220001400EC1525000000040050010502001C1C -:100230000002004000040000000505050800010060 -:100240003A984E201432000000006A160B15206404 -:100250000BB800A0006400007F1700E9180C013201 -:10026000006400C81A1F252B0000F41911010A149C -:10027000501E040A321507000A2222360000691AAD -:100280005E00040335033513B1022403350335132F -:10029000B101351FFF1FFF1FFF000000000000001D -:1002A00000020406070B18262E020406070B182668 -:1002B0002E285A145A1402C832066464005A0800E0 -:1002C00064A8025FFE0B3C1E3C3203E814000000F1 -:1002D0000070000000004000000000000000003539 -:1002E0001B00E51C0F0A0410001E001E02C0000FB8 -:1002F00000C80000E21D00E31E00E21F00E1200430 -:1003000000006996DD210A01004000010060010043 -:100310000032221322053900000000000000000016 -:10032000000000000000006B230400002000B9243E 
-:1003300000DC252007000C5C11A436763BD3061B9D -:10034000336300157C000000006B0F964035005AA7 -:10035000004B321821260427000000AF2700D928BF -:1003600000D82900D72A00D62B00D5570300006EED -:100370006F726D616E647900000000000000010082 -:10038000393838360000000000000000000000008E -:10039000000000000000000000000000000000005D -:1003A000000000424504002C454800354504003D4E -:1003B00045040041450F00000000000000000078E7 -:1003C0006F0000F3300000686F0000004100000083 -:1003D000000000000000000000000000000000001D -:1003E0000000000019012BBB011F34F18882262870 -:1003F0000401210117DE08070400000F210000326C -:100400000000222112003FFFFF4002582022231E3D -:100410001F211B1C1D0A191A170D180C090B0807A0 -:1004200005060403020100494B4A274828FFFFFF45 -:100430002E30312F2D323834333537363B4039FFAB -:10044000FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFBC -:10045000FFFFFFFFFFFFFFFF0000000000000000A4 -:1004600000000000670358030100050402080706A6 -:10047000190A090C160B171A181B1C1E1D1F202108 -:1004800022231412130E150DFFFFFF22201F21231C -:100490001E181C1D1B191A150217FFFFFFFFFFFF77 -:1004A000FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF5C -:1004B000FFFFFF000000000000000000000000EB54 -:1004C000040901020105192D17DE00AF050A0438E1 -:1004D0000924003C005A0A0FD906020100F707144C -:1004E00014161A20141A22161A20161A2000004771 -:1004F000080C0C151508080040008000C002026CB2 -:1005000009091F000A000A000B000AA60A131F05AA -:10051000030028080078948C8783807D1B1E21248B -:1005200028470B020A05E40C050800000000E70D4F -:100530000D4138A50BB81321044C001E00283B0EBA -:100540000C030A010EB43CAA0000003250AE0F00AA -:10055000F11000F0110B000100208D4E20642300EB -:100560000041120700000001010000E513080258D5 -:1005700000C8000F0000B41400EC152500000004B2 -:100580000050010502001C000200400004000000B1 -:10059000050505080001003A984E201432000000BD -:1005A000006A160B1520640BB800A0006400007FE1 -:1005B0001700E9180C0132006400C81B20262B002C -:1005C00000F11911010A14501E040A321507000A1D -:1005D0002222360000691A5E00040335033513B188 -:1005E00002240335033513B101351FFF1FFF1FFF21 
-:1005F0000000000000000000020406070B18262E71 -:10060000020406070B18262E285A145A1402C83260 -:10061000066464005A080064A8025FFE0B3C1E3C9E -:100620003203E814000000007000000000400000E9 -:10063000000000000000351B00E51C0F0A0410003C -:100640001E001E02C0000F00C80000E21D00E31ED5 -:1006500000E21F00E1200400006896DE210A01008C -:100660004000010060010000322213220636000023 -:1006700000000000000000000000000000006D23EA -:100680000400002000B92400DC252007000C5C11C8 -:10069000A436763BD3061B336300157C00000000B4 -:1006A0006B0F964035005A004B3217222604270064 -:1006B0000000AF2700D92800D82900D72A00D62B60 -:0206C00000D563 -:00000001FF +:10000000260700005D000000000200000000000064 +:1000100014009D03890300006E6F726D616E647938 +:1000200000000000000000010139383836000000EF +:1000300000000000000000000000000000000000C0 +:100040000000000000000000000000004245040025 +:100050002C454800354504003D45040041450F004E +:100060000000000000000000786F0000F330000086 +:10007000686F000000410000000000000000000068 +:10008000000000000000000000000000001E012C25 +:10009000B5011F34F188821E200401210117DE08FA +:1000A000070400000F210000320000222112003F4F +:1000B000FFFF50025828244A270049030102040682 +:1000C00007080B090C0D1817191A1C1D1F212223D4 +:1000D000201E1B0A054BFFFFFF32343337364039F1 +:1000E0003E3A383F353B3C41FFFFFFFFFFFFFFFF3C +:1000F000FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF10 +:10010000FF00000000000000000000000042035853 +:100110000D11130E23142022211F1D1C1B181A174A +:10012000160B0C0A09070604020100030508191E34 +:1001300012FFFFFF1E1C1D191A0217121618031BAF +:10014000151401FFFFFFFFFFFFFFFFFFFFFFFFFF92 +:10015000FFFFFFFFFFFFFFFFFFFFFFFF00000000AB +:10016000000000000000000054040901020105190C +:100170002D17DE00AF050A04380924003C005A0A96 +:100180000FD906020100F7071414161A20141A22B8 +:10019000161A20161A20000047080C0C151508081E +:1001A0000040008000C002026C090919000A000A20 +:1001B000000B000AAC0A131F0503002808007894FE +:1001C0008C8783807E1D202326293D0B020A05E4AF +:1001D0000C050800000000E70D0D4138A50BB80D17 
+:1001E0002D043C001E0028450E0C030A010EB43CF1 +:1001F000AA0000003250AE0F00F11000F0110B0009 +:100200000100208D4E2064230000411207000000F1 +:1002100001010000E51308025800C8000F0000B4F7 +:100220001400EC1525000000040050010502001C1C +:100230000002004000040000000505050800010060 +:100240003A984E201432000000006A160B15206404 +:100250000BB800A0006400007F1700E9180C013201 +:10026000001900781A1F252B00008F1911010A149C +:10027000501E040A321507000A2222360000691AAD +:100280005E00040335033513B1022403350335132F +:10029000B101351FFF1FFF1FFF000000000000001D +:1002A00000020406070B18262E020406070B182668 +:1002B0002E285A145A1402C832066464005A0800E0 +:1002C00064A8025FFE0B3C1E3C3203E814000000F1 +:1002D0000070000000004000000000000000003539 +:1002E0001B00E51C0F0A0410001E001E02C0000FB8 +:1002F00000C80000E21D00E31E00E21F00E1200430 +:100300000000C8789C210A01004000010060010043 +:100310000032221322053900000000000000000016 +:10032000000000000000006B230400002000B9243E +:1003300000DC252007000C5C11A436763BD3061B9D +:10034000336300157C00002D876B0F964035005AF3 +:10035000004B32186D260427000000AF2700D92873 +:1003600000D82900D72A00D62B00D52C2F01928740 +:100370008887888787898685878484858584848320 +:100380008281807F7D7D7B7B77767472706C6C0060 +:1003900000000000000000000000000021890300B0 +:1003A000006E6F726D616E647900000000000000E5 +:1003B000010039383836000000000000000000005D +:1003C000000000000000000000000000000000002D +:1003D0000000000000424504002C4548003545045B +:1003E000003D45040041450F0000000000000000F2 +:1003F00000786F0000F3300000686F0000004100DB +:1004000000000000000000000000000000000000EC +:100410000000000000001E012CB5011F34F188828D +:1004200026280401210117DE08070400000F21001F +:1004300000320000222112003FFFFF40025820221C +:10044000231E1F211B1C1D0A191A170D180C090B3E +:10045000080705060403020100494B4A274828FF04 +:10046000FFFF2E30312F2D323834333537363B40B5 +:1004700039FFFFFFFFFFFFFFFFFFFFFFFFFFFFFF52 +:10048000FFFFFFFFFFFFFFFFFFFF00000000000076 +:100490000000000000006703580301000504020883 
+:1004A0000706190A090C160B171A181B1C1E1D1F0C +:1004B000202122231412130E150DFFFFFF22201FEF +:1004C00021231E181C1D1B191A150217FFFFFFFF01 +:1004D000FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF2C +:1004E000FFFFFFFFFF000000000000000000000011 +:1004F00000EB040901020105192D17DE00AF050A02 +:1005000004380924003C005A0A0FD906020100F7FA +:10051000071414161A20141A22161A20161A20006C +:100520000047080C0C151508080040008000C002A8 +:10053000026C09091F000A000A000B000AA60A1330 +:100540001F05030028080078948C8783807D1B1E7C +:10055000212428470B020A05E40C050800000000CE +:10056000E70D0D4138A50BB81321044C001E0028DF +:100570003B0E0C030A010EB43CAA0000003250AE40 +:100580000F00F11000F0110B000100208D4E2064CF +:1005900023000041120700000001010000E51308DC +:1005A000025800C8000F0000B41400EC152500002C +:1005B00000040050010502001C000200400004007D +:1005C0000000050505080001003A984E201432008D +:1005D0000000006A160B1520640BB800A000640030 +:1005E000007F1700E9180C0132001900781B202643 +:1005F0002B00008C1911010A14501E040A32150731 +:10060000000A2222360000691A5E00040335033511 +:1006100013B102240335033513B101351FFF1FFF4A +:100620001FFF0000000000000000020406070B1876 +:10063000262E020406070B18262E285A145A1402D6 +:10064000C832066464005A080064A8025FFE0B3CCE +:100650001E3C3203E814000000007000000000405F +:100660000000000000000000351B00E51C0F0A041C +:1006700010001E001E02C0000F00C80000E21D0096 +:10068000E31E00E21F00E120040000C8789C210A5C +:1006900001004000010060010000322213220636F2 +:1006A000000000000000000000000000000000004A +:1006B0006D230400002000B92400DC252007000C75 +:1006C0005C11A436763BD3061B336300157C000017 +:1006D0002D876B0F964035005A004B32176E26045B +:1006E00027000000AF2700D92800D82900D72A000A +:1006F000D62B00D52C2F01928788878887878986FB +:100700008587848485858484838281807F7D7D7BC9 +:100710007B77767472706C6C000000000000000043 +:06072000000000000021B2 +:00000001FF diff --git a/fs/Makefile b/fs/Makefile index 184a377f10b1..2376120b9796 100644 --- a/fs/Makefile +++ b/fs/Makefile @@ -123,7 +123,11 @@ obj-$(CONFIG_NILFS2_FS) 
+= nilfs2/ obj-$(CONFIG_BEFS_FS) += befs/ obj-$(CONFIG_HOSTFS) += hostfs/ obj-$(CONFIG_CACHEFILES) += cachefiles/ -obj-$(CONFIG_DEBUG_FS) += debugfs/ +ifeq ($(CONFIG_DEBUG_FS),y) +obj-y += debugfs/ +else +obj-$(CONFIG_ANDROID) += debugfs/ +endif obj-$(CONFIG_TRACING) += tracefs/ obj-$(CONFIG_OCFS2_FS) += ocfs2/ obj-$(CONFIG_BTRFS_FS) += btrfs/ diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c index 2c433c95adb5..fe31f911a648 100644 --- a/fs/binfmt_elf.c +++ b/fs/binfmt_elf.c @@ -156,6 +156,25 @@ static int padzero(unsigned long elf_bss) #define ELF_BASE_PLATFORM NULL #endif +/* + * Use get_random_int() to implement AT_RANDOM while avoiding depletion + * of the entropy pool. + */ +static void get_atrandom_bytes(unsigned char *buf, size_t nbytes) +{ + unsigned char *p = buf; + + while (nbytes) { + unsigned int random_variable; + size_t chunk = min(nbytes, sizeof(random_variable)); + + random_variable = get_random_int(); + memcpy(p, &random_variable, chunk); + p += chunk; + nbytes -= chunk; + } +} + static int create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec, unsigned long load_addr, unsigned long interp_load_addr) @@ -215,7 +234,7 @@ create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec, /* * Generate 16 random bytes for userspace PRNG seeding. 
*/ - get_random_bytes(k_rand_bytes, sizeof(k_rand_bytes)); + get_atrandom_bytes(k_rand_bytes, sizeof(k_rand_bytes)); u_rand_bytes = (elf_addr_t __user *) STACK_ALLOC(p, sizeof(k_rand_bytes)); if (__copy_to_user(u_rand_bytes, k_rand_bytes, sizeof(k_rand_bytes))) @@ -681,7 +700,8 @@ static int load_elf_binary(struct linux_binprm *bprm) struct file *interpreter = NULL; /* to shut gcc up */ unsigned long load_addr = 0, load_bias = 0; int load_addr_set = 0; - char * elf_interpreter = NULL; + char elf_interpreter[PATH_MAX] __aligned(sizeof(long)); + bool interp_present = false; unsigned long error; struct elf_phdr *elf_ppnt, *elf_phdata, *interp_elf_phdata = NULL; unsigned long elf_bss, elf_brk; @@ -696,32 +716,26 @@ static int load_elf_binary(struct linux_binprm *bprm) struct { struct elfhdr elf_ex; struct elfhdr interp_elf_ex; - } *loc; + } loc; struct arch_elf_state arch_state = INIT_ARCH_ELF_STATE; loff_t pos; - loc = kmalloc(sizeof(*loc), GFP_KERNEL); - if (!loc) { - retval = -ENOMEM; - goto out_ret; - } - /* Get the exec-header */ - loc->elf_ex = *((struct elfhdr *)bprm->buf); + loc.elf_ex = *((struct elfhdr *)bprm->buf); retval = -ENOEXEC; /* First of all, some simple consistency checks */ - if (memcmp(loc->elf_ex.e_ident, ELFMAG, SELFMAG) != 0) + if (memcmp(loc.elf_ex.e_ident, ELFMAG, SELFMAG) != 0) goto out; - if (loc->elf_ex.e_type != ET_EXEC && loc->elf_ex.e_type != ET_DYN) + if (loc.elf_ex.e_type != ET_EXEC && loc.elf_ex.e_type != ET_DYN) goto out; - if (!elf_check_arch(&loc->elf_ex)) + if (!elf_check_arch(&loc.elf_ex)) goto out; if (!bprm->file->f_op->mmap) goto out; - elf_phdata = load_elf_phdrs(&loc->elf_ex, bprm->file); + elf_phdata = load_elf_phdrs(&loc.elf_ex, bprm->file); if (!elf_phdata) goto out; @@ -734,7 +748,7 @@ static int load_elf_binary(struct linux_binprm *bprm) start_data = 0; end_data = 0; - for (i = 0; i < loc->elf_ex.e_phnum; i++) { + for (i = 0; i < loc.elf_ex.e_phnum; i++) { if (elf_ppnt->p_type == PT_INTERP) { /* This is the program 
interpreter used for * shared libraries - for now assume that this @@ -745,29 +759,24 @@ static int load_elf_binary(struct linux_binprm *bprm) elf_ppnt->p_filesz < 2) goto out_free_ph; - retval = -ENOMEM; - elf_interpreter = kmalloc(elf_ppnt->p_filesz, - GFP_KERNEL); - if (!elf_interpreter) - goto out_free_ph; - + interp_present = true; pos = elf_ppnt->p_offset; retval = kernel_read(bprm->file, elf_interpreter, elf_ppnt->p_filesz, &pos); if (retval != elf_ppnt->p_filesz) { if (retval >= 0) retval = -EIO; - goto out_free_interp; + goto out_free_ph; } /* make sure path is NULL terminated */ retval = -ENOEXEC; if (elf_interpreter[elf_ppnt->p_filesz - 1] != '\0') - goto out_free_interp; + goto out_free_ph; interpreter = open_exec(elf_interpreter); retval = PTR_ERR(interpreter); if (IS_ERR(interpreter)) - goto out_free_interp; + goto out_free_ph; /* * If the binary is not readable then enforce @@ -778,9 +787,9 @@ static int load_elf_binary(struct linux_binprm *bprm) /* Get the exec headers */ pos = 0; - retval = kernel_read(interpreter, &loc->interp_elf_ex, - sizeof(loc->interp_elf_ex), &pos); - if (retval != sizeof(loc->interp_elf_ex)) { + retval = kernel_read(interpreter, &loc.interp_elf_ex, + sizeof(loc.interp_elf_ex), &pos); + if (retval != sizeof(loc.interp_elf_ex)) { if (retval >= 0) retval = -EIO; goto out_free_dentry; @@ -792,7 +801,7 @@ static int load_elf_binary(struct linux_binprm *bprm) } elf_ppnt = elf_phdata; - for (i = 0; i < loc->elf_ex.e_phnum; i++, elf_ppnt++) + for (i = 0; i < loc.elf_ex.e_phnum; i++, elf_ppnt++) switch (elf_ppnt->p_type) { case PT_GNU_STACK: if (elf_ppnt->p_flags & PF_X) @@ -802,7 +811,7 @@ static int load_elf_binary(struct linux_binprm *bprm) break; case PT_LOPROC ... 
PT_HIPROC: - retval = arch_elf_pt_proc(&loc->elf_ex, elf_ppnt, + retval = arch_elf_pt_proc(&loc.elf_ex, elf_ppnt, bprm->file, false, &arch_state); if (retval) @@ -811,27 +820,27 @@ static int load_elf_binary(struct linux_binprm *bprm) } /* Some simple consistency checks for the interpreter */ - if (elf_interpreter) { + if (interp_present) { retval = -ELIBBAD; /* Not an ELF interpreter */ - if (memcmp(loc->interp_elf_ex.e_ident, ELFMAG, SELFMAG) != 0) + if (memcmp(loc.interp_elf_ex.e_ident, ELFMAG, SELFMAG) != 0) goto out_free_dentry; /* Verify the interpreter has a valid arch */ - if (!elf_check_arch(&loc->interp_elf_ex)) + if (!elf_check_arch(&loc.interp_elf_ex)) goto out_free_dentry; /* Load the interpreter program headers */ - interp_elf_phdata = load_elf_phdrs(&loc->interp_elf_ex, + interp_elf_phdata = load_elf_phdrs(&loc.interp_elf_ex, interpreter); if (!interp_elf_phdata) goto out_free_dentry; /* Pass PT_LOPROC..PT_HIPROC headers to arch code */ elf_ppnt = interp_elf_phdata; - for (i = 0; i < loc->interp_elf_ex.e_phnum; i++, elf_ppnt++) + for (i = 0; i < loc.interp_elf_ex.e_phnum; i++, elf_ppnt++) switch (elf_ppnt->p_type) { case PT_LOPROC ... PT_HIPROC: - retval = arch_elf_pt_proc(&loc->interp_elf_ex, + retval = arch_elf_pt_proc(&loc.interp_elf_ex, elf_ppnt, interpreter, true, &arch_state); if (retval) @@ -845,8 +854,8 @@ static int load_elf_binary(struct linux_binprm *bprm) * still possible to return an error to the code that invoked * the exec syscall. */ - retval = arch_check_elf(&loc->elf_ex, - !!interpreter, &loc->interp_elf_ex, + retval = arch_check_elf(&loc.elf_ex, + !!interpreter, &loc.interp_elf_ex, &arch_state); if (retval) goto out_free_dentry; @@ -858,8 +867,8 @@ static int load_elf_binary(struct linux_binprm *bprm) /* Do this immediately, since STACK_TOP as used in setup_arg_pages may depend on the personality. 
*/ - SET_PERSONALITY2(loc->elf_ex, &arch_state); - if (elf_read_implies_exec(loc->elf_ex, executable_stack)) + SET_PERSONALITY2(loc.elf_ex, &arch_state); + if (elf_read_implies_exec(loc.elf_ex, executable_stack)) current->personality |= READ_IMPLIES_EXEC; if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space) @@ -880,7 +889,7 @@ static int load_elf_binary(struct linux_binprm *bprm) /* Now we do a little grungy work by mmapping the ELF image into the correct location in memory. */ for(i = 0, elf_ppnt = elf_phdata; - i < loc->elf_ex.e_phnum; i++, elf_ppnt++) { + i < loc.elf_ex.e_phnum; i++, elf_ppnt++) { int elf_prot = 0, elf_flags; unsigned long k, vaddr; unsigned long total_size = 0; @@ -929,9 +938,9 @@ static int load_elf_binary(struct linux_binprm *bprm) * If we are loading ET_EXEC or we have already performed * the ET_DYN load_addr calculations, proceed normally. */ - if (loc->elf_ex.e_type == ET_EXEC || load_addr_set) { + if (loc.elf_ex.e_type == ET_EXEC || load_addr_set) { elf_flags |= MAP_FIXED; - } else if (loc->elf_ex.e_type == ET_DYN) { + } else if (loc.elf_ex.e_type == ET_DYN) { /* * This logic is run once for the first LOAD Program * Header for ET_DYN binaries to calculate the @@ -962,7 +971,7 @@ static int load_elf_binary(struct linux_binprm *bprm) * independently randomized mmap region (0 load_bias * without MAP_FIXED). 
*/ - if (elf_interpreter) { + if (interp_present) { load_bias = ELF_ET_DYN_BASE; if (current->flags & PF_RANDOMIZE) load_bias += arch_mmap_rnd(); @@ -980,7 +989,7 @@ static int load_elf_binary(struct linux_binprm *bprm) load_bias = ELF_PAGESTART(load_bias - vaddr); total_size = total_mapping_size(elf_phdata, - loc->elf_ex.e_phnum); + loc.elf_ex.e_phnum); if (!total_size) { retval = -EINVAL; goto out_free_dentry; @@ -998,7 +1007,7 @@ static int load_elf_binary(struct linux_binprm *bprm) if (!load_addr_set) { load_addr_set = 1; load_addr = (elf_ppnt->p_vaddr - elf_ppnt->p_offset); - if (loc->elf_ex.e_type == ET_DYN) { + if (loc.elf_ex.e_type == ET_DYN) { load_bias += error - ELF_PAGESTART(load_bias + vaddr); load_addr += load_bias; @@ -1039,7 +1048,7 @@ static int load_elf_binary(struct linux_binprm *bprm) } } - loc->elf_ex.e_entry += load_bias; + loc.elf_ex.e_entry += load_bias; elf_bss += load_bias; elf_brk += load_bias; start_code += load_bias; @@ -1060,10 +1069,10 @@ static int load_elf_binary(struct linux_binprm *bprm) goto out_free_dentry; } - if (elf_interpreter) { + if (interp_present) { unsigned long interp_map_addr = 0; - elf_entry = load_elf_interp(&loc->interp_elf_ex, + elf_entry = load_elf_interp(&loc.interp_elf_ex, interpreter, &interp_map_addr, load_bias, interp_elf_phdata); @@ -1073,7 +1082,7 @@ static int load_elf_binary(struct linux_binprm *bprm) * adjustment */ interp_load_addr = elf_entry; - elf_entry += loc->interp_elf_ex.e_entry; + elf_entry += loc.interp_elf_ex.e_entry; } if (BAD_ADDR(elf_entry)) { retval = IS_ERR((void *)elf_entry) ? 
@@ -1084,9 +1093,8 @@ static int load_elf_binary(struct linux_binprm *bprm) allow_write_access(interpreter); fput(interpreter); - kfree(elf_interpreter); } else { - elf_entry = loc->elf_ex.e_entry; + elf_entry = loc.elf_ex.e_entry; if (BAD_ADDR(elf_entry)) { retval = -EINVAL; goto out_free_dentry; @@ -1099,12 +1107,12 @@ static int load_elf_binary(struct linux_binprm *bprm) set_binfmt(&elf_format); #ifdef ARCH_HAS_SETUP_ADDITIONAL_PAGES - retval = arch_setup_additional_pages(bprm, !!elf_interpreter); + retval = arch_setup_additional_pages(bprm, interp_present); if (retval < 0) goto out; #endif /* ARCH_HAS_SETUP_ADDITIONAL_PAGES */ - retval = create_elf_tables(bprm, &loc->elf_ex, + retval = create_elf_tables(bprm, &loc.elf_ex, load_addr, interp_load_addr); if (retval < 0) goto out; @@ -1124,7 +1132,7 @@ static int load_elf_binary(struct linux_binprm *bprm) * growing down), and into the unused ELF_ET_DYN_BASE region. */ if (IS_ENABLED(CONFIG_ARCH_HAS_ELF_RANDOMIZE) && - loc->elf_ex.e_type == ET_DYN && !interpreter) + loc.elf_ex.e_type == ET_DYN && !interpreter) current->mm->brk = current->mm->start_brk = ELF_ET_DYN_BASE; @@ -1161,8 +1169,6 @@ static int load_elf_binary(struct linux_binprm *bprm) start_thread(regs, elf_entry, bprm->p); retval = 0; out: - kfree(loc); -out_ret: return retval; /* error cleanup */ @@ -1171,8 +1177,6 @@ out_free_dentry: allow_write_access(interpreter); if (interpreter) fput(interpreter); -out_free_interp: - kfree(elf_interpreter); out_free_ph: kfree(elf_phdata); goto out; diff --git a/fs/compat_ioctl.c b/fs/compat_ioctl.c index e966a8c5c7c1..11df33c2d8a5 100644 --- a/fs/compat_ioctl.c +++ b/fs/compat_ioctl.c @@ -1208,13 +1208,6 @@ COMPATIBLE_IOCTL(WDIOC_SETTIMEOUT) COMPATIBLE_IOCTL(WDIOC_GETTIMEOUT) COMPATIBLE_IOCTL(WDIOC_SETPRETIMEOUT) COMPATIBLE_IOCTL(WDIOC_GETPRETIMEOUT) -/* Big R */ -COMPATIBLE_IOCTL(RNDGETENTCNT) -COMPATIBLE_IOCTL(RNDADDTOENTCNT) -COMPATIBLE_IOCTL(RNDGETPOOL) -COMPATIBLE_IOCTL(RNDADDENTROPY) 
-COMPATIBLE_IOCTL(RNDZAPENTCNT) -COMPATIBLE_IOCTL(RNDCLEARPOOL) /* Bluetooth */ COMPATIBLE_IOCTL(HCIDEVUP) COMPATIBLE_IOCTL(HCIDEVDOWN) diff --git a/fs/debugfs/Makefile b/fs/debugfs/Makefile index 840c45696668..31fdd0c0d663 100644 --- a/fs/debugfs/Makefile +++ b/fs/debugfs/Makefile @@ -1,4 +1,7 @@ debugfs-objs := inode.o file.o -obj-$(CONFIG_DEBUG_FS) += debugfs.o +ifeq ($(CONFIG_ANDROID),y) +ccflags-y := -DCONFIG_DEBUG_FS +endif +obj-y += debugfs.o diff --git a/fs/exec.c b/fs/exec.c index e8e592d2020f..2bd49fbdf38b 100644 --- a/fs/exec.c +++ b/fs/exec.c @@ -1413,7 +1413,6 @@ static void free_bprm(struct linux_binprm *bprm) /* If a binfmt changed the interp, free it. */ if (bprm->interp != bprm->filename) kfree(bprm->interp); - kfree(bprm); } int bprm_change_interp(const char *interp, struct linux_binprm *bprm) @@ -1699,7 +1698,7 @@ static int do_execveat_common(int fd, struct filename *filename, int flags) { char *pathbuf = NULL; - struct linux_binprm *bprm; + struct linux_binprm bprm; struct file *file; struct files_struct *displaced; int retval; @@ -1727,16 +1726,13 @@ static int do_execveat_common(int fd, struct filename *filename, if (retval) goto out_ret; - retval = -ENOMEM; - bprm = kzalloc(sizeof(*bprm), GFP_KERNEL); - if (!bprm) - goto out_files; + memset(&bprm, 0, sizeof(bprm)); - retval = prepare_bprm_creds(bprm); + retval = prepare_bprm_creds(&bprm); if (retval) goto out_free; - check_unsafe_exec(bprm); + check_unsafe_exec(&bprm); current->in_execve = 1; file = do_open_execat(fd, filename, flags); @@ -1746,9 +1742,9 @@ static int do_execveat_common(int fd, struct filename *filename, sched_exec(); - bprm->file = file; + bprm.file = file; if (fd == AT_FDCWD || filename->name[0] == '/') { - bprm->filename = filename->name; + bprm.filename = filename->name; } else { if (filename->name[0] == '\0') pathbuf = kasprintf(GFP_KERNEL, "/dev/fd/%d", fd); @@ -1765,41 +1761,41 @@ static int do_execveat_common(int fd, struct filename *filename, * current->files (due 
to unshare_files above). */ if (close_on_exec(fd, rcu_dereference_raw(current->files->fdt))) - bprm->interp_flags |= BINPRM_FLAGS_PATH_INACCESSIBLE; - bprm->filename = pathbuf; + bprm.interp_flags |= BINPRM_FLAGS_PATH_INACCESSIBLE; + bprm.filename = pathbuf; } - bprm->interp = bprm->filename; + bprm.interp = bprm.filename; - retval = bprm_mm_init(bprm); + retval = bprm_mm_init(&bprm); if (retval) goto out_unmark; - bprm->argc = count(argv, MAX_ARG_STRINGS); - if ((retval = bprm->argc) < 0) + bprm.argc = count(argv, MAX_ARG_STRINGS); + if ((retval = bprm.argc) < 0) goto out; - bprm->envc = count(envp, MAX_ARG_STRINGS); - if ((retval = bprm->envc) < 0) + bprm.envc = count(envp, MAX_ARG_STRINGS); + if ((retval = bprm.envc) < 0) goto out; - retval = prepare_binprm(bprm); + retval = prepare_binprm(&bprm); if (retval < 0) goto out; - retval = copy_strings_kernel(1, &bprm->filename, bprm); + retval = copy_strings_kernel(1, &bprm.filename, &bprm); if (retval < 0) goto out; - bprm->exec = bprm->p; - retval = copy_strings(bprm->envc, envp, bprm); + bprm.exec = bprm.p; + retval = copy_strings(bprm.envc, envp, &bprm); if (retval < 0) goto out; - retval = copy_strings(bprm->argc, argv, bprm); + retval = copy_strings(bprm.argc, argv, &bprm); if (retval < 0) goto out; - retval = exec_binprm(bprm); + retval = exec_binprm(&bprm); if (retval < 0) goto out; @@ -1809,7 +1805,7 @@ static int do_execveat_common(int fd, struct filename *filename, membarrier_execve(current); acct_update_integrals(current); task_numa_free(current, false); - free_bprm(bprm); + free_bprm(&bprm); kfree(pathbuf); putname(filename); if (displaced) @@ -1817,9 +1813,9 @@ static int do_execveat_common(int fd, struct filename *filename, return retval; out: - if (bprm->mm) { - acct_arg_size(bprm, 0); - mmput(bprm->mm); + if (bprm.mm) { + acct_arg_size(&bprm, 0); + mmput(bprm.mm); } out_unmark: @@ -1827,10 +1823,9 @@ out_unmark: current->in_execve = 0; out_free: - free_bprm(bprm); + free_bprm(&bprm); kfree(pathbuf); 
-out_files: if (displaced) reset_files_struct(displaced); out_ret: diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c index d7cedfaa1cc0..1821543bbc5c 100644 --- a/fs/ext4/mballoc.c +++ b/fs/ext4/mballoc.c @@ -349,7 +349,6 @@ MODULE_PARM_DESC(mballoc_debug, "Debugging level for ext4's mballoc"); * */ static struct kmem_cache *ext4_pspace_cachep; -static struct kmem_cache *ext4_ac_cachep; static struct kmem_cache *ext4_free_data_cachep; /* We create slab caches for groupinfo data structures based on the @@ -2936,18 +2935,10 @@ int __init ext4_init_mballoc(void) if (ext4_pspace_cachep == NULL) return -ENOMEM; - ext4_ac_cachep = KMEM_CACHE(ext4_allocation_context, - SLAB_RECLAIM_ACCOUNT); - if (ext4_ac_cachep == NULL) { - kmem_cache_destroy(ext4_pspace_cachep); - return -ENOMEM; - } - ext4_free_data_cachep = KMEM_CACHE(ext4_free_data, SLAB_RECLAIM_ACCOUNT); if (ext4_free_data_cachep == NULL) { kmem_cache_destroy(ext4_pspace_cachep); - kmem_cache_destroy(ext4_ac_cachep); return -ENOMEM; } return 0; @@ -2961,7 +2952,6 @@ void ext4_exit_mballoc(void) */ rcu_barrier(); kmem_cache_destroy(ext4_pspace_cachep); - kmem_cache_destroy(ext4_ac_cachep); kmem_cache_destroy(ext4_free_data_cachep); ext4_groupinfo_destroy_slabs(); } @@ -4519,7 +4509,7 @@ ext4_fsblk_t ext4_mb_new_blocks(handle_t *handle, struct ext4_allocation_request *ar, int *errp) { int freed; - struct ext4_allocation_context *ac = NULL; + struct ext4_allocation_context ac; struct ext4_sb_info *sbi; struct super_block *sb; ext4_fsblk_t block = 0; @@ -4572,52 +4562,46 @@ ext4_fsblk_t ext4_mb_new_blocks(handle_t *handle, } } - ac = kmem_cache_zalloc(ext4_ac_cachep, GFP_NOFS); - if (!ac) { - ar->len = 0; - *errp = -ENOMEM; - goto out; - } - - *errp = ext4_mb_initialize_context(ac, ar); + memset(&ac, 0, sizeof(ac)); + *errp = ext4_mb_initialize_context(&ac, ar); if (*errp) { ar->len = 0; goto out; } - ac->ac_op = EXT4_MB_HISTORY_PREALLOC; - if (!ext4_mb_use_preallocated(ac)) { - ac->ac_op = EXT4_MB_HISTORY_ALLOC; - 
ext4_mb_normalize_request(ac, ar); + ac.ac_op = EXT4_MB_HISTORY_PREALLOC; + if (!ext4_mb_use_preallocated(&ac)) { + ac.ac_op = EXT4_MB_HISTORY_ALLOC; + ext4_mb_normalize_request(&ac, ar); repeat: /* allocate space in core */ - *errp = ext4_mb_regular_allocator(ac); + *errp = ext4_mb_regular_allocator(&ac); if (*errp) goto discard_and_exit; /* as we've just preallocated more space than * user requested originally, we store allocated * space in a special descriptor */ - if (ac->ac_status == AC_STATUS_FOUND && - ac->ac_o_ex.fe_len < ac->ac_b_ex.fe_len) - *errp = ext4_mb_new_preallocation(ac); + if (ac.ac_status == AC_STATUS_FOUND && + ac.ac_o_ex.fe_len < ac.ac_b_ex.fe_len) + *errp = ext4_mb_new_preallocation(&ac); if (*errp) { discard_and_exit: - ext4_discard_allocated_blocks(ac); + ext4_discard_allocated_blocks(&ac); goto errout; } } - if (likely(ac->ac_status == AC_STATUS_FOUND)) { - *errp = ext4_mb_mark_diskspace_used(ac, handle, reserv_clstrs); + if (likely(ac.ac_status == AC_STATUS_FOUND)) { + *errp = ext4_mb_mark_diskspace_used(&ac, handle, reserv_clstrs); if (*errp) { - ext4_discard_allocated_blocks(ac); + ext4_discard_allocated_blocks(&ac); goto errout; } else { - block = ext4_grp_offs_to_block(sb, &ac->ac_b_ex); - ar->len = ac->ac_b_ex.fe_len; + block = ext4_grp_offs_to_block(sb, &ac.ac_b_ex); + ar->len = ac.ac_b_ex.fe_len; } } else { - freed = ext4_mb_discard_preallocations(sb, ac->ac_o_ex.fe_len); + freed = ext4_mb_discard_preallocations(sb, ac.ac_o_ex.fe_len); if (freed) goto repeat; *errp = -ENOSPC; @@ -4625,14 +4609,12 @@ repeat: errout: if (*errp) { - ac->ac_b_ex.fe_len = 0; + ac.ac_b_ex.fe_len = 0; ar->len = 0; - ext4_mb_show_ac(ac); + ext4_mb_show_ac(&ac); } - ext4_mb_release_context(ac); + ext4_mb_release_context(&ac); out: - if (ac) - kmem_cache_free(ext4_ac_cachep, ac); if (inquota && ar->len < inquota) dquot_free_block(ar->inode, EXT4_C2B(sbi, inquota - ar->len)); if (!ar->len) { diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c index 
3e7dde01eb89..b423641ac66f 100644 --- a/fs/fs-writeback.c +++ b/fs/fs-writeback.c @@ -1813,8 +1813,7 @@ static long wb_writeback(struct bdi_writeback *wb, * safe. */ if (work->for_kupdate) { - oldest_jif = jiffies - - msecs_to_jiffies(dirty_expire_interval * 10); + oldest_jif = jiffies - (30 * HZ); } else if (work->for_background) oldest_jif = jiffies; diff --git a/fs/inode.c b/fs/inode.c index 4e30a37ef712..d394f22bb66f 100644 --- a/fs/inode.c +++ b/fs/inode.c @@ -2123,29 +2123,6 @@ void inode_nohighmem(struct inode *inode) } EXPORT_SYMBOL(inode_nohighmem); -/** - * current_time - Return FS time - * @inode: inode. - * - * Return the current time truncated to the time granularity supported by - * the fs. - * - * Note that inode and inode->sb cannot be NULL. - * Otherwise, the function warns and returns time without truncation. - */ -struct timespec current_time(struct inode *inode) -{ - struct timespec now = current_kernel_time(); - - if (unlikely(!inode->i_sb)) { - WARN(1, "current_time() called with uninitialized super_block in the inode"); - return now; - } - - return timespec_trunc(now, inode->i_sb->s_time_gran); -} -EXPORT_SYMBOL(current_time); - /* * Generic function to check FS_IOC_SETFLAGS values and reject any invalid * configurations. diff --git a/fs/ioctl.c b/fs/ioctl.c index 9db5ddaf7ef0..a3ac33ef9cb5 100644 --- a/fs/ioctl.c +++ b/fs/ioctl.c @@ -8,6 +8,7 @@ #include #include #include +#include #include #include #include @@ -702,3 +703,37 @@ SYSCALL_DEFINE3(ioctl, unsigned int, fd, unsigned int, cmd, unsigned long, arg) fdput(f); return error; } + +#ifdef CONFIG_COMPAT +/** + * compat_ptr_ioctl - generic implementation of .compat_ioctl file operation + * + * This is not normally called as a function, but instead set in struct + * file_operations as + * + * .compat_ioctl = compat_ptr_ioctl, + * + * On most architectures, the compat_ptr_ioctl() just passes all arguments + * to the corresponding ->ioctl handler. 
The exception is arch/s390, where + * compat_ptr() clears the top bit of a 32-bit pointer value, so user space + * pointers to the second 2GB alias the first 2GB, as is the case for + * native 32-bit s390 user space. + * + * The compat_ptr_ioctl() function must therefore be used only with ioctl + * functions that either ignore the argument or pass a pointer to a + * compatible data type. + * + * If any ioctl command handled by fops->unlocked_ioctl passes a plain + * integer instead of a pointer, or any of the passed data types + * is incompatible between 32-bit and 64-bit architectures, a proper + * handler is required instead of compat_ptr_ioctl. + */ +long compat_ptr_ioctl(struct file *file, unsigned int cmd, unsigned long arg) +{ + if (!file->f_op->unlocked_ioctl) + return -ENOIOCTLCMD; + + return file->f_op->unlocked_ioctl(file, cmd, (unsigned long)compat_ptr(arg)); +} +EXPORT_SYMBOL(compat_ptr_ioctl); +#endif diff --git a/fs/kernfs/file.c b/fs/kernfs/file.c index d5b3cd692b29..6625061348c0 100644 --- a/fs/kernfs/file.c +++ b/fs/kernfs/file.c @@ -39,6 +39,15 @@ struct kernfs_open_node { struct list_head files; /* goes through kernfs_open_file.list */ }; +static struct kmem_cache *kmem_open_node_pool; +static struct kmem_cache *kmem_open_file_pool; + +void __init init_kernfs_file_pool(void) +{ + kmem_open_node_pool = KMEM_CACHE(kernfs_open_node, SLAB_HWCACHE_ALIGN | SLAB_PANIC); + kmem_open_file_pool = KMEM_CACHE(kernfs_open_file, SLAB_HWCACHE_ALIGN | SLAB_PANIC); +} + /* * kernfs_notify() may be called from any context and bounces notifications * through a work item. 
To minimize space overhead in kernfs_node, the @@ -276,6 +285,7 @@ static ssize_t kernfs_fop_write(struct file *file, const char __user *user_buf, struct kernfs_open_file *of = kernfs_of(file); const struct kernfs_ops *ops; ssize_t len; + char stack_buf[PATH_MAX + 1]; char *buf; if (of->atomic_write_len) { @@ -287,12 +297,13 @@ static ssize_t kernfs_fop_write(struct file *file, const char __user *user_buf, } buf = of->prealloc_buf; - if (buf) + if (buf) { mutex_lock(&of->prealloc_mutex); - else - buf = kmalloc(len + 1, GFP_KERNEL); - if (!buf) - return -ENOMEM; + if (!buf) + return -ENOMEM; + } else { + buf = stack_buf; + } if (copy_from_user(buf, user_buf, len)) { len = -EFAULT; @@ -326,8 +337,6 @@ static ssize_t kernfs_fop_write(struct file *file, const char __user *user_buf, out_free: if (buf == of->prealloc_buf) mutex_unlock(&of->prealloc_mutex); - else - kfree(buf); return len; } @@ -564,12 +573,13 @@ static int kernfs_get_open_node(struct kernfs_node *kn, mutex_unlock(&kernfs_open_file_mutex); if (on) { - kfree(new_on); + if (new_on) + kmem_cache_free(kmem_open_node_pool, new_on); return 0; } /* not there, initialize a new one and retry */ - new_on = kmalloc(sizeof(*new_on), GFP_KERNEL); + new_on = kmem_cache_alloc(kmem_open_node_pool, GFP_KERNEL); if (!new_on) return -ENOMEM; @@ -611,7 +621,8 @@ static void kernfs_put_open_node(struct kernfs_node *kn, spin_unlock_irqrestore(&kernfs_open_node_lock, flags); mutex_unlock(&kernfs_open_file_mutex); - kfree(on); + if (on) + kmem_cache_free(kmem_open_node_pool, on); } static int kernfs_fop_open(struct inode *inode, struct file *file) @@ -645,7 +656,7 @@ static int kernfs_fop_open(struct inode *inode, struct file *file) /* allocate a kernfs_open_file for the file */ error = -ENOMEM; - of = kzalloc(sizeof(struct kernfs_open_file), GFP_KERNEL); + of = kmem_cache_zalloc(kmem_open_file_pool, GFP_KERNEL); if (!of) goto err_out; @@ -736,7 +747,7 @@ err_seq_release: seq_release(inode, file); err_free: 
kfree(of->prealloc_buf); - kfree(of); + kmem_cache_free(kmem_open_file_pool, of); err_out: kernfs_put_active(kn); return error; @@ -780,7 +791,8 @@ static int kernfs_fop_release(struct inode *inode, struct file *filp) kernfs_put_open_node(kn, of); seq_release(inode, filp); kfree(of->prealloc_buf); - kfree(of); + if (of) + kmem_cache_free(kmem_open_file_pool, of); return 0; } diff --git a/fs/kernfs/kernfs-internal.h b/fs/kernfs/kernfs-internal.h index 0f260dcca177..d0e7a9ac95fa 100644 --- a/fs/kernfs/kernfs-internal.h +++ b/fs/kernfs/kernfs-internal.h @@ -113,6 +113,7 @@ struct kernfs_node *kernfs_find_and_get_node_by_ino(struct kernfs_root *root, */ extern const struct file_operations kernfs_file_fops; +void __init init_kernfs_file_pool(void); void kernfs_drain_open_files(struct kernfs_node *kn); /* diff --git a/fs/kernfs/mount.c b/fs/kernfs/mount.c index 5019058e0f6a..9d00722d7deb 100644 --- a/fs/kernfs/mount.c +++ b/fs/kernfs/mount.c @@ -405,7 +405,7 @@ struct super_block *kernfs_pin_sb(struct kernfs_root *root, const void *ns) void __init kernfs_init(void) { - + init_kernfs_file_pool(); /* * the slab is freed in RCU context, so kernfs_find_and_get_node_by_ino * can access the slab lock free. 
This could introduce stale nodes, diff --git a/fs/proc/cmdline.c b/fs/proc/cmdline.c index 8233e7af9389..f272eeea1efb 100644 --- a/fs/proc/cmdline.c +++ b/fs/proc/cmdline.c @@ -3,10 +3,13 @@ #include #include #include +#include + +static char new_command_line[COMMAND_LINE_SIZE]; static int cmdline_proc_show(struct seq_file *m, void *v) { - seq_puts(m, saved_command_line); + seq_puts(m, new_command_line); seq_putc(m, '\n'); return 0; } @@ -23,8 +26,39 @@ static const struct file_operations cmdline_proc_fops = { .release = single_release, }; +static void patch_flag(char *cmd, const char *flag, const char *val) +{ + size_t flag_len, val_len; + char *start, *end; + + start = strstr(cmd, flag); + if (!start) + return; + + flag_len = strlen(flag); + val_len = strlen(val); + end = start + flag_len + strcspn(start + flag_len, " "); + memmove(start + flag_len + val_len, end, strlen(end) + 1); + memcpy(start + flag_len, val, val_len); +} + +static void patch_safetynet_flags(char *cmd) +{ + patch_flag(cmd, "androidboot.verifiedbootstate=", "green"); + patch_flag(cmd, "androidboot.veritymode=", "enforcing"); + patch_flag(cmd, "androidboot.vbmeta.device_state=", "locked"); +} + static int __init proc_cmdline_init(void) { + strcpy(new_command_line, saved_command_line); + + /* + * Patch various flags from command line seen by userspace in order to + * pass SafetyNet checks. 
+ */ + patch_safetynet_flags(new_command_line); + proc_create("cmdline", 0, NULL, &cmdline_proc_fops); return 0; } diff --git a/fs/quota/quota_tree.c b/fs/quota/quota_tree.c index bb3f59bcfcf5..96a323b3d733 100644 --- a/fs/quota/quota_tree.c +++ b/fs/quota/quota_tree.c @@ -46,14 +46,37 @@ static int qtree_dqstr_in_blk(struct qtree_mem_dqinfo *info) / info->dqi_entry_size; } -static char *getdqbuf(size_t size) -{ - char *buf = kmalloc(size, GFP_NOFS); - if (!buf) - printk(KERN_WARNING - "VFS: Not enough memory for quota buffers.\n"); - return buf; -} +#define STACK_ALLOC_SIZE SZ_1K +#define GETDQBUF_NORET(size) \ + char *buf; \ + char buf_onstack[STACK_ALLOC_SIZE] __aligned(8); \ + if (unlikely(size > STACK_ALLOC_SIZE)) { \ + buf = kmalloc(size, GFP_NOFS); \ + if (!buf) \ + printk(KERN_WARNING "VFS: Not enough memory for quota buffers.\n"); \ + } else { \ + buf = buf_onstack; \ + } + +#define __GETDQBUF(size) \ + if (unlikely(size > STACK_ALLOC_SIZE)) { \ + buf = kmalloc(size, GFP_NOFS); \ + if (!buf) { \ + printk(KERN_WARNING "VFS: Not enough memory for quota buffers.\n"); \ + return -ENOMEM; \ + } \ + } else { \ + buf = buf_onstack; \ + } + +#define GETDQBUF(size) \ + char *buf; \ + char buf_onstack[STACK_ALLOC_SIZE] __aligned(8); \ + __GETDQBUF(size); + +#define FREEDQBUF() \ + if (unlikely(buf != buf_onstack)) \ + kfree(buf); static ssize_t read_blk(struct qtree_mem_dqinfo *info, uint blk, char *buf) { @@ -82,12 +105,12 @@ static ssize_t write_blk(struct qtree_mem_dqinfo *info, uint blk, char *buf) /* Remove empty block from list and return it */ static int get_free_dqblk(struct qtree_mem_dqinfo *info) { - char *buf = getdqbuf(info->dqi_usable_bs); - struct qt_disk_dqdbheader *dh = (struct qt_disk_dqdbheader *)buf; + struct qt_disk_dqdbheader *dh; int ret, blk; + GETDQBUF(info->dqi_usable_bs); + + dh = (struct qt_disk_dqdbheader *)buf; - if (!buf) - return -ENOMEM; if (info->dqi_free_blk) { blk = info->dqi_free_blk; ret = read_blk(info, blk, buf); @@ -106,7 
+129,7 @@ static int get_free_dqblk(struct qtree_mem_dqinfo *info) mark_info_dirty(info->dqi_sb, info->dqi_type); ret = blk; out_buf: - kfree(buf); + FREEDQBUF(); return ret; } @@ -128,83 +151,79 @@ static int put_free_dqblk(struct qtree_mem_dqinfo *info, char *buf, uint blk) } /* Remove given block from the list of blocks with free entries */ -static int remove_free_dqentry(struct qtree_mem_dqinfo *info, char *buf, +static int remove_free_dqentry(struct qtree_mem_dqinfo *info, char *pbuf, uint blk) { - char *tmpbuf = getdqbuf(info->dqi_usable_bs); - struct qt_disk_dqdbheader *dh = (struct qt_disk_dqdbheader *)buf; + struct qt_disk_dqdbheader *dh = (struct qt_disk_dqdbheader *)pbuf; uint nextblk = le32_to_cpu(dh->dqdh_next_free); uint prevblk = le32_to_cpu(dh->dqdh_prev_free); int err; + GETDQBUF(info->dqi_usable_bs); - if (!tmpbuf) - return -ENOMEM; if (nextblk) { - err = read_blk(info, nextblk, tmpbuf); + err = read_blk(info, nextblk, buf); if (err < 0) goto out_buf; - ((struct qt_disk_dqdbheader *)tmpbuf)->dqdh_prev_free = + ((struct qt_disk_dqdbheader *)buf)->dqdh_prev_free = dh->dqdh_prev_free; - err = write_blk(info, nextblk, tmpbuf); + err = write_blk(info, nextblk, buf); if (err < 0) goto out_buf; } if (prevblk) { - err = read_blk(info, prevblk, tmpbuf); + err = read_blk(info, prevblk, buf); if (err < 0) goto out_buf; - ((struct qt_disk_dqdbheader *)tmpbuf)->dqdh_next_free = + ((struct qt_disk_dqdbheader *)buf)->dqdh_next_free = dh->dqdh_next_free; - err = write_blk(info, prevblk, tmpbuf); + err = write_blk(info, prevblk, buf); if (err < 0) goto out_buf; } else { info->dqi_free_entry = nextblk; mark_info_dirty(info->dqi_sb, info->dqi_type); } - kfree(tmpbuf); + FREEDQBUF(); dh->dqdh_next_free = dh->dqdh_prev_free = cpu_to_le32(0); /* No matter whether write succeeds block is out of list */ - if (write_blk(info, blk, buf) < 0) + if (write_blk(info, blk, pbuf) < 0) quota_error(info->dqi_sb, "Can't write block (%u) " "with free entries", blk); return 0; 
out_buf: - kfree(tmpbuf); + FREEDQBUF(); return err; } /* Insert given block to the beginning of list with free entries */ -static int insert_free_dqentry(struct qtree_mem_dqinfo *info, char *buf, +static int insert_free_dqentry(struct qtree_mem_dqinfo *info, char *pbuf, uint blk) { - char *tmpbuf = getdqbuf(info->dqi_usable_bs); - struct qt_disk_dqdbheader *dh = (struct qt_disk_dqdbheader *)buf; + struct qt_disk_dqdbheader *dh = (struct qt_disk_dqdbheader *)pbuf; int err; + GETDQBUF(info->dqi_usable_bs); - if (!tmpbuf) - return -ENOMEM; dh->dqdh_next_free = cpu_to_le32(info->dqi_free_entry); dh->dqdh_prev_free = cpu_to_le32(0); - err = write_blk(info, blk, buf); + err = write_blk(info, blk, pbuf); if (err < 0) goto out_buf; if (info->dqi_free_entry) { - err = read_blk(info, info->dqi_free_entry, tmpbuf); + err = read_blk(info, info->dqi_free_entry, buf); if (err < 0) goto out_buf; - ((struct qt_disk_dqdbheader *)tmpbuf)->dqdh_prev_free = + ((struct qt_disk_dqdbheader *)buf)->dqdh_prev_free = cpu_to_le32(blk); - err = write_blk(info, info->dqi_free_entry, tmpbuf); + err = write_blk(info, info->dqi_free_entry, buf); if (err < 0) goto out_buf; } - kfree(tmpbuf); + FREEDQBUF(); info->dqi_free_entry = blk; mark_info_dirty(info->dqi_sb, info->dqi_type); return 0; out_buf: - kfree(tmpbuf); + FREEDQBUF(); return err; } @@ -226,11 +245,11 @@ static uint find_free_dqentry(struct qtree_mem_dqinfo *info, { uint blk, i; struct qt_disk_dqdbheader *dh; - char *buf = getdqbuf(info->dqi_usable_bs); char *ddquot; + GETDQBUF_NORET(info->dqi_usable_bs); *err = 0; - if (!buf) { + if (unlikely((buf != buf_onstack) && !buf)) { *err = -ENOMEM; return 0; } @@ -244,7 +263,7 @@ static uint find_free_dqentry(struct qtree_mem_dqinfo *info, blk = get_free_dqblk(info); if ((int)blk < 0) { *err = blk; - kfree(buf); + FREEDQBUF(); return 0; } memset(buf, 0, info->dqi_usable_bs); @@ -286,10 +305,10 @@ static uint find_free_dqentry(struct qtree_mem_dqinfo *info, dquot->dq_off = (blk << 
info->dqi_blocksize_bits) + sizeof(struct qt_disk_dqdbheader) + i * info->dqi_entry_size; - kfree(buf); + FREEDQBUF(); return blk; out_buf: - kfree(buf); + FREEDQBUF(); return 0; } @@ -297,13 +316,11 @@ out_buf: static int do_insert_tree(struct qtree_mem_dqinfo *info, struct dquot *dquot, uint *treeblk, int depth) { - char *buf = getdqbuf(info->dqi_usable_bs); int ret = 0, newson = 0, newact = 0; __le32 *ref; uint newblk; + GETDQBUF(info->dqi_usable_bs); - if (!buf) - return -ENOMEM; if (!*treeblk) { ret = get_free_dqblk(info); if (ret < 0) @@ -346,7 +363,7 @@ static int do_insert_tree(struct qtree_mem_dqinfo *info, struct dquot *dquot, put_free_dqblk(info, buf, *treeblk); } out_buf: - kfree(buf); + FREEDQBUF(); return ret; } @@ -374,10 +391,7 @@ int qtree_write_dquot(struct qtree_mem_dqinfo *info, struct dquot *dquot) int type = dquot->dq_id.type; struct super_block *sb = dquot->dq_sb; ssize_t ret; - char *ddquot = getdqbuf(info->dqi_entry_size); - - if (!ddquot) - return -ENOMEM; + GETDQBUF(info->dqi_entry_size); /* dq_off is guarded by dqio_sem */ if (!dquot->dq_off) { @@ -385,14 +399,14 @@ int qtree_write_dquot(struct qtree_mem_dqinfo *info, struct dquot *dquot) if (ret < 0) { quota_error(sb, "Error %zd occurred while creating " "quota", ret); - kfree(ddquot); + FREEDQBUF(); return ret; } } spin_lock(&dquot->dq_dqb_lock); - info->dqi_ops->mem2disk_dqblk(ddquot, dquot); + info->dqi_ops->mem2disk_dqblk(buf, dquot); spin_unlock(&dquot->dq_dqb_lock); - ret = sb->s_op->quota_write(sb, type, ddquot, info->dqi_entry_size, + ret = sb->s_op->quota_write(sb, type, buf, info->dqi_entry_size, dquot->dq_off); if (ret != info->dqi_entry_size) { quota_error(sb, "dquota write failed"); @@ -402,7 +416,7 @@ int qtree_write_dquot(struct qtree_mem_dqinfo *info, struct dquot *dquot) ret = 0; } dqstats_inc(DQST_WRITES); - kfree(ddquot); + FREEDQBUF(); return ret; } @@ -413,11 +427,9 @@ static int free_dqentry(struct qtree_mem_dqinfo *info, struct dquot *dquot, uint blk) { struct 
qt_disk_dqdbheader *dh; - char *buf = getdqbuf(info->dqi_usable_bs); int ret = 0; + GETDQBUF(info->dqi_usable_bs); - if (!buf) - return -ENOMEM; if (dquot->dq_off >> info->dqi_blocksize_bits != blk) { quota_error(dquot->dq_sb, "Quota structure has offset to " "other block (%u) than it should (%u)", blk, @@ -465,7 +477,7 @@ static int free_dqentry(struct qtree_mem_dqinfo *info, struct dquot *dquot, } dquot->dq_off = 0; /* Quota is now unattached */ out_buf: - kfree(buf); + FREEDQBUF(); return ret; } @@ -473,13 +485,12 @@ out_buf: static int remove_tree(struct qtree_mem_dqinfo *info, struct dquot *dquot, uint *blk, int depth) { - char *buf = getdqbuf(info->dqi_usable_bs); int ret = 0; uint newblk; - __le32 *ref = (__le32 *)buf; + __le32 *ref; + GETDQBUF(info->dqi_usable_bs); + ref = (__le32 *)buf; - if (!buf) - return -ENOMEM; ret = read_blk(info, *blk, buf); if (ret < 0) { quota_error(dquot->dq_sb, "Can't read quota data block %u", @@ -513,7 +524,7 @@ static int remove_tree(struct qtree_mem_dqinfo *info, struct dquot *dquot, } } out_buf: - kfree(buf); + FREEDQBUF(); return ret; } @@ -532,13 +543,11 @@ EXPORT_SYMBOL(qtree_delete_dquot); static loff_t find_block_dqentry(struct qtree_mem_dqinfo *info, struct dquot *dquot, uint blk) { - char *buf = getdqbuf(info->dqi_usable_bs); loff_t ret = 0; int i; char *ddquot; + GETDQBUF(info->dqi_usable_bs); - if (!buf) - return -ENOMEM; ret = read_blk(info, blk, buf); if (ret < 0) { quota_error(dquot->dq_sb, "Can't read quota tree " @@ -562,7 +571,7 @@ static loff_t find_block_dqentry(struct qtree_mem_dqinfo *info, qt_disk_dqdbheader) + i * info->dqi_entry_size; } out_buf: - kfree(buf); + FREEDQBUF(); return ret; } @@ -570,12 +579,11 @@ out_buf: static loff_t find_tree_dqentry(struct qtree_mem_dqinfo *info, struct dquot *dquot, uint blk, int depth) { - char *buf = getdqbuf(info->dqi_usable_bs); loff_t ret = 0; - __le32 *ref = (__le32 *)buf; + __le32 *ref; + GETDQBUF(info->dqi_usable_bs); + ref = (__le32 *)buf; - if (!buf) - 
return -ENOMEM; ret = read_blk(info, blk, buf); if (ret < 0) { quota_error(dquot->dq_sb, "Can't read quota tree block %u", @@ -591,7 +599,7 @@ static loff_t find_tree_dqentry(struct qtree_mem_dqinfo *info, else ret = find_block_dqentry(info, dquot, blk); out_buf: - kfree(buf); + FREEDQBUF(); return ret; } @@ -607,8 +615,9 @@ int qtree_read_dquot(struct qtree_mem_dqinfo *info, struct dquot *dquot) int type = dquot->dq_id.type; struct super_block *sb = dquot->dq_sb; loff_t offset; - char *ddquot; int ret = 0; + char *buf; + char buf_onstack[STACK_ALLOC_SIZE] __aligned(8); #ifdef __QUOTA_QT_PARANOIA /* Invalidated quota? */ @@ -634,10 +643,8 @@ int qtree_read_dquot(struct qtree_mem_dqinfo *info, struct dquot *dquot) } dquot->dq_off = offset; } - ddquot = getdqbuf(info->dqi_entry_size); - if (!ddquot) - return -ENOMEM; - ret = sb->s_op->quota_read(sb, type, ddquot, info->dqi_entry_size, + __GETDQBUF(info->dqi_entry_size); + ret = sb->s_op->quota_read(sb, type, buf, info->dqi_entry_size, dquot->dq_off); if (ret != info->dqi_entry_size) { if (ret >= 0) @@ -646,18 +653,18 @@ int qtree_read_dquot(struct qtree_mem_dqinfo *info, struct dquot *dquot) from_kqid(&init_user_ns, dquot->dq_id)); set_bit(DQ_FAKE_B, &dquot->dq_flags); memset(&dquot->dq_dqb, 0, sizeof(struct mem_dqblk)); - kfree(ddquot); + FREEDQBUF(); goto out; } spin_lock(&dquot->dq_dqb_lock); - info->dqi_ops->disk2mem_dqblk(dquot, ddquot); + info->dqi_ops->disk2mem_dqblk(dquot, buf); if (!dquot->dq_dqb.dqb_bhardlimit && !dquot->dq_dqb.dqb_bsoftlimit && !dquot->dq_dqb.dqb_ihardlimit && !dquot->dq_dqb.dqb_isoftlimit) set_bit(DQ_FAKE_B, &dquot->dq_flags); spin_unlock(&dquot->dq_dqb_lock); - kfree(ddquot); + FREEDQBUF(); out: dqstats_inc(DQST_READS); return ret; @@ -678,15 +685,13 @@ EXPORT_SYMBOL(qtree_release_dquot); static int find_next_id(struct qtree_mem_dqinfo *info, qid_t *id, unsigned int blk, int depth) { - char *buf = getdqbuf(info->dqi_usable_bs); - __le32 *ref = (__le32 *)buf; + __le32 *ref; ssize_t ret; 
unsigned int epb = info->dqi_usable_bs >> 2; unsigned int level_inc = 1; int i; - - if (!buf) - return -ENOMEM; + GETDQBUF(info->dqi_usable_bs); + ref = (__le32 *)buf; for (i = depth; i < info->dqi_qtree_depth - 1; i++) level_inc *= epb; @@ -715,7 +720,7 @@ static int find_next_id(struct qtree_mem_dqinfo *info, qid_t *id, goto out_buf; } out_buf: - kfree(buf); + FREEDQBUF(); return ret; } diff --git a/fs/sdcardfs/file.c b/fs/sdcardfs/file.c index 271c4c4cb760..70134f3f296c 100644 --- a/fs/sdcardfs/file.c +++ b/fs/sdcardfs/file.c @@ -23,6 +23,8 @@ #include #endif +struct kmem_cache *kmem_file_info_pool; + static ssize_t sdcardfs_read(struct file *file, char __user *buf, size_t count, loff_t *ppos) { @@ -256,7 +258,7 @@ static int sdcardfs_open(struct inode *inode, struct file *file) } file->private_data = - kzalloc(sizeof(struct sdcardfs_file_info), GFP_KERNEL); + kmem_cache_zalloc(kmem_file_info_pool, GFP_KERNEL); if (!SDCARDFS_F(file)) { err = -ENOMEM; goto out_revert_cred; @@ -278,7 +280,7 @@ static int sdcardfs_open(struct inode *inode, struct file *file) } if (err) - kfree(SDCARDFS_F(file)); + kmem_cache_free(kmem_file_info_pool, SDCARDFS_F(file)); else sdcardfs_copy_and_fix_attrs(inode, sdcardfs_lower_inode(inode)); @@ -314,7 +316,7 @@ static int sdcardfs_file_release(struct inode *inode, struct file *file) fput(lower_file); } - kfree(SDCARDFS_F(file)); + kmem_cache_free(kmem_file_info_pool, SDCARDFS_F(file)); return 0; } diff --git a/fs/sdcardfs/lookup.c b/fs/sdcardfs/lookup.c index 886aee279920..6e6e4a83366f 100644 --- a/fs/sdcardfs/lookup.c +++ b/fs/sdcardfs/lookup.c @@ -234,9 +234,12 @@ static int sdcardfs_name_match(struct dir_context *ctx, const char *name, struct qstr candidate = QSTR_INIT(name, namelen); if (qstr_case_eq(buf->to_find, &candidate)) { - memcpy(buf->name, name, namelen); - buf->name[namelen] = 0; buf->found = true; + buf->name = kmalloc(namelen + 1, GFP_KERNEL); + if (buf->name) { + memcpy(buf->name, name, namelen); + buf->name[namelen] = 
'\0'; + } return 1; } return 0; @@ -284,33 +287,34 @@ static struct dentry *__sdcardfs_lookup(struct dentry *dentry, struct sdcardfs_name_data buffer = { .ctx.actor = sdcardfs_name_match, .to_find = name, - .name = __getname(), .found = false, }; - if (!buffer.name) { - err = -ENOMEM; - goto out; - } file = dentry_open(lower_parent_path, O_RDONLY, cred); if (IS_ERR(file)) { err = PTR_ERR(file); - goto put_name; + goto err; } + err = iterate_dir(file, &buffer.ctx); fput(file); if (err) - goto put_name; + goto err; + + if (buffer.found) { + if (!buffer.name) { + err = -ENOMEM; + goto out; + } - if (buffer.found) err = vfs_path_lookup(lower_dir_dentry, lower_dir_mnt, buffer.name, 0, &lower_path); - else + kfree(buffer.name); + } else { err = -ENOENT; -put_name: - __putname(buffer.name); + } } /* no error: handle positive dentries */ @@ -359,6 +363,7 @@ found: * We don't consider ENOENT an error, and we want to return a * negative dentry. */ +err: if (err && err != -ENOENT) goto out; diff --git a/fs/sdcardfs/main.c b/fs/sdcardfs/main.c index 6e44903b3d2f..c4d60a1a2a16 100644 --- a/fs/sdcardfs/main.c +++ b/fs/sdcardfs/main.c @@ -148,10 +148,10 @@ static int parse_options(struct super_block *sb, char *options, int silent, } if (*debug) { - pr_info("sdcardfs : options - debug:%d\n", *debug); - pr_info("sdcardfs : options - uid:%d\n", + pr_debug("sdcardfs : options - debug:%d\n", *debug); + pr_debug("sdcardfs : options - uid:%d\n", opts->fs_low_uid); - pr_info("sdcardfs : options - gid:%d\n", + pr_debug("sdcardfs : options - gid:%d\n", opts->fs_low_gid); } @@ -212,9 +212,9 @@ int parse_options_remount(struct super_block *sb, char *options, int silent, } if (debug) { - pr_info("sdcardfs : options - debug:%d\n", debug); - pr_info("sdcardfs : options - gid:%d\n", vfsopts->gid); - pr_info("sdcardfs : options - mask:%d\n", vfsopts->mask); + pr_debug("sdcardfs : options - debug:%d\n", debug); + pr_debug("sdcardfs : options - gid:%d\n", vfsopts->gid); + pr_debug("sdcardfs : 
options - mask:%d\n", vfsopts->mask); } return 0; @@ -468,6 +468,12 @@ static int __init init_sdcardfs_fs(void) pr_info("Registering sdcardfs " SDCARDFS_VERSION "\n"); + kmem_file_info_pool = KMEM_CACHE(sdcardfs_file_info, SLAB_HWCACHE_ALIGN); + if (!kmem_file_info_pool) { + err = -ENOMEM; + goto err; + } + err = sdcardfs_init_inode_cache(); if (err) goto out; @@ -484,6 +490,7 @@ out: sdcardfs_destroy_dentry_cache(); packagelist_exit(); } +err: return err; } @@ -493,6 +500,7 @@ static void __exit exit_sdcardfs_fs(void) sdcardfs_destroy_dentry_cache(); packagelist_exit(); unregister_filesystem(&sdcardfs_fs_type); + kmem_cache_destroy(kmem_file_info_pool); pr_info("Completed sdcardfs module unload\n"); } diff --git a/fs/sdcardfs/sdcardfs.h b/fs/sdcardfs/sdcardfs.h index 06b25aa736c5..ef45fed0761e 100644 --- a/fs/sdcardfs/sdcardfs.h +++ b/fs/sdcardfs/sdcardfs.h @@ -648,4 +648,6 @@ static inline bool qstr_case_eq(const struct qstr *q1, const struct qstr *q2) #define QSTR_LITERAL(string) QSTR_INIT(string, sizeof(string)-1) +extern struct kmem_cache *kmem_file_info_pool; + #endif /* not _SDCARDFS_H_ */ diff --git a/fs/select.c b/fs/select.c index 160721531f0b..d027addc9b31 100644 --- a/fs/select.c +++ b/fs/select.c @@ -240,7 +240,8 @@ int poll_schedule_timeout(struct poll_wqueues *pwq, int state, set_current_state(state); if (!pwq->triggered) - rc = schedule_hrtimeout_range(expires, slack, HRTIMER_MODE_ABS); + rc = freezable_schedule_hrtimeout_range(expires, slack, + HRTIMER_MODE_ABS); __set_current_state(TASK_RUNNING); /* diff --git a/fs/xattr.c b/fs/xattr.c index 35ead9b764c6..ca362c155af8 100644 --- a/fs/xattr.c +++ b/fs/xattr.c @@ -531,6 +531,7 @@ getxattr(struct dentry *d, const char __user *name, void __user *value, ssize_t error; void *kvalue = NULL; char kname[XATTR_NAME_MAX + 1]; + char kvalue_onstack[255]; error = strncpy_from_user(kname, name, sizeof(kname)); if (error == 0 || error == sizeof(kname)) @@ -539,11 +540,15 @@ getxattr(struct dentry *d, const char 
__user *name, void __user *value, return error; if (size) { - if (size > XATTR_SIZE_MAX) - size = XATTR_SIZE_MAX; - kvalue = kvzalloc(size, GFP_KERNEL); - if (!kvalue) - return -ENOMEM; + if (size <= ARRAY_SIZE(kvalue_onstack)) { + kvalue = kvalue_onstack; + } else { + if (size > XATTR_SIZE_MAX) + size = XATTR_SIZE_MAX; + kvalue = kvzalloc(size, GFP_KERNEL); + if (!kvalue) + return -ENOMEM; + } } error = vfs_getxattr(d, kname, kvalue, size); @@ -559,7 +564,8 @@ getxattr(struct dentry *d, const char __user *name, void __user *value, error = -E2BIG; } - kvfree(kvalue); + if (kvalue != kvalue_onstack) + kvfree(kvalue); return error; } diff --git a/include/asm-generic/bitops/atomic.h b/include/asm-generic/bitops/atomic.h index 04deffaf5f7d..dd90c9792909 100644 --- a/include/asm-generic/bitops/atomic.h +++ b/include/asm-generic/bitops/atomic.h @@ -2,189 +2,67 @@ #ifndef _ASM_GENERIC_BITOPS_ATOMIC_H_ #define _ASM_GENERIC_BITOPS_ATOMIC_H_ -#include -#include - -#ifdef CONFIG_SMP -#include -#include /* we use L1_CACHE_BYTES */ - -/* Use an array of spinlocks for our atomic_ts. - * Hash function to index into a different SPINLOCK. - * Since "a" is usually an address, use one spinlock per cacheline. 
- */ -# define ATOMIC_HASH_SIZE 4 -# define ATOMIC_HASH(a) (&(__atomic_hash[ (((unsigned long) a)/L1_CACHE_BYTES) & (ATOMIC_HASH_SIZE-1) ])) - -extern arch_spinlock_t __atomic_hash[ATOMIC_HASH_SIZE] __lock_aligned; - -/* Can't use raw_spin_lock_irq because of #include problems, so - * this is the substitute */ -#define _atomic_spin_lock_irqsave(l,f) do { \ - arch_spinlock_t *s = ATOMIC_HASH(l); \ - local_irq_save(f); \ - arch_spin_lock(s); \ -} while(0) - -#define _atomic_spin_unlock_irqrestore(l,f) do { \ - arch_spinlock_t *s = ATOMIC_HASH(l); \ - arch_spin_unlock(s); \ - local_irq_restore(f); \ -} while(0) - - -#else -# define _atomic_spin_lock_irqsave(l,f) do { local_irq_save(f); } while (0) -# define _atomic_spin_unlock_irqrestore(l,f) do { local_irq_restore(f); } while (0) -#endif +#include +#include +#include /* - * NMI events can occur at any time, including when interrupts have been - * disabled by *_irqsave(). So you can get NMI events occurring while a - * *_bit function is holding a spin lock. If the NMI handler also wants - * to do bit manipulation (and they do) then you can get a deadlock - * between the original caller of *_bit() and the NMI handler. - * - * by Keith Owens + * Implementation of atomic bitops using atomic-fetch ops. + * See Documentation/atomic_bitops.txt for details. */ -/** - * set_bit - Atomically set a bit in memory - * @nr: the bit to set - * @addr: the address to start counting from - * - * This function is atomic and may not be reordered. See __set_bit() - * if you do not require the atomic guarantees. - * - * Note: there are no guarantees that this function will not be reordered - * on non x86 architectures, so if you are writing portable code, - * make sure not to rely on its reordering guarantees. - * - * Note that @nr may be almost arbitrarily large; this function is not - * restricted to acting on a single-word quantity. 
- */ -static inline void set_bit(int nr, volatile unsigned long *addr) +static inline void set_bit(unsigned int nr, volatile unsigned long *p) { - unsigned long mask = BIT_MASK(nr); - unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr); - unsigned long flags; - - _atomic_spin_lock_irqsave(p, flags); - *p |= mask; - _atomic_spin_unlock_irqrestore(p, flags); + p += BIT_WORD(nr); + atomic_long_or(BIT_MASK(nr), (atomic_long_t *)p); } -/** - * clear_bit - Clears a bit in memory - * @nr: Bit to clear - * @addr: Address to start counting from - * - * clear_bit() is atomic and may not be reordered. However, it does - * not contain a memory barrier, so if it is used for locking purposes, - * you should call smp_mb__before_atomic() and/or smp_mb__after_atomic() - * in order to ensure changes are visible on other processors. - */ -static inline void clear_bit(int nr, volatile unsigned long *addr) +static inline void clear_bit(unsigned int nr, volatile unsigned long *p) { - unsigned long mask = BIT_MASK(nr); - unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr); - unsigned long flags; - - _atomic_spin_lock_irqsave(p, flags); - *p &= ~mask; - _atomic_spin_unlock_irqrestore(p, flags); + p += BIT_WORD(nr); + atomic_long_andnot(BIT_MASK(nr), (atomic_long_t *)p); } -/** - * change_bit - Toggle a bit in memory - * @nr: Bit to change - * @addr: Address to start counting from - * - * change_bit() is atomic and may not be reordered. It may be - * reordered on other architectures than x86. - * Note that @nr may be almost arbitrarily large; this function is not - * restricted to acting on a single-word quantity. 
- */ -static inline void change_bit(int nr, volatile unsigned long *addr) +static inline void change_bit(unsigned int nr, volatile unsigned long *p) { - unsigned long mask = BIT_MASK(nr); - unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr); - unsigned long flags; - - _atomic_spin_lock_irqsave(p, flags); - *p ^= mask; - _atomic_spin_unlock_irqrestore(p, flags); + p += BIT_WORD(nr); + atomic_long_xor(BIT_MASK(nr), (atomic_long_t *)p); } -/** - * test_and_set_bit - Set a bit and return its old value - * @nr: Bit to set - * @addr: Address to count from - * - * This operation is atomic and cannot be reordered. - * It may be reordered on other architectures than x86. - * It also implies a memory barrier. - */ -static inline int test_and_set_bit(int nr, volatile unsigned long *addr) +static inline int test_and_set_bit(unsigned int nr, volatile unsigned long *p) { + long old; unsigned long mask = BIT_MASK(nr); - unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr); - unsigned long old; - unsigned long flags; - _atomic_spin_lock_irqsave(p, flags); - old = *p; - *p = old | mask; - _atomic_spin_unlock_irqrestore(p, flags); + p += BIT_WORD(nr); + if (READ_ONCE(*p) & mask) + return 1; - return (old & mask) != 0; + old = atomic_long_fetch_or(mask, (atomic_long_t *)p); + return !!(old & mask); } -/** - * test_and_clear_bit - Clear a bit and return its old value - * @nr: Bit to clear - * @addr: Address to count from - * - * This operation is atomic and cannot be reordered. - * It can be reorderdered on other architectures other than x86. - * It also implies a memory barrier. 
- */ -static inline int test_and_clear_bit(int nr, volatile unsigned long *addr) +static inline int test_and_clear_bit(unsigned int nr, volatile unsigned long *p) { + long old; unsigned long mask = BIT_MASK(nr); - unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr); - unsigned long old; - unsigned long flags; - _atomic_spin_lock_irqsave(p, flags); - old = *p; - *p = old & ~mask; - _atomic_spin_unlock_irqrestore(p, flags); + p += BIT_WORD(nr); + if (!(READ_ONCE(*p) & mask)) + return 0; - return (old & mask) != 0; + old = atomic_long_fetch_andnot(mask, (atomic_long_t *)p); + return !!(old & mask); } -/** - * test_and_change_bit - Change a bit and return its old value - * @nr: Bit to change - * @addr: Address to count from - * - * This operation is atomic and cannot be reordered. - * It also implies a memory barrier. - */ -static inline int test_and_change_bit(int nr, volatile unsigned long *addr) +static inline int test_and_change_bit(unsigned int nr, volatile unsigned long *p) { + long old; unsigned long mask = BIT_MASK(nr); - unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr); - unsigned long old; - unsigned long flags; - _atomic_spin_lock_irqsave(p, flags); - old = *p; - *p = old ^ mask; - _atomic_spin_unlock_irqrestore(p, flags); - - return (old & mask) != 0; + p += BIT_WORD(nr); + old = atomic_long_fetch_xor(mask, (atomic_long_t *)p); + return !!(old & mask); } #endif /* _ASM_GENERIC_BITOPS_ATOMIC_H */ diff --git a/include/linux/audit.h b/include/linux/audit.h index cb708eb8accc..00a489f12c91 100644 --- a/include/linux/audit.h +++ b/include/linux/audit.h @@ -212,6 +212,10 @@ static inline int audit_log_task_context(struct audit_buffer *ab) static inline void audit_log_task_info(struct audit_buffer *ab, struct task_struct *tsk) { } +static inline int audit_update_lsm_rules(void) +{ + return 0; +} #define audit_enabled 0 #endif /* CONFIG_AUDIT */ diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index 9d5f49008625..38eb332d8d4d 100644 --- 
a/include/linux/blkdev.h +++ b/include/linux/blkdev.h @@ -655,13 +655,12 @@ struct request_queue { #define QUEUE_FLAG_QUIESCED 28 /* queue has been quiesced */ #define QUEUE_FLAG_INLINECRYPT 29 /* inline encryption support */ -#define QUEUE_FLAG_DEFAULT ((1 << QUEUE_FLAG_IO_STAT) | \ +#define QUEUE_FLAG_DEFAULT ((1 << QUEUE_FLAG_NONROT) | \ (1 << QUEUE_FLAG_STACKABLE) | \ - (1 << QUEUE_FLAG_SAME_COMP) | \ + (1 << QUEUE_FLAG_SAME_COMP) | \ (1 << QUEUE_FLAG_ADD_RANDOM)) -#define QUEUE_FLAG_MQ_DEFAULT ((1 << QUEUE_FLAG_IO_STAT) | \ - (1 << QUEUE_FLAG_STACKABLE) | \ +#define QUEUE_FLAG_MQ_DEFAULT ((1 << QUEUE_FLAG_STACKABLE) | \ (1 << QUEUE_FLAG_SAME_COMP) | \ (1 << QUEUE_FLAG_POLL)) diff --git a/include/linux/compiler-gcc.h b/include/linux/compiler-gcc.h index 02cb5761fef4..224c524243fe 100644 --- a/include/linux/compiler-gcc.h +++ b/include/linux/compiler-gcc.h @@ -90,8 +90,7 @@ * of extern inline functions at link time. * A lot of inline functions can cause havoc with function tracing. */ -#if !defined(CONFIG_ARCH_SUPPORTS_OPTIMIZED_INLINING) || \ - !defined(CONFIG_OPTIMIZE_INLINING) || (__GNUC__ < 4) +#if !defined(CONFIG_OPTIMIZE_INLINING) || (__GNUC__ < 4) #define inline \ inline __attribute__((always_inline, unused)) notrace __gnu_inline #else diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h index 6d1e97606273..8cfb83625b67 100644 --- a/include/linux/cpufreq.h +++ b/include/linux/cpufreq.h @@ -579,17 +579,6 @@ struct governor_attr { size_t count); }; -static inline bool cpufreq_can_do_remote_dvfs(struct cpufreq_policy *policy) -{ - /* - * Allow remote callbacks if: - * - dvfs_possible_from_any_cpu flag is set - * - the local and remote CPUs share cpufreq policy - */ - return policy->dvfs_possible_from_any_cpu || - cpumask_test_cpu(smp_processor_id(), policy->cpus); -} - /********************************************************************* * FREQUENCY TABLE HELPERS * *********************************************************************/ diff --git 
a/include/linux/cpuidle.h b/include/linux/cpuidle.h index 113a14833ad3..4d4c1065dbfb 100644 --- a/include/linux/cpuidle.h +++ b/include/linux/cpuidle.h @@ -277,4 +277,12 @@ static inline int cpuidle_register_governor(struct cpuidle_governor *gov) __ret ? -1 : idx; \ }) +#ifdef CONFIG_SMP +void cpuidle_set_idle_cpu(unsigned int cpu); +void cpuidle_clear_idle_cpu(unsigned int cpu); +#else +static inline void cpuidle_set_idle_cpu(unsigned int cpu) { } +static inline void cpuidle_clear_idle_cpu(unsigned int cpu) { } +#endif + #endif /* _LINUX_CPUIDLE_H */ diff --git a/include/linux/dcache.h b/include/linux/dcache.h index ebaa74241fdb..7df967d0f2ea 100644 --- a/include/linux/dcache.h +++ b/include/linux/dcache.h @@ -76,7 +76,7 @@ extern struct dentry_stat_t dentry_stat; * large memory footprint increase). */ #ifdef CONFIG_64BIT -# define DNAME_INLINE_LEN 32 /* 192 bytes */ +# define DNAME_INLINE_LEN 32 + 192 /* 384 bytes */ #else # ifdef CONFIG_SMP # define DNAME_INLINE_LEN 36 /* 128 bytes */ diff --git a/include/linux/dma-buf.h b/include/linux/dma-buf.h index a3d4da1cb14d..28959a47adcb 100644 --- a/include/linux/dma-buf.h +++ b/include/linux/dma-buf.h @@ -433,6 +433,8 @@ struct dma_buf { } cb_excl, cb_shared; struct list_head refs; + + bool from_kmem; }; /** diff --git a/include/linux/dma-mapping-fast.h b/include/linux/dma-mapping-fast.h index b0d821e04424..e9dabab33a48 100644 --- a/include/linux/dma-mapping-fast.h +++ b/include/linux/dma-mapping-fast.h @@ -1,4 +1,4 @@ -/* Copyright (c) 2016-2018, The Linux Foundation. All rights reserved. +/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved. 
* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -17,7 +17,6 @@ #include struct dma_iommu_mapping; -struct io_pgtable_ops; struct dma_fast_smmu_mapping { struct device *dev; @@ -36,10 +35,12 @@ struct dma_fast_smmu_mapping { bool have_stale_tlbs; dma_addr_t pgtbl_dma_handle; - struct io_pgtable_ops *pgtbl_ops; + av8l_fast_iopte *pgtbl_pmds; spinlock_t lock; struct notifier_block notifier; + + int is_smmu_pt_coherent; }; #ifdef CONFIG_IOMMU_IO_PGTABLE_FAST diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h index b8512af82bcf..bd81410f5f7e 100644 --- a/include/linux/dma-mapping.h +++ b/include/linux/dma-mapping.h @@ -578,7 +578,6 @@ static inline void dma_free_attrs(struct device *dev, size_t size, const struct dma_map_ops *ops = get_dma_ops(dev); BUG_ON(!ops); - WARN_ON(irqs_disabled()); if (dma_release_from_dev_coherent(dev, get_order(size), cpu_addr)) return; diff --git a/include/linux/freezer.h b/include/linux/freezer.h index 3995df1d068f..22f32f2cbc53 100644 --- a/include/linux/freezer.h +++ b/include/linux/freezer.h @@ -72,6 +72,7 @@ extern bool set_freezable(void); #ifdef CONFIG_CGROUP_FREEZER extern bool cgroup_freezing(struct task_struct *task); +extern bool cgroup_freezer_killable(struct task_struct *task); #else /* !CONFIG_CGROUP_FREEZER */ static inline bool cgroup_freezing(struct task_struct *task) { diff --git a/include/linux/fs.h b/include/linux/fs.h index dd472c614c75..86fb38c6ebd2 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h @@ -1479,7 +1479,27 @@ static inline void i_gid_write(struct inode *inode, gid_t gid) inode->i_gid = make_kgid(inode->i_sb->s_user_ns, gid); } -extern struct timespec current_time(struct inode *inode); +/** + * current_time - Return FS time + * @inode: inode. + * + * Return the current time truncated to the time granularity supported by + * the fs. + * + * Note that inode and inode->sb cannot be NULL. 
+ * Otherwise, the function warns and returns time without truncation. + */ +static inline struct timespec current_time(struct inode *inode) +{ + struct timespec now = current_kernel_time(); + + if (unlikely(!inode->i_sb)) { + WARN(1, "current_time() called with uninitialized super_block in the inode"); + return now; + } + + return timespec_trunc(now, inode->i_sb->s_time_gran); +} /* * Snapshotting support. @@ -1628,6 +1648,13 @@ extern struct dentry *vfs_tmpfile(struct vfsmount *mnt, struct dentry *dentry, umode_t mode, int open_flag); +#ifdef CONFIG_COMPAT +extern long compat_ptr_ioctl(struct file *file, unsigned int cmd, + unsigned long arg); +#else +#define compat_ptr_ioctl NULL +#endif + /* * VFS file helper functions. */ diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h index 17150610b68e..0a145ca24f0d 100644 --- a/include/linux/interrupt.h +++ b/include/linux/interrupt.h @@ -63,6 +63,8 @@ * interrupt handler after suspending interrupts. For system * wakeup devices users need to implement wakeup detection in * their interrupt handlers. + * IRQF_PERF_CRITICAL - Interrupt is critical to the overall performance of the + * system and should be processed on a fast CPU. */ #define IRQF_SHARED 0x00000080 #define IRQF_PROBE_SHARED 0x00000100 @@ -76,6 +78,7 @@ #define IRQF_NO_THREAD 0x00010000 #define IRQF_EARLY_RESUME 0x00020000 #define IRQF_COND_SUSPEND 0x00040000 +#define IRQF_PERF_CRITICAL 0x00080000 #define IRQF_TIMER (__IRQF_TIMER | IRQF_NO_SUSPEND | IRQF_NO_THREAD) @@ -218,10 +221,13 @@ extern void enable_irq(unsigned int irq); extern void enable_percpu_irq(unsigned int irq, unsigned int type); extern bool irq_percpu_is_enabled(unsigned int irq); extern void irq_wake_thread(unsigned int irq, void *dev_id); +extern void irq_set_perf_affinity(unsigned int irq); /* The following three functions are for the core kernel use only. 
*/ extern void suspend_device_irqs(void); extern void resume_device_irqs(void); +extern void unaffine_perf_irqs(void); +extern void reaffine_perf_irqs(void); /** * struct irq_affinity_notify - context for notification of IRQ affinity changes diff --git a/include/linux/io-pgtable-fast.h b/include/linux/io-pgtable-fast.h index 1d5e993c9d22..78b069369eb7 100644 --- a/include/linux/io-pgtable-fast.h +++ b/include/linux/io-pgtable-fast.h @@ -15,52 +15,13 @@ #include -/* - * This ought to be private to io-pgtable-fast, but dma-mapping-fast - * currently requires it for a debug usecase. - */ typedef u64 av8l_fast_iopte; -struct io_pgtable_ops; - -#ifdef CONFIG_IOMMU_IO_PGTABLE_FAST - -int av8l_fast_map_public(struct io_pgtable_ops *ops, unsigned long iova, - phys_addr_t paddr, size_t size, int prot); - -void av8l_fast_unmap_public(struct io_pgtable_ops *ops, unsigned long iova, - size_t size); - -bool av8l_fast_iova_coherent_public(struct io_pgtable_ops *ops, - unsigned long iova); - -phys_addr_t av8l_fast_iova_to_phys_public(struct io_pgtable_ops *ops, - unsigned long iova); -#else -static inline int -av8l_fast_map_public(struct io_pgtable_ops *ops, unsigned long iova, - phys_addr_t paddr, size_t size, int prot) -{ - return -EINVAL; -} -static inline void av8l_fast_unmap_public(struct io_pgtable_ops *ops, - unsigned long iova, size_t size) -{ -} - -static inline bool av8l_fast_iova_coherent_public(struct io_pgtable_ops *ops, - unsigned long iova) -{ - return false; -} -static inline phys_addr_t -av8l_fast_iova_to_phys_public(struct io_pgtable_ops *ops, - unsigned long iova) -{ - return 0; -} -#endif /* CONFIG_IOMMU_IO_PGTABLE_FAST */ +#define iopte_pmd_offset(pmds, iova) (pmds + (iova >> 12)) +int av8l_fast_map_public(av8l_fast_iopte *ptep, phys_addr_t paddr, size_t size, + int prot); +void av8l_fast_unmap_public(av8l_fast_iopte *ptep, size_t size); /* events for notifiers passed to av8l_register_notify */ #define MAPPED_OVER_STALE_TLB 1 @@ -75,18 +36,14 @@ 
av8l_fast_iova_to_phys_public(struct io_pgtable_ops *ops, */ #define AV8L_FAST_PTE_UNMAPPED_NEED_TLBI 0xa -void av8l_fast_clear_stale_ptes(struct io_pgtable_ops *ops, u64 base, - u64 start, u64 end, bool skip_sync); +void av8l_fast_clear_stale_ptes(av8l_fast_iopte *puds, bool skip_sync); void av8l_register_notify(struct notifier_block *nb); #else /* !CONFIG_IOMMU_IO_PGTABLE_FAST_PROVE_TLB */ #define AV8L_FAST_PTE_UNMAPPED_NEED_TLBI 0 -static inline void av8l_fast_clear_stale_ptes(struct io_pgtable_ops *ops, - u64 base, - u64 start, - u64 end, +static inline void av8l_fast_clear_stale_ptes(av8l_fast_iopte *puds, bool skip_sync) { } diff --git a/include/linux/iommu.h b/include/linux/iommu.h index 2c9fffc697fb..3e4f4fedda98 100644 --- a/include/linux/iommu.h +++ b/include/linux/iommu.h @@ -74,7 +74,7 @@ struct iommu_domain_geometry { }; struct iommu_pgtbl_info { - void *ops; + void *pmds; }; /* Domain feature flags */ diff --git a/include/linux/jiffies.h b/include/linux/jiffies.h index a27cf6652327..65bd1a32e982 100644 --- a/include/linux/jiffies.h +++ b/include/linux/jiffies.h @@ -286,19 +286,60 @@ extern unsigned long preset_lpj; #endif /* - * Convert various time units to each other: + * Convert jiffies to milliseconds and back. 
+ * + * Avoid unnecessary multiplications/divisions in the + * two most common HZ cases: */ -extern unsigned int jiffies_to_msecs(const unsigned long j); -extern unsigned int jiffies_to_usecs(const unsigned long j); +static inline unsigned int jiffies_to_msecs(const unsigned long j) +{ +#if HZ <= MSEC_PER_SEC && !(MSEC_PER_SEC % HZ) + return (MSEC_PER_SEC / HZ) * j; +#elif HZ > MSEC_PER_SEC && !(HZ % MSEC_PER_SEC) + return (j + (HZ / MSEC_PER_SEC) - 1)/(HZ / MSEC_PER_SEC); +#else +# if BITS_PER_LONG == 32 + return (HZ_TO_MSEC_MUL32 * j + (1ULL << HZ_TO_MSEC_SHR32) - 1) >> + HZ_TO_MSEC_SHR32; +# else + return DIV_ROUND_UP(j * HZ_TO_MSEC_NUM, HZ_TO_MSEC_DEN); +# endif +#endif +} + +static inline unsigned int jiffies_to_usecs(const unsigned long j) +{ + /* + * Hz usually doesn't go much further MSEC_PER_SEC. + * jiffies_to_usecs() and usecs_to_jiffies() depend on that. + */ + BUILD_BUG_ON(HZ > USEC_PER_SEC); + +#if !(USEC_PER_SEC % HZ) + return (USEC_PER_SEC / HZ) * j; +#else +# if BITS_PER_LONG == 32 + return (HZ_TO_USEC_MUL32 * j) >> HZ_TO_USEC_SHR32; +# else + return (j * HZ_TO_USEC_NUM) / HZ_TO_USEC_DEN; +# endif +#endif +} static inline u64 jiffies_to_nsecs(const unsigned long j) { return (u64)jiffies_to_usecs(j) * NSEC_PER_USEC; } -extern u64 jiffies64_to_nsecs(u64 j); +static inline u64 jiffies64_to_nsecs(u64 j) +{ +#if !(NSEC_PER_SEC % HZ) + return (NSEC_PER_SEC / HZ) * j; +# else + return div_u64(j * HZ_TO_NSEC_NUM, HZ_TO_NSEC_DEN); +#endif +} -extern unsigned long __msecs_to_jiffies(const unsigned int m); #if HZ <= MSEC_PER_SEC && !(MSEC_PER_SEC % HZ) /* * HZ is equal to or smaller than 1000, and 1000 is a nice round @@ -335,6 +376,41 @@ static inline unsigned long _msecs_to_jiffies(const unsigned int m) return (MSEC_TO_HZ_MUL32 * m + MSEC_TO_HZ_ADJ32) >> MSEC_TO_HZ_SHR32; } #endif + +/** + * msecs_to_jiffies: - convert milliseconds to jiffies + * @m: time in milliseconds + * + * conversion is done as follows: + * + * - negative values mean 'infinite 
timeout' (MAX_JIFFY_OFFSET) + * + * - 'too large' values [that would result in larger than + * MAX_JIFFY_OFFSET values] mean 'infinite timeout' too. + * + * - all other values are converted to jiffies by either multiplying + * the input value by a factor or dividing it with a factor and + * handling any 32-bit overflows. + * for the details see __msecs_to_jiffies() + * + * msecs_to_jiffies() checks for the passed in value being a constant + * via __builtin_constant_p() allowing gcc to eliminate most of the + * code, __msecs_to_jiffies() is called if the value passed does not + * allow constant folding and the actual conversion must be done at + * runtime. + * the _msecs_to_jiffies helpers are the HZ dependent conversion + * routines found in include/linux/jiffies.h + */ +static inline unsigned long __msecs_to_jiffies(const unsigned int m) +{ + /* + * Negative value, means infinite timeout: + */ + if ((int)m < 0) + return MAX_JIFFY_OFFSET; + return _msecs_to_jiffies(m); +} + /** * msecs_to_jiffies: - convert milliseconds to jiffies * @m: time in milliseconds @@ -371,7 +447,6 @@ static __always_inline unsigned long msecs_to_jiffies(const unsigned int m) } } -extern unsigned long __usecs_to_jiffies(const unsigned int u); #if !(USEC_PER_SEC % HZ) static inline unsigned long _usecs_to_jiffies(const unsigned int u) { @@ -385,6 +460,13 @@ static inline unsigned long _usecs_to_jiffies(const unsigned int u) } #endif +static inline unsigned long __usecs_to_jiffies(const unsigned int u) +{ + if (u > jiffies_to_usecs(MAX_JIFFY_OFFSET)) + return MAX_JIFFY_OFFSET; + return _usecs_to_jiffies(u); +} + /** * usecs_to_jiffies: - convert microseconds to jiffies * @u: time in microseconds @@ -418,9 +500,61 @@ static __always_inline unsigned long usecs_to_jiffies(const unsigned int u) } } -extern unsigned long timespec64_to_jiffies(const struct timespec64 *value); -extern void jiffies_to_timespec64(const unsigned long jiffies, - struct timespec64 *value); +/* + * The TICK_NSEC - 1 
rounds up the value to the next resolution. Note + * that a remainder subtract here would not do the right thing as the + * resolution values don't fall on second boundries. I.e. the line: + * nsec -= nsec % TICK_NSEC; is NOT a correct resolution rounding. + * Note that due to the small error in the multiplier here, this + * rounding is incorrect for sufficiently large values of tv_nsec, but + * well formed timespecs should have tv_nsec < NSEC_PER_SEC, so we're + * OK. + * + * Rather, we just shift the bits off the right. + * + * The >> (NSEC_JIFFIE_SC - SEC_JIFFIE_SC) converts the scaled nsec + * value to a scaled second value. + */ +static inline unsigned long +__timespec64_to_jiffies(u64 sec, long nsec) +{ + nsec = nsec + TICK_NSEC - 1; + + if (sec >= MAX_SEC_IN_JIFFIES){ + sec = MAX_SEC_IN_JIFFIES; + nsec = 0; + } + return ((sec * SEC_CONVERSION) + + (((u64)nsec * NSEC_CONVERSION) >> + (NSEC_JIFFIE_SC - SEC_JIFFIE_SC))) >> SEC_JIFFIE_SC; + +} + +static inline unsigned long +__timespec_to_jiffies(unsigned long sec, long nsec) +{ + return __timespec64_to_jiffies((u64)sec, nsec); +} + +static inline unsigned long +timespec64_to_jiffies(const struct timespec64 *value) +{ + return __timespec64_to_jiffies(value->tv_sec, value->tv_nsec); +} + +static inline void +jiffies_to_timespec64(const unsigned long jiffies, struct timespec64 *value) +{ + /* + * Convert jiffies to nanoseconds and separate with + * one divide. 
+ */ + u32 rem; + value->tv_sec = div_u64_rem((u64)jiffies * TICK_NSEC, + NSEC_PER_SEC, &rem); + value->tv_nsec = rem; +} + static inline unsigned long timespec_to_jiffies(const struct timespec *value) { struct timespec64 ts = timespec_to_timespec64(*value); @@ -437,21 +571,163 @@ static inline void jiffies_to_timespec(const unsigned long jiffies, *value = timespec64_to_timespec(ts); } -extern unsigned long timeval_to_jiffies(const struct timeval *value); -extern void jiffies_to_timeval(const unsigned long jiffies, - struct timeval *value); +/* + * We could use a similar algorithm to timespec_to_jiffies (with a + * different multiplier for usec instead of nsec). But this has a + * problem with rounding: we can't exactly add TICK_NSEC - 1 to the + * usec value, since it's not necessarily integral. + * + * We could instead round in the intermediate scaled representation + * (i.e. in units of 1/2^(large scale) jiffies) but that's also + * perilous: the scaling introduces a small positive error, which + * combined with a division-rounding-upward (i.e. adding 2^(scale) - 1 + * units to the intermediate before shifting) leads to accidental + * overflow and overestimates. + * + * At the cost of one additional multiplication by a constant, just + * use the timespec implementation. + */ +static inline unsigned long +timeval_to_jiffies(const struct timeval *value) +{ + return __timespec_to_jiffies(value->tv_sec, + value->tv_usec * NSEC_PER_USEC); +} + +static inline void jiffies_to_timeval(const unsigned long jiffies, struct timeval *value) +{ + /* + * Convert jiffies to nanoseconds and separate with + * one divide. + */ + u32 rem; + + value->tv_sec = div_u64_rem((u64)jiffies * TICK_NSEC, + NSEC_PER_SEC, &rem); + value->tv_usec = rem / NSEC_PER_USEC; +} + +/* + * Convert jiffies/jiffies_64 to clock_t and back. 
+ */ +static inline clock_t jiffies_to_clock_t(unsigned long x) +{ +#if (TICK_NSEC % (NSEC_PER_SEC / USER_HZ)) == 0 +# if HZ < USER_HZ + return x * (USER_HZ / HZ); +# else + return x / (HZ / USER_HZ); +# endif +#else + return div_u64((u64)x * TICK_NSEC, NSEC_PER_SEC / USER_HZ); +#endif +} -extern clock_t jiffies_to_clock_t(unsigned long x); static inline clock_t jiffies_delta_to_clock_t(long delta) { return jiffies_to_clock_t(max(0L, delta)); } -extern unsigned long clock_t_to_jiffies(unsigned long x); -extern u64 jiffies_64_to_clock_t(u64 x); -extern u64 nsec_to_clock_t(u64 x); -extern u64 nsecs_to_jiffies64(u64 n); -extern unsigned long nsecs_to_jiffies(u64 n); +static inline unsigned long clock_t_to_jiffies(unsigned long x) +{ +#if (HZ % USER_HZ)==0 + if (x >= ~0UL / (HZ / USER_HZ)) + return ~0UL; + return x * (HZ / USER_HZ); +#else + /* Don't worry about loss of precision here .. */ + if (x >= ~0UL / HZ * USER_HZ) + return ~0UL; + + /* .. but do try to contain it here */ + return div_u64((u64)x * HZ, USER_HZ); +#endif +} + +static inline u64 jiffies_64_to_clock_t(u64 x) +{ +#if (TICK_NSEC % (NSEC_PER_SEC / USER_HZ)) == 0 +# if HZ < USER_HZ + x = div_u64(x * USER_HZ, HZ); +# elif HZ > USER_HZ + x = div_u64(x, HZ / USER_HZ); +# else + /* Nothing to do */ +# endif +#else + /* + * There are better ways that don't overflow early, + * but even this doesn't overflow in hundreds of years + * in 64 bits, so.. + */ + x = div_u64(x * TICK_NSEC, (NSEC_PER_SEC / USER_HZ)); +#endif + return x; +} + +static inline u64 nsec_to_clock_t(u64 x) +{ +#if (NSEC_PER_SEC % USER_HZ) == 0 + return div_u64(x, NSEC_PER_SEC / USER_HZ); +#elif (USER_HZ % 512) == 0 + return div_u64(x * USER_HZ / 512, NSEC_PER_SEC / 512); +#else + /* + * max relative error 5.7e-8 (1.8s per year) for USER_HZ <= 1024, + * overflow after 64.99 years. + * exact for HZ=60, 72, 90, 120, 144, 180, 300, 600, 900, ... 
+ */ + return div_u64(x * 9, (9ull * NSEC_PER_SEC + (USER_HZ / 2)) / USER_HZ); +#endif +} + +/** + * nsecs_to_jiffies64 - Convert nsecs in u64 to jiffies64 + * + * @n: nsecs in u64 + * + * Unlike {m,u}secs_to_jiffies, type of input is not unsigned int but u64. + * And this doesn't return MAX_JIFFY_OFFSET since this function is designed + * for scheduler, not for use in device drivers to calculate timeout value. + * + * note: + * NSEC_PER_SEC = 10^9 = (5^9 * 2^9) = (1953125 * 512) + * ULLONG_MAX ns = 18446744073.709551615 secs = about 584 years + */ +static inline u64 nsecs_to_jiffies64(u64 n) +{ +#if (NSEC_PER_SEC % HZ) == 0 + /* Common case, HZ = 100, 128, 200, 250, 256, 500, 512, 1000 etc. */ + return div_u64(n, NSEC_PER_SEC / HZ); +#elif (HZ % 512) == 0 + /* overflow after 292 years if HZ = 1024 */ + return div_u64(n * HZ / 512, NSEC_PER_SEC / 512); +#else + /* + * Generic case - optimized for cases where HZ is a multiple of 3. + * overflow after 64.99 years, exact for HZ = 60, 72, 90, 120 etc. + */ + return div_u64(n * 9, (9ull * NSEC_PER_SEC + HZ / 2) / HZ); +#endif +} + +/** + * nsecs_to_jiffies - Convert nsecs in u64 to jiffies + * + * @n: nsecs in u64 + * + * Unlike {m,u}secs_to_jiffies, type of input is not unsigned int but u64. + * And this doesn't return MAX_JIFFY_OFFSET since this function is designed + * for scheduler, not for use in device drivers to calculate timeout value. 
+ * + * note: + * NSEC_PER_SEC = 10^9 = (5^9 * 2^9) = (1953125 * 512) + * ULLONG_MAX ns = 18446744073.709551615 secs = about 584 years + */ +static inline unsigned long nsecs_to_jiffies(u64 n) +{ + return (unsigned long)nsecs_to_jiffies64(n); +} #define TIMESTAMP_SIZE 30 diff --git a/include/linux/jump_label.h b/include/linux/jump_label.h index cd0d2270998f..b68d02edd3c1 100644 --- a/include/linux/jump_label.h +++ b/include/linux/jump_label.h @@ -119,6 +119,68 @@ struct static_key { #ifdef HAVE_JUMP_LABEL #include + +#ifndef __ASSEMBLY__ +#ifdef CONFIG_HAVE_ARCH_JUMP_LABEL_RELATIVE + +struct jump_entry { + s32 code; + s32 target; + long key; // key may be far away from the core kernel under KASLR +}; + +static inline unsigned long jump_entry_code(const struct jump_entry *entry) +{ + return (unsigned long)&entry->code + entry->code; +} + +static inline unsigned long jump_entry_target(const struct jump_entry *entry) +{ + return (unsigned long)&entry->target + entry->target; +} + +static inline struct static_key *jump_entry_key(const struct jump_entry *entry) +{ + long offset = entry->key & ~3L; + + return (struct static_key *)((unsigned long)&entry->key + offset); +} + +#else + +static inline unsigned long jump_entry_code(const struct jump_entry *entry) +{ + return entry->code; +} + +static inline unsigned long jump_entry_target(const struct jump_entry *entry) +{ + return entry->target; +} + +static inline struct static_key *jump_entry_key(const struct jump_entry *entry) +{ + return (struct static_key *)((unsigned long)entry->key & ~3UL); +} + +#endif + +static inline bool jump_entry_is_branch(const struct jump_entry *entry) +{ + return (unsigned long)entry->key & 1UL; +} + +static inline bool jump_entry_is_init(const struct jump_entry *entry) +{ + return (unsigned long)entry->key & 2UL; +} + +static inline void jump_entry_set_init(struct jump_entry *entry) +{ + entry->key |= 2; +} + +#endif #endif #ifndef __ASSEMBLY__ @@ -393,7 +455,7 @@ extern bool 
____wrong_branch_error(void); branch = !arch_static_branch_jump(&(x)->key, true); \ else \ branch = ____wrong_branch_error(); \ - branch; \ + likely(branch); \ }) #define static_branch_unlikely(x) \ @@ -405,7 +467,7 @@ extern bool ____wrong_branch_error(void); branch = arch_static_branch(&(x)->key, false); \ else \ branch = ____wrong_branch_error(); \ - branch; \ + unlikely(branch); \ }) #else /* !HAVE_JUMP_LABEL */ diff --git a/include/linux/jump_label_ratelimit.h b/include/linux/jump_label_ratelimit.h index fc13ff289903..689d7338d449 100644 --- a/include/linux/jump_label_ratelimit.h +++ b/include/linux/jump_label_ratelimit.h @@ -14,21 +14,79 @@ struct static_key_deferred { #endif #ifdef HAVE_JUMP_LABEL -extern void static_key_slow_dec_deferred(struct static_key_deferred *key); -extern void static_key_deferred_flush(struct static_key_deferred *key); +struct static_key_true_deferred { + struct static_key_true key; + unsigned long timeout; + struct delayed_work work; +}; + +struct static_key_false_deferred { + struct static_key_false key; + unsigned long timeout; + struct delayed_work work; +}; + +#define static_key_slow_dec_deferred(x) \ + __static_key_slow_dec_deferred(&(x)->key, &(x)->work, (x)->timeout) +#define static_branch_slow_dec_deferred(x) \ + __static_key_slow_dec_deferred(&(x)->key.key, &(x)->work, (x)->timeout) + +#define static_key_deferred_flush(x) \ + __static_key_deferred_flush((x), &(x)->work) + +extern void +__static_key_slow_dec_deferred(struct static_key *key, + struct delayed_work *work, + unsigned long timeout); +extern void __static_key_deferred_flush(void *key, struct delayed_work *work); extern void jump_label_rate_limit(struct static_key_deferred *key, unsigned long rl); +extern void jump_label_update_timeout(struct work_struct *work); + +#define DEFINE_STATIC_KEY_DEFERRED_TRUE(name, rl) \ + struct static_key_true_deferred name = { \ + .key = { STATIC_KEY_INIT_TRUE }, \ + .timeout = (rl), \ + .work = 
__DELAYED_WORK_INITIALIZER((name).work, \ + jump_label_update_timeout, \ + 0), \ + } + +#define DEFINE_STATIC_KEY_DEFERRED_FALSE(name, rl) \ + struct static_key_false_deferred name = { \ + .key = { STATIC_KEY_INIT_FALSE }, \ + .timeout = (rl), \ + .work = __DELAYED_WORK_INITIALIZER((name).work, \ + jump_label_update_timeout, \ + 0), \ + } + +#define static_branch_deferred_inc(x) static_branch_inc(&(x)->key) + #else /* !HAVE_JUMP_LABEL */ struct static_key_deferred { struct static_key key; }; +struct static_key_true_deferred { + struct static_key_true key; +}; +struct static_key_false_deferred { + struct static_key_false key; +}; +#define DEFINE_STATIC_KEY_DEFERRED_TRUE(name, rl) \ + struct static_key_true_deferred name = { STATIC_KEY_TRUE_INIT } +#define DEFINE_STATIC_KEY_DEFERRED_FALSE(name, rl) \ + struct static_key_false_deferred name = { STATIC_KEY_FALSE_INIT } + +#define static_branch_slow_dec_deferred(x) static_branch_dec(&(x)->key) + static inline void static_key_slow_dec_deferred(struct static_key_deferred *key) { STATIC_KEY_CHECK_USE(); static_key_slow_dec(&key->key); } -static inline void static_key_deferred_flush(struct static_key_deferred *key) +static inline void static_key_deferred_flush(void *key) { STATIC_KEY_CHECK_USE(); } diff --git a/include/linux/kernel.h b/include/linux/kernel.h index 16c187a1089d..17446a554383 100644 --- a/include/linux/kernel.h +++ b/include/linux/kernel.h @@ -474,6 +474,7 @@ extern bool parse_option_str(const char *str, const char *option); extern char *next_arg(char *args, char **param, char **val); extern int core_kernel_text(unsigned long addr); +extern int init_kernel_text(unsigned long addr); extern int core_kernel_data(unsigned long addr); extern int __kernel_text_address(unsigned long addr); extern int kernel_text_address(unsigned long addr); @@ -674,6 +675,9 @@ do { \ * let gcc optimize the rest. */ +#ifdef CONFIG_DISABLE_TRACE_PRINTK +#define trace_printk pr_debug +#else #define trace_printk(fmt, ...) 
\ do { \ char _______STR[] = __stringify((__VA_ARGS__)); \ @@ -696,6 +700,7 @@ do { \ else \ __trace_printk(_THIS_IP_, fmt, ##args); \ } while (0) +#endif extern __printf(2, 3) int __trace_bprintk(unsigned long ip, const char *fmt, ...); diff --git a/include/linux/kthread.h b/include/linux/kthread.h index c3701de9dc74..413b05795a65 100644 --- a/include/linux/kthread.h +++ b/include/linux/kthread.h @@ -49,6 +49,23 @@ struct task_struct *kthread_create_on_cpu(int (*threadfn)(void *data), __k; \ }) +/** + * kthread_run_perf_critical - create and wake a performance-critical thread. + * + * Same as kthread_run(), but with the kthread bound to performance CPUs. + */ +#define kthread_run_perf_critical(threadfn, data, namefmt, ...) \ +({ \ + struct task_struct *__k \ + = kthread_create(threadfn, data, namefmt, ## __VA_ARGS__); \ + if (!IS_ERR(__k)) { \ + __k->flags |= PF_PERF_CRITICAL; \ + kthread_bind_mask(__k, cpu_perf_mask); \ + wake_up_process(__k); \ + } \ + __k; \ +}) + void free_kthread_struct(struct task_struct *k); void kthread_bind(struct task_struct *k, unsigned int cpu); void kthread_bind_mask(struct task_struct *k, const struct cpumask *mask); diff --git a/include/linux/lsm_audit.h b/include/linux/lsm_audit.h index 915330abf6e5..7ae9dcf89578 100644 --- a/include/linux/lsm_audit.h +++ b/include/linux/lsm_audit.h @@ -117,8 +117,16 @@ int ipv4_skb_to_auditdata(struct sk_buff *skb, int ipv6_skb_to_auditdata(struct sk_buff *skb, struct common_audit_data *ad, u8 *proto); +#ifdef CONFIG_AUDIT void common_lsm_audit(struct common_audit_data *a, void (*pre_audit)(struct audit_buffer *, void *), void (*post_audit)(struct audit_buffer *, void *)); +#else +static inline void common_lsm_audit(struct common_audit_data *a, + void (*pre_audit)(struct audit_buffer *, void *), + void (*post_audit)(struct audit_buffer *, void *)) +{ +} +#endif #endif diff --git a/include/linux/msm_rtb.h b/include/linux/msm_rtb.h deleted file mode 100644 index 6e1d1db5e3b6..000000000000 --- 
a/include/linux/msm_rtb.h +++ /dev/null @@ -1,88 +0,0 @@ -/* - * Copyright (c) 2012-2014, 2016 The Linux Foundation. All rights reserved. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 and - * only version 2 as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - */ -#ifndef __MSM_RTB_H__ -#define __MSM_RTB_H__ - -/* - * These numbers are used from the kernel command line and sysfs - * to control filtering. Remove items from here with extreme caution. - */ -enum logk_event_type { - LOGK_NONE = 0, - LOGK_READL = 1, - LOGK_WRITEL = 2, - LOGK_LOGBUF = 3, - LOGK_HOTPLUG = 4, - LOGK_CTXID = 5, - LOGK_TIMESTAMP = 6, - LOGK_L2CPREAD = 7, - LOGK_L2CPWRITE = 8, - LOGK_IRQ = 9, -}; - -#define LOGTYPE_NOPC 0x80 - -struct msm_rtb_platform_data { - unsigned int size; -}; - -#if defined(CONFIG_QCOM_RTB) -/* - * returns 1 if data was logged, 0 otherwise - */ -int uncached_logk_pc(enum logk_event_type log_type, void *caller, - void *data); - -/* - * returns 1 if data was logged, 0 otherwise - */ -int uncached_logk(enum logk_event_type log_type, void *data); - -#define ETB_WAYPOINT do { \ - BRANCH_TO_NEXT_ISTR; \ - nop(); \ - BRANCH_TO_NEXT_ISTR; \ - nop(); \ - } while (0) - -#define BRANCH_TO_NEXT_ISTR \ - do { \ - asm volatile("b .+4\n" : : : "memory"); \ - } while (0) - -/* - * both the mb and the isb are needed to ensure enough waypoints for - * etb tracing - */ -#define LOG_BARRIER do { \ - mb(); \ - isb(); \ - } while (0) -#else - -static inline int uncached_logk_pc(enum logk_event_type log_type, - void *caller, - void *data) { return 0; } - -static inline int uncached_logk(enum logk_event_type log_type, - void *data) { return 0; } - 
-#define ETB_WAYPOINT -#define BRANCH_TO_NEXT_ISTR -/* - * Due to a GCC bug, we need to have a nop here in order to prevent an extra - * read from being generated after the write. - */ -#define LOG_BARRIER nop() -#endif -#endif diff --git a/include/linux/oom.h b/include/linux/oom.h index 395ac25d0cfc..c9ad7f044997 100644 --- a/include/linux/oom.h +++ b/include/linux/oom.h @@ -122,8 +122,4 @@ extern void wake_oom_reaper(struct task_struct *tsk); extern int sysctl_oom_dump_tasks; extern int sysctl_oom_kill_allocating_task; extern int sysctl_panic_on_oom; -extern int sysctl_reap_mem_on_sigkill; - -/* calls for LMK reaper */ -extern void add_to_oom_reaper(struct task_struct *p); #endif /* _INCLUDE_LINUX_OOM_H */ diff --git a/include/linux/pci.h b/include/linux/pci.h index 3130e0eabd77..cafb4e0423bb 100644 --- a/include/linux/pci.h +++ b/include/linux/pci.h @@ -1630,6 +1630,7 @@ static inline struct pci_dev *pci_get_class(unsigned int class, #define pci_dev_put(dev) do { } while (0) static inline void pci_set_master(struct pci_dev *dev) { } +static inline void pci_clear_master(struct pci_dev *dev) { } static inline int pci_enable_device(struct pci_dev *dev) { return -EIO; } static inline void pci_disable_device(struct pci_dev *dev) { } static inline int pci_assign_resource(struct pci_dev *dev, int i) @@ -1665,6 +1666,10 @@ static inline int pci_enable_wake(struct pci_dev *dev, pci_power_t state, static inline struct resource *pci_find_resource(struct pci_dev *dev, struct resource *res) { return NULL; } +static inline int pci_request_region(struct pci_dev *pdev, int bar, + const char *res_name) +{ return -EIO; } +static inline void pci_release_region(struct pci_dev *pdev, int bar) { } static inline int pci_request_regions(struct pci_dev *dev, const char *res_name) { return -EIO; } static inline void pci_release_regions(struct pci_dev *dev) { } diff --git a/include/linux/profile.h b/include/linux/profile.h index bad18ca43150..2b0561f450d6 100644 --- 
a/include/linux/profile.h +++ b/include/linux/profile.h @@ -67,9 +67,6 @@ static inline void profile_hit(int type, void *ip) struct task_struct; struct mm_struct; -/* task is in do_exit() */ -void profile_task_exit(struct task_struct * task); - /* task is dead, free task struct ? Returns 1 if * the task was taken, 0 if the task should be freed. */ @@ -81,9 +78,6 @@ void profile_munmap(unsigned long addr); int task_handoff_register(struct notifier_block * n); int task_handoff_unregister(struct notifier_block * n); -int profile_event_register(enum profile_type, struct notifier_block * n); -int profile_event_unregister(enum profile_type, struct notifier_block * n); - struct pt_regs; #else @@ -120,20 +114,15 @@ static inline int task_handoff_unregister(struct notifier_block * n) return -ENOSYS; } -static inline int profile_event_register(enum profile_type t, struct notifier_block * n) -{ - return -ENOSYS; -} - -static inline int profile_event_unregister(enum profile_type t, struct notifier_block * n) -{ - return -ENOSYS; -} - -#define profile_task_exit(a) do { } while (0) #define profile_handoff_task(a) (0) #define profile_munmap(a) do { } while (0) #endif /* CONFIG_PROFILING */ +/* task is in do_exit() */ +void profile_task_exit(struct task_struct * task); + +int profile_event_register(enum profile_type, struct notifier_block * n); +int profile_event_unregister(enum profile_type, struct notifier_block * n); + #endif /* _LINUX_PROFILE_H */ diff --git a/include/linux/sched.h b/include/linux/sched.h index 0e460d26a8cd..bf4c58ef6096 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -555,7 +555,6 @@ extern u32 sched_get_init_task_load(struct task_struct *p); extern void sched_update_cpu_freq_min_max(const cpumask_t *cpus, u32 fmin, u32 fmax); extern int sched_set_boost(int enable); -extern void free_task_load_ptrs(struct task_struct *p); #define RAVG_HIST_SIZE_MAX 5 #define NUM_BUSY_BUCKETS 10 @@ -599,7 +598,7 @@ struct ravg { u32 sum, demand; u32 
coloc_demand; u32 sum_history[RAVG_HIST_SIZE_MAX]; - u32 *curr_window_cpu, *prev_window_cpu; + u32 curr_window_cpu[CONFIG_NR_CPUS], prev_window_cpu[CONFIG_NR_CPUS]; u32 curr_window, prev_window; u16 active_windows; u32 pred_demand; @@ -620,7 +619,6 @@ static inline int sched_set_boost(int enable) { return -EINVAL; } -static inline void free_task_load_ptrs(struct task_struct *p) { } static inline void sched_update_cpu_freq_min_max(const cpumask_t *cpus, u32 fmin, u32 fmax) { } @@ -1576,6 +1574,7 @@ extern struct pid *cad_pid; #define PF_RANDOMIZE 0x00400000 /* Randomize virtual address space */ #define PF_SWAPWRITE 0x00800000 /* Allowed to write to swap */ #define PF_MEMSTALL 0x01000000 /* Stalled due to lack of memory */ +#define PF_PERF_CRITICAL 0x02000000 /* Thread is performance-critical */ #define PF_NO_SETAFFINITY 0x04000000 /* Userland is not allowed to meddle with cpus_allowed */ #define PF_MCE_EARLY 0x08000000 /* Early kill for mce process policy */ #define PF_WAKE_UP_IDLE 0x10000000 /* TTWU on an idle CPU */ diff --git a/include/linux/sched/cpufreq.h b/include/linux/sched/cpufreq.h index e0234142b2f2..e38b20cb7810 100644 --- a/include/linux/sched/cpufreq.h +++ b/include/linux/sched/cpufreq.h @@ -21,6 +21,8 @@ #define SCHED_CPUFREQ_RT_DL (SCHED_CPUFREQ_RT | SCHED_CPUFREQ_DL) #ifdef CONFIG_CPU_FREQ +struct cpufreq_policy; + struct update_util_data { void (*func)(struct update_util_data *data, u64 time, unsigned int flags); }; @@ -29,6 +31,14 @@ void cpufreq_add_update_util_hook(int cpu, struct update_util_data *data, void (*func)(struct update_util_data *data, u64 time, unsigned int flags)); void cpufreq_remove_update_util_hook(int cpu); + +bool cpufreq_this_cpu_can_update(struct cpufreq_policy *policy); + +static inline unsigned long map_util_freq(unsigned long util, + unsigned long freq, unsigned long cap) +{ + return (freq + (freq >> 2)) * util / cap; +} #endif /* CONFIG_CPU_FREQ */ #endif /* _LINUX_SCHED_CPUFREQ_H */ diff --git 
a/include/linux/sched/sysctl.h b/include/linux/sched/sysctl.h index c4da504fbfc0..cc4259116a6d 100644 --- a/include/linux/sched/sysctl.h +++ b/include/linux/sched/sysctl.h @@ -45,7 +45,6 @@ extern unsigned int sysctl_sched_many_wakeup_threshold; extern unsigned int sysctl_sched_walt_rotate_big_tasks; extern unsigned int sysctl_sched_min_task_util_for_boost; extern unsigned int sysctl_sched_min_task_util_for_colocation; -extern unsigned int sysctl_sched_little_cluster_coloc_fmin_khz; extern int walt_proc_update_handler(struct ctl_table *table, int write, @@ -75,15 +74,15 @@ extern unsigned int sysctl_numa_balancing_scan_period_min; extern unsigned int sysctl_numa_balancing_scan_period_max; extern unsigned int sysctl_numa_balancing_scan_size; -#ifdef CONFIG_SCHED_DEBUG extern __read_mostly unsigned int sysctl_sched_migration_cost; +#ifdef CONFIG_SCHED_DEBUG extern __read_mostly unsigned int sysctl_sched_nr_migrate; extern __read_mostly unsigned int sysctl_sched_time_avg; +#endif int sched_proc_update_handler(struct ctl_table *table, int write, void __user *buffer, size_t *length, loff_t *ppos); -#endif extern int sched_boost_handler(struct ctl_table *table, int write, void __user *buffer, size_t *lenp, loff_t *ppos); @@ -128,12 +127,6 @@ extern int sysctl_schedstats(struct ctl_table *table, int write, void __user *buffer, size_t *lenp, loff_t *ppos); -#ifdef CONFIG_SCHED_WALT -extern int sched_little_cluster_coloc_fmin_khz_handler(struct ctl_table *table, - int write, void __user *buffer, - size_t *lenp, loff_t *ppos); -#endif - #define LIB_PATH_LENGTH 512 extern char sched_lib_name[LIB_PATH_LENGTH]; extern unsigned int sched_lib_mask_force; diff --git a/include/linux/sde_rsc.h b/include/linux/sde_rsc.h index d6e189685171..c32130777e02 100644 --- a/include/linux/sde_rsc.h +++ b/include/linux/sde_rsc.h @@ -1,4 +1,4 @@ -/* Copyright (c) 2016-2020, The Linux Foundation. All rights reserved. +/* Copyright (c) 2016-2019, The Linux Foundation. All rights reserved. 
* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -55,10 +55,6 @@ */ #define SDE_RSC_EVENT_SOLVER_DISABLED 0x20 -#define SDE_RSC_REV_1 0x1 -#define SDE_RSC_REV_2 0x2 -#define SDE_RSC_REV_3 0x3 - /** * sde_rsc_client_type: sde rsc client type information * SDE_RSC_PRIMARY_DISP_CLIENT: A primary display client which can request @@ -308,16 +304,6 @@ int get_sde_rsc_primary_crtc(int rsc_index); */ int sde_rsc_client_trigger_vote(struct sde_rsc_client *caller_client, bool delta_vote); - -/** - * get_sde_rsc_version - get the supported rsc version - * - * @rsc_index: A client will be created on this RSC. As of now only - * SDE_RSC_INDEX is valid rsc index. - * Return the rsc version. - */ -u32 get_sde_rsc_version(int rsc_index); - #else static inline struct sde_rsc_client *sde_rsc_client_create(u32 rsc_index, @@ -392,13 +378,6 @@ static inline int sde_rsc_client_trigger_vote( { return 0; } - - -static inline u32 get_sde_rsc_version(int rsc_index) -{ - return 0; -} - #endif /* CONFIG_DRM_SDE_RSC */ #endif /* _SDE_RSC_H_ */ diff --git a/include/linux/time.h b/include/linux/time.h index 21086c5143d9..a3fffb7769ef 100644 --- a/include/linux/time.h +++ b/include/linux/time.h @@ -49,9 +49,45 @@ static inline int timeval_compare(const struct timeval *lhs, const struct timeva return lhs->tv_usec - rhs->tv_usec; } -extern time64_t mktime64(const unsigned int year, const unsigned int mon, - const unsigned int day, const unsigned int hour, - const unsigned int min, const unsigned int sec); +/* + * mktime64 - Converts date to seconds. + * Converts Gregorian date to seconds since 1970-01-01 00:00:00. + * Assumes input in normal date format, i.e. 1980-12-31 23:59:59 + * => year=1980, mon=12, day=31, hour=23, min=59, sec=59. 
+ * + * [For the Julian calendar (which was used in Russia before 1917, + * Britain & colonies before 1752, anywhere else before 1582, + * and is still in use by some communities) leave out the + * -year/100+year/400 terms, and add 10.] + * + * This algorithm was first published by Gauss (I think). + * + * A leap second can be indicated by calling this function with sec as + * 60 (allowable under ISO 8601). The leap second is treated the same + * as the following second since they don't exist in UNIX time. + * + * An encoding of midnight at the end of the day as 24:00:00 - ie. midnight + * tomorrow - (allowable under ISO 8601) is supported. + */ +static inline time64_t mktime64(const unsigned int year0, const unsigned int mon0, + const unsigned int day, const unsigned int hour, + const unsigned int min, const unsigned int sec) +{ + unsigned int mon = mon0, year = year0; + + /* 1..12 -> 11,12,1..10 */ + if (0 >= (int) (mon -= 2)) { + mon += 12; /* Puts Feb last since it has leap day */ + year -= 1; + } + + return ((((time64_t) + (year/4 - year/100 + year/400 + 367*mon/12 + day) + + year*365 - 719499 + )*24 + hour /* now have hours - midnight tomorrow handled here */ + )*60 + min /* now have minutes */ + )*60 + sec; /* finally seconds */ +} /** * Deprecated. Use mktime64(). @@ -64,15 +100,62 @@ static inline unsigned long mktime(const unsigned int year, return mktime64(year, mon, day, hour, min, sec); } -extern void set_normalized_timespec(struct timespec *ts, time_t sec, s64 nsec); +/** + * set_normalized_timespec - set timespec sec and nsec parts and normalize + * + * @ts: pointer to timespec variable to be set + * @sec: seconds to set + * @nsec: nanoseconds to set + * + * Set seconds and nanoseconds field of a timespec variable and + * normalize to the timespec storage format + * + * Note: The tv_nsec part is always in the range of + * 0 <= tv_nsec < NSEC_PER_SEC + * For negative values only the tv_sec field is negative ! 
+ */ +static inline void set_normalized_timespec(struct timespec *ts, time_t sec, s64 nsec) +{ + while (nsec >= NSEC_PER_SEC) { + /* + * The following asm() prevents the compiler from + * optimising this loop into a modulo operation. See + * also __iter_div_u64_rem() in include/linux/time.h + */ + asm("" : "+rm"(nsec)); + nsec -= NSEC_PER_SEC; + ++sec; + } + while (nsec < 0) { + asm("" : "+rm"(nsec)); + nsec += NSEC_PER_SEC; + --sec; + } + ts->tv_sec = sec; + ts->tv_nsec = nsec; +} /* * timespec_add_safe assumes both values are positive and checks * for overflow. It will return TIME_T_MAX if the reutrn would be * smaller then either of the arguments. + * + * Add two timespec values and do a safety check for overflow. + * It's assumed that both values are valid (>= 0) */ -extern struct timespec timespec_add_safe(const struct timespec lhs, - const struct timespec rhs); +static inline struct timespec timespec_add_safe(const struct timespec lhs, + const struct timespec rhs) +{ + struct timespec res; + + set_normalized_timespec(&res, lhs.tv_sec + rhs.tv_sec, + lhs.tv_nsec + rhs.tv_nsec); + + if (res.tv_sec < lhs.tv_sec || res.tv_sec < rhs.tv_sec) + res.tv_sec = TIME_T_MAX; + + return res; +} static inline struct timespec timespec_add(struct timespec lhs, @@ -133,7 +216,28 @@ static inline bool timeval_valid(const struct timeval *tv) return true; } -extern struct timespec timespec_trunc(struct timespec t, unsigned gran); +/** + * timespec_trunc - Truncate timespec to a granularity + * @t: Timespec + * @gran: Granularity in ns. + * + * Truncate a timespec to a granularity. Always rounds down. gran must + * not be 0 nor greater than a second (NSEC_PER_SEC, or 10^9 ns). + */ +static inline struct timespec timespec_trunc(struct timespec t, unsigned gran) +{ + /* Avoid division in the common cases 1 ns and 1 s. 
*/ + if (gran == 1) { + /* nothing */ + } else if (gran == NSEC_PER_SEC) { + t.tv_nsec = 0; + } else if (gran > 1 && gran < NSEC_PER_SEC) { + t.tv_nsec -= t.tv_nsec % gran; + } else { + WARN(1, "illegal file time granularity: %u", gran); + } + return t; +} /* * Validates if a timespec/timeval used to inject a time offset is valid. @@ -249,19 +353,44 @@ static inline s64 timeval_to_ns(const struct timeval *tv) /** * ns_to_timespec - Convert nanoseconds to timespec - * @nsec: the nanoseconds value to be converted + * @nsec: the nanoseconds value to be converted * * Returns the timespec representation of the nsec parameter. */ -extern struct timespec ns_to_timespec(const s64 nsec); +static inline struct timespec ns_to_timespec(const s64 nsec) +{ + struct timespec ts; + s32 rem; + + if (!nsec) + return (struct timespec) {0, 0}; + + ts.tv_sec = div_s64_rem(nsec, NSEC_PER_SEC, &rem); + if (unlikely(rem < 0)) { + ts.tv_sec--; + rem += NSEC_PER_SEC; + } + ts.tv_nsec = rem; + + return ts; +} /** * ns_to_timeval - Convert nanoseconds to timeval - * @nsec: the nanoseconds value to be converted + * @nsec: the nanoseconds value to be converted * * Returns the timeval representation of the nsec parameter. 
*/ -extern struct timeval ns_to_timeval(const s64 nsec); +static inline struct timeval ns_to_timeval(const s64 nsec) +{ + struct timespec ts = ns_to_timespec(nsec); + struct timeval tv; + + tv.tv_sec = ts.tv_sec; + tv.tv_usec = (suseconds_t) ts.tv_nsec / 1000; + + return tv; +} /** * timespec_add_ns - Adds nanoseconds to a timespec diff --git a/include/linux/time64.h b/include/linux/time64.h index ad33260618f7..b091abd866ab 100644 --- a/include/linux/time64.h +++ b/include/linux/time64.h @@ -133,7 +133,40 @@ static inline int timespec64_compare(const struct timespec64 *lhs, const struct return lhs->tv_nsec - rhs->tv_nsec; } -extern void set_normalized_timespec64(struct timespec64 *ts, time64_t sec, s64 nsec); +/** + * set_normalized_timespec - set timespec sec and nsec parts and normalize + * + * @ts: pointer to timespec variable to be set + * @sec: seconds to set + * @nsec: nanoseconds to set + * + * Set seconds and nanoseconds field of a timespec variable and + * normalize to the timespec storage format + * + * Note: The tv_nsec part is always in the range of + * 0 <= tv_nsec < NSEC_PER_SEC + * For negative values only the tv_sec field is negative ! + */ +static inline void set_normalized_timespec64(struct timespec64 *ts, time64_t sec, s64 nsec) +{ + while (nsec >= NSEC_PER_SEC) { + /* + * The following asm() prevents the compiler from + * optimising this loop into a modulo operation. 
See + * also __iter_div_u64_rem() in include/linux/time.h + */ + asm("" : "+rm"(nsec)); + nsec -= NSEC_PER_SEC; + ++sec; + } + while (nsec < 0) { + asm("" : "+rm"(nsec)); + nsec += NSEC_PER_SEC; + --sec; + } + ts->tv_sec = sec; + ts->tv_nsec = nsec; +} static inline struct timespec64 timespec64_add(struct timespec64 lhs, struct timespec64 rhs) @@ -194,11 +227,27 @@ static inline s64 timespec64_to_ns(const struct timespec64 *ts) /** * ns_to_timespec64 - Convert nanoseconds to timespec64 - * @nsec: the nanoseconds value to be converted + * @nsec: the nanoseconds value to be converted * * Returns the timespec64 representation of the nsec parameter. */ -extern struct timespec64 ns_to_timespec64(const s64 nsec); +static inline struct timespec64 ns_to_timespec64(const s64 nsec) +{ + struct timespec64 ts; + s32 rem; + + if (!nsec) + return (struct timespec64) {0, 0}; + + ts.tv_sec = div_s64_rem(nsec, NSEC_PER_SEC, &rem); + if (unlikely(rem < 0)) { + ts.tv_sec--; + rem += NSEC_PER_SEC; + } + ts.tv_nsec = rem; + + return ts; +} /** * timespec64_add_ns - Adds nanoseconds to a timespec64 diff --git a/include/linux/timer.h b/include/linux/timer.h index 1b85e41d67ea..07d4683b8dd4 100644 --- a/include/linux/timer.h +++ b/include/linux/timer.h @@ -208,6 +208,14 @@ static inline void timer_setup(struct timer_list *timer, } #endif +static inline void timer_setup_on_stack(struct timer_list *timer, + void (*callback)(struct timer_list *), + unsigned int flags) +{ + __setup_timer_on_stack(timer, (TIMER_FUNC_TYPE)callback, + (TIMER_DATA_TYPE)timer, flags); +} + #define from_timer(var, callback_timer, timer_fieldname) \ container_of(callback_timer, typeof(*var), timer_fieldname) diff --git a/include/linux/workqueue.h b/include/linux/workqueue.h index 1c527abb1ae5..0ffb8a16f962 100644 --- a/include/linux/workqueue.h +++ b/include/linux/workqueue.h @@ -607,6 +607,15 @@ static inline bool schedule_delayed_work(struct delayed_work *dwork, return queue_delayed_work(system_wq, dwork, delay); 
} +/** + * delayed_work_busy - See work_busy() + * @dwork: the delayed work to be tested + */ +static inline unsigned int delayed_work_busy(struct delayed_work *dwork) +{ + return work_busy(&dwork->work); +} + #ifndef CONFIG_SMP static inline long work_on_cpu(int cpu, long (*fn)(void *), void *arg) { diff --git a/include/trace/events/power.h b/include/trace/events/power.h index b4751071288b..988f49c6cb62 100644 --- a/include/trace/events/power.h +++ b/include/trace/events/power.h @@ -629,8 +629,8 @@ TRACE_EVENT(sugov_util_update, TP_PROTO(int cpu, unsigned long util, unsigned long avg_cap, unsigned long max_cap, unsigned long nl, unsigned long pl, - unsigned int flags), - TP_ARGS(cpu, util, avg_cap, max_cap, nl, pl, flags), + unsigned int rtgb, unsigned int flags), + TP_ARGS(cpu, util, avg_cap, max_cap, nl, pl, rtgb, flags), TP_STRUCT__entry( __field( int, cpu) __field( unsigned long, util) @@ -639,6 +639,7 @@ TRACE_EVENT(sugov_util_update, __field( unsigned long, nl) __field( unsigned long, pl) __field( unsigned int, flags) + __field( unsigned int, rtgb) ), TP_fast_assign( __entry->cpu = cpu; @@ -647,12 +648,13 @@ TRACE_EVENT(sugov_util_update, __entry->max_cap = max_cap; __entry->nl = nl; __entry->pl = pl; + __entry->rtgb = rtgb; __entry->flags = flags; ), - TP_printk("cpu=%d util=%lu avg_cap=%lu max_cap=%lu nl=%lu pl=%lu flags=0x%x", + TP_printk("cpu=%d util=%lu avg_cap=%lu max_cap=%lu nl=%lu pl=%lu rtgb=%u flags=0x%x", __entry->cpu, __entry->util, __entry->avg_cap, __entry->max_cap, __entry->nl, - __entry->pl, __entry->flags) + __entry->pl, __entry->rtgb, __entry->flags) ); TRACE_EVENT(sugov_next_freq, diff --git a/include/trace/events/random.h b/include/trace/events/random.h index 0560dfc33f1c..32c10a515e2d 100644 --- a/include/trace/events/random.h +++ b/include/trace/events/random.h @@ -62,15 +62,14 @@ DEFINE_EVENT(random__mix_pool_bytes, mix_pool_bytes_nolock, TRACE_EVENT(credit_entropy_bits, TP_PROTO(const char *pool_name, int bits, int entropy_count, - 
int entropy_total, unsigned long IP), + unsigned long IP), - TP_ARGS(pool_name, bits, entropy_count, entropy_total, IP), + TP_ARGS(pool_name, bits, entropy_count, IP), TP_STRUCT__entry( __field( const char *, pool_name ) __field( int, bits ) __field( int, entropy_count ) - __field( int, entropy_total ) __field(unsigned long, IP ) ), @@ -78,14 +77,12 @@ TRACE_EVENT(credit_entropy_bits, __entry->pool_name = pool_name; __entry->bits = bits; __entry->entropy_count = entropy_count; - __entry->entropy_total = entropy_total; __entry->IP = IP; ), - TP_printk("%s pool: bits %d entropy_count %d entropy_total %d " - "caller %pS", __entry->pool_name, __entry->bits, - __entry->entropy_count, __entry->entropy_total, - (void *)__entry->IP) + TP_printk("%s pool: bits %d entropy_count %d caller %pS", + __entry->pool_name, __entry->bits, + __entry->entropy_count, (void *)__entry->IP) ); TRACE_EVENT(push_to_pool, diff --git a/include/trace/events/walt.h b/include/trace/events/walt.h index 0f7a9ed14dc5..2d685c92217b 100644 --- a/include/trace/events/walt.h +++ b/include/trace/events/walt.h @@ -529,12 +529,9 @@ TRACE_EVENT(sched_load_to_gov, TP_PROTO(struct rq *rq, u64 aggr_grp_load, u32 tt_load, int freq_aggr, u64 load, int policy, - int big_task_rotation, - unsigned int sysctl_sched_little_cluster_coloc_fmin_khz, - u64 coloc_boost_load), + int big_task_rotation), TP_ARGS(rq, aggr_grp_load, tt_load, freq_aggr, load, policy, - big_task_rotation, sysctl_sched_little_cluster_coloc_fmin_khz, - coloc_boost_load), + big_task_rotation), TP_STRUCT__entry( __field( int, cpu ) @@ -550,9 +547,6 @@ TRACE_EVENT(sched_load_to_gov, __field( u64, pl ) __field( u64, load ) __field( int, big_task_rotation ) - __field(unsigned int, - sysctl_sched_little_cluster_coloc_fmin_khz) - __field(u64, coloc_boost_load) ), TP_fast_assign( @@ -569,18 +563,13 @@ TRACE_EVENT(sched_load_to_gov, __entry->pl = rq->walt_stats.pred_demands_sum_scaled; __entry->load = load; __entry->big_task_rotation = big_task_rotation; - 
__entry->sysctl_sched_little_cluster_coloc_fmin_khz = - sysctl_sched_little_cluster_coloc_fmin_khz; - __entry->coloc_boost_load = coloc_boost_load; ), - TP_printk("cpu=%d policy=%d ed_task_pid=%d aggr_grp_load=%llu freq_aggr=%d tt_load=%llu rq_ps=%llu grp_rq_ps=%llu nt_ps=%llu grp_nt_ps=%llu pl=%llu load=%llu big_task_rotation=%d sysctl_sched_little_cluster_coloc_fmin_khz=%u coloc_boost_load=%llu", + TP_printk("cpu=%d policy=%d ed_task_pid=%d aggr_grp_load=%llu freq_aggr=%d tt_load=%llu rq_ps=%llu grp_rq_ps=%llu nt_ps=%llu grp_nt_ps=%llu pl=%llu load=%llu big_task_rotation=%d", __entry->cpu, __entry->policy, __entry->ed_task_pid, __entry->aggr_grp_load, __entry->freq_aggr, __entry->tt_load, __entry->rq_ps, __entry->grp_rq_ps, __entry->nt_ps, __entry->grp_nt_ps, __entry->pl, __entry->load, - __entry->big_task_rotation, - __entry->sysctl_sched_little_cluster_coloc_fmin_khz, - __entry->coloc_boost_load) + __entry->big_task_rotation) ); #endif diff --git a/init/Kconfig b/init/Kconfig index ef38f5ecfb71..06c42b263c6c 100644 --- a/init/Kconfig +++ b/init/Kconfig @@ -1253,8 +1253,6 @@ config BPF menuconfig EXPERT bool "Configure standard kernel features (expert users)" - # Unhide debug options, to make the on-by-default options visible - select DEBUG_KERNEL help This option allows certain base kernel options and settings to be disabled or tweaked. 
This is for specialized diff --git a/init/do_mounts.c b/init/do_mounts.c index 05f5ae535e1e..aa0ac499cf9e 100644 --- a/init/do_mounts.c +++ b/init/do_mounts.c @@ -662,6 +662,13 @@ void __init prepare_namespace(void) md_run_setup(); dm_run_setup(); + // Try to mount partition labeled "system" first + ROOT_DEV = name_to_dev_t("PARTLABEL=system"); + if (ROOT_DEV) { + pr_info("system partition found, mounting it directly to /\n"); + goto mount; + } + if (saved_root_name[0]) { root_device_name = saved_root_name; if (!strncmp(root_device_name, "mtd", 3) || @@ -692,6 +699,7 @@ void __init prepare_namespace(void) if (is_floppy && rd_doload && rd_load_disk(0)) ROOT_DEV = Root_RAM0; +mount: mount_root(); out: devtmpfs_mount("dev"); diff --git a/init/main.c b/init/main.c index c25ff6f3c021..805853c2fb63 100644 --- a/init/main.c +++ b/init/main.c @@ -88,6 +88,7 @@ #include #include #include +#include #include #include @@ -535,6 +536,8 @@ static void __init mm_init(void) pti_init(); } +void __init init_sync_kmem_pool(void); +void __init init_dma_buf_kmem_pool(void); asmlinkage __visible void __init start_kernel(void) { char *command_line; @@ -718,6 +721,8 @@ asmlinkage __visible void __init start_kernel(void) cgroup_init(); taskstats_init_early(); delayacct_init(); + init_sync_kmem_pool(); + init_dma_buf_kmem_pool(); check_bugs(); diff --git a/kernel/cgroup/cgroup.c b/kernel/cgroup/cgroup.c index a86b94e29d6b..95a40e2d9f47 100644 --- a/kernel/cgroup/cgroup.c +++ b/kernel/cgroup/cgroup.c @@ -632,6 +632,8 @@ EXPORT_SYMBOL_GPL(of_css); ; \ else +static struct kmem_cache *cgrp_cset_link_pool; + /* * The default css_set - used by init and its children prior to any * hierarchies being mounted. 
It contains a pointer to the root state @@ -852,7 +854,7 @@ void put_css_set_locked(struct css_set *cset) list_del(&link->cgrp_link); if (cgroup_parent(link->cgrp)) cgroup_put(link->cgrp); - kfree(link); + kmem_cache_free(cgrp_cset_link_pool, link); } if (css_set_threaded(cset)) { @@ -1002,7 +1004,7 @@ static void free_cgrp_cset_links(struct list_head *links_to_free) list_for_each_entry_safe(link, tmp_link, links_to_free, cset_link) { list_del(&link->cset_link); - kfree(link); + kmem_cache_free(cgrp_cset_link_pool, link); } } @@ -1022,7 +1024,7 @@ static int allocate_cgrp_cset_links(int count, struct list_head *tmp_links) INIT_LIST_HEAD(tmp_links); for (i = 0; i < count; i++) { - link = kzalloc(sizeof(*link), GFP_KERNEL); + link = kmem_cache_zalloc(cgrp_cset_link_pool, GFP_KERNEL); if (!link) { free_cgrp_cset_links(tmp_links); return -ENOMEM; @@ -1234,7 +1236,7 @@ static void cgroup_destroy_root(struct cgroup_root *root) list_for_each_entry_safe(link, tmp_link, &cgrp->cset_links, cset_link) { list_del(&link->cset_link); list_del(&link->cgrp_link); - kfree(link); + kmem_cache_free(cgrp_cset_link_pool, link); } spin_unlock_irq(&css_set_lock); @@ -5358,6 +5360,8 @@ int __init cgroup_init(void) struct cgroup_subsys *ss; int ssid; + cgrp_cset_link_pool = KMEM_CACHE(cgrp_cset_link, SLAB_HWCACHE_ALIGN | SLAB_PANIC); + BUILD_BUG_ON(CGROUP_SUBSYS_COUNT > 16); BUG_ON(percpu_init_rwsem(&cgroup_threadgroup_rwsem)); BUG_ON(cgroup_init_cftypes(NULL, cgroup_base_files)); diff --git a/kernel/cgroup/freezer.c b/kernel/cgroup/freezer.c index 08236798d173..5bbc26c4b822 100644 --- a/kernel/cgroup/freezer.c +++ b/kernel/cgroup/freezer.c @@ -35,6 +35,7 @@ enum freezer_state_flags { CGROUP_FREEZING_SELF = (1 << 1), /* this freezer is freezing */ CGROUP_FREEZING_PARENT = (1 << 2), /* the parent freezer is freezing */ CGROUP_FROZEN = (1 << 3), /* this and its descendants frozen */ + CGROUP_FREEZER_KILLABLE = (1 << 4), /* frozen processes can be killed */ /* mask for all FREEZING flags */
CGROUP_FREEZING = CGROUP_FREEZING_SELF | CGROUP_FREEZING_PARENT, @@ -73,6 +74,17 @@ bool cgroup_freezing(struct task_struct *task) return ret; } +bool cgroup_freezer_killable(struct task_struct *task) +{ + bool ret; + + rcu_read_lock(); + ret = task_freezer(task)->state & CGROUP_FREEZER_KILLABLE; + rcu_read_unlock(); + + return ret; +} + static const char *freezer_state_strs(unsigned int state) { if (state & CGROUP_FROZEN) @@ -111,9 +123,15 @@ static int freezer_css_online(struct cgroup_subsys_state *css) freezer->state |= CGROUP_FREEZER_ONLINE; - if (parent && (parent->state & CGROUP_FREEZING)) { - freezer->state |= CGROUP_FREEZING_PARENT | CGROUP_FROZEN; - atomic_inc(&system_freezing_cnt); + if (parent) { + if (parent->state & CGROUP_FREEZER_KILLABLE) + freezer->state |= CGROUP_FREEZER_KILLABLE; + + if (parent->state & CGROUP_FREEZING) { + freezer->state |= CGROUP_FREEZING_PARENT | + CGROUP_FROZEN; + atomic_inc(&system_freezing_cnt); + } } mutex_unlock(&freezer_mutex); @@ -450,6 +468,45 @@ static u64 freezer_parent_freezing_read(struct cgroup_subsys_state *css, return (bool)(freezer->state & CGROUP_FREEZING_PARENT); } +static u64 freezer_killable_read(struct cgroup_subsys_state *css, + struct cftype *cft) +{ + struct freezer *freezer = css_freezer(css); + + return (bool)(freezer->state & CGROUP_FREEZER_KILLABLE); +} + +static int freezer_killable_write(struct cgroup_subsys_state *css, + struct cftype *cft, u64 val) +{ + struct freezer *freezer = css_freezer(css); + + if (val > 1) + return -EINVAL; + + mutex_lock(&freezer_mutex); + + if (val == !!(freezer->state & CGROUP_FREEZER_KILLABLE)) + goto out; + + if (val) + freezer->state |= CGROUP_FREEZER_KILLABLE; + else + freezer->state &= ~CGROUP_FREEZER_KILLABLE; + + + /* + * Let __refrigerator spin once for each task to set it into the + * appropriate state. 
+ */ + unfreeze_cgroup(freezer); + +out: + mutex_unlock(&freezer_mutex); + + return 0; +} + static struct cftype files[] = { { .name = "state", @@ -467,6 +524,12 @@ static struct cftype files[] = { .flags = CFTYPE_NOT_ON_ROOT, .read_u64 = freezer_parent_freezing_read, }, + { + .name = "killable", + .flags = CFTYPE_NOT_ON_ROOT, + .write_u64 = freezer_killable_write, + .read_u64 = freezer_killable_read, + }, { } /* terminate */ }; diff --git a/kernel/cpu.c b/kernel/cpu.c index dbed3f2cf02b..7e6eb675d512 100644 --- a/kernel/cpu.c +++ b/kernel/cpu.c @@ -1031,6 +1031,7 @@ static int cpu_down_maps_locked(unsigned int cpu, enum cpuhp_state target) static int do_cpu_down(unsigned int cpu, enum cpuhp_state target) { + struct cpumask newmask; int err; /* @@ -1058,6 +1059,12 @@ static int do_cpu_down(unsigned int cpu, enum cpuhp_state target) */ cpuset_wait_for_hotplug(); + cpumask_andnot(&newmask, cpu_online_mask, cpumask_of(cpu)); + /* One big cluster CPU and one little cluster CPU must remain online */ + if (!cpumask_intersects(&newmask, cpu_perf_mask) || + !cpumask_intersects(&newmask, cpu_lp_mask)) + return -EINVAL; + cpu_maps_update_begin(); err = cpu_down_maps_locked(cpu, target); cpu_maps_update_done(); @@ -1275,6 +1282,7 @@ int freeze_secondary_cpus(int primary) { int cpu, error = 0; + unaffine_perf_irqs(); cpu_maps_update_begin(); if (!cpu_online(primary)) primary = cpumask_first(cpu_online_mask); @@ -1284,7 +1292,7 @@ int freeze_secondary_cpus(int primary) */ cpumask_clear(frozen_cpus); - pr_info("Disabling non-boot CPUs ...\n"); + pr_debug("Disabling non-boot CPUs ...\n"); for_each_online_cpu(cpu) { if (cpu == primary) continue; @@ -1341,7 +1349,7 @@ void enable_nonboot_cpus(void) if (cpumask_empty(frozen_cpus)) goto out; - pr_info("Enabling non-boot CPUs ...\n"); + pr_debug("Enabling non-boot CPUs ...\n"); arch_enable_nonboot_cpus_begin(); @@ -1367,6 +1375,7 @@ void enable_nonboot_cpus(void) cpumask_clear(frozen_cpus); out: cpu_maps_update_done(); + 
reaffine_perf_irqs(); } static int __init alloc_frozen_cpus(void) diff --git a/kernel/exit.c b/kernel/exit.c index 5a25e760f421..f854388b6f3a 100644 --- a/kernel/exit.c +++ b/kernel/exit.c @@ -775,6 +775,32 @@ static void check_stack_usage(void) static inline void check_stack_usage(void) {} #endif +#ifndef CONFIG_PROFILING +static BLOCKING_NOTIFIER_HEAD(task_exit_notifier); + +int profile_event_register(enum profile_type t, struct notifier_block *n) +{ + if (t == PROFILE_TASK_EXIT) + return blocking_notifier_chain_register(&task_exit_notifier, n); + + return -ENOSYS; +} + +int profile_event_unregister(enum profile_type t, struct notifier_block *n) +{ + if (t == PROFILE_TASK_EXIT) + return blocking_notifier_chain_unregister(&task_exit_notifier, + n); + + return -ENOSYS; +} + +void profile_task_exit(struct task_struct *tsk) +{ + blocking_notifier_call_chain(&task_exit_notifier, 0, tsk); +} +#endif + void __noreturn do_exit(long code) { struct task_struct *tsk = current; diff --git a/kernel/extable.c b/kernel/extable.c index 9aa1cc41ecf7..1d69178550ac 100644 --- a/kernel/extable.c +++ b/kernel/extable.c @@ -62,7 +62,7 @@ const struct exception_table_entry *search_exception_tables(unsigned long addr) return e; } -static inline int init_kernel_text(unsigned long addr) +int init_kernel_text(unsigned long addr) { if (addr >= (unsigned long)_sinittext && addr < (unsigned long)_einittext) diff --git a/kernel/fork.c b/kernel/fork.c index d26636de842b..3d54e88a76a2 100644 --- a/kernel/fork.c +++ b/kernel/fork.c @@ -2147,7 +2147,6 @@ bad_fork_cleanup_perf: perf_event_free_task(p); bad_fork_cleanup_policy: lockdep_free_task(p); - free_task_load_ptrs(p); #ifdef CONFIG_NUMA mpol_put(p->mempolicy); bad_fork_cleanup_threadgroup_lock: diff --git a/kernel/freezer.c b/kernel/freezer.c index 6f56a9e219fa..eddd3cc66077 100644 --- a/kernel/freezer.c +++ b/kernel/freezer.c @@ -45,6 +45,10 @@ bool freezing_slow_path(struct task_struct *p) if (test_tsk_thread_flag(p, TIF_MEMDIE)) return 
false; + if (cgroup_freezer_killable(p) && (fatal_signal_pending(p) + || (p->flags & PF_SIGNALED))) + return false; + if (pm_nosig_freezing || cgroup_freezing(p)) return true; @@ -66,7 +70,12 @@ bool __refrigerator(bool check_kthr_stop) pr_debug("%s entered refrigerator\n", current->comm); for (;;) { - set_current_state(TASK_UNINTERRUPTIBLE); + bool killable = cgroup_freezer_killable(current); + + if (killable) + set_current_state(TASK_INTERRUPTIBLE); + else + set_current_state(TASK_UNINTERRUPTIBLE); spin_lock_irq(&freezer_lock); current->flags |= PF_FROZEN; @@ -78,6 +87,24 @@ bool __refrigerator(bool check_kthr_stop) if (!(current->flags & PF_FROZEN)) break; was_frozen = true; + + /* + * Now we're sure that there is no pending fatal signal. + * Clear TIF_SIGPENDING to not get out of schedule() + * immediately (if there is a non-fatal signal pending), and + * put the task into sleep. + */ + if (killable) { + unsigned long flags; + + if (lock_task_sighand(current, &flags)) { + if (!sigismember(&current->pending.signal, + SIGKILL)) + clear_thread_flag(TIF_SIGPENDING); + unlock_task_sighand(current, &flags); + } + } + schedule(); } diff --git a/kernel/irq/cpuhotplug.c b/kernel/irq/cpuhotplug.c index e280a3dc25fb..976d6f6d7524 100644 --- a/kernel/irq/cpuhotplug.c +++ b/kernel/irq/cpuhotplug.c @@ -127,7 +127,10 @@ static bool migrate_one_irq(struct irq_desc *desc) return false; } - default_affinity = desc->affinity_hint ? : irq_default_affinity; + if (irqd_has_set(&desc->irq_data, IRQF_PERF_CRITICAL)) + default_affinity = cpu_perf_mask; + else + default_affinity = desc->affinity_hint ?
: irq_default_affinity; /* * The order of preference for selecting a fallback CPU is * @@ -200,9 +203,11 @@ void irq_migrate_all_off_this_cpu(void) raw_spin_lock(&desc->lock); affinity_broken = migrate_one_irq(desc); raw_spin_unlock(&desc->lock); + if (cpumask_intersects(cpumask_of(smp_processor_id()), cpu_perf_mask)) + reaffine_perf_irqs(); if (affinity_broken) { - pr_info_ratelimited("IRQ %u: no longer affine to CPU%u\n", + pr_debug_ratelimited("IRQ %u: no longer affine to CPU%u\n", irq, smp_processor_id()); } } @@ -246,6 +251,8 @@ int irq_affinity_online_cpu(unsigned int cpu) raw_spin_lock_irq(&desc->lock); irq_restore_affinity_of_irq(desc, cpu); raw_spin_unlock_irq(&desc->lock); + if (cpumask_intersects(cpumask_of(cpu), cpu_perf_mask)) + reaffine_perf_irqs(); } irq_unlock_sparse(); diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c index 5277949e82e0..3fe3ccd0eea9 100644 --- a/kernel/irq/manage.c +++ b/kernel/irq/manage.c @@ -20,9 +20,20 @@ #include #include #include +#include #include "internals.h" +struct irq_desc_list { + struct list_head list; + struct irq_desc *desc; +} perf_crit_irqs = { + .list = LIST_HEAD_INIT(perf_crit_irqs.list) +}; + +static DEFINE_RAW_SPINLOCK(perf_irqs_lock); +static int perf_cpu_index = -1; + #ifdef CONFIG_IRQ_FORCED_THREADING __read_mostly bool force_irqthreads; @@ -191,6 +202,8 @@ int irq_do_set_affinity(struct irq_data *data, const struct cpumask *mask, if (!chip || !chip->irq_set_affinity) return -EINVAL; + /* IRQs only run on the first CPU in the affinity mask; reflect that */ + mask = cpumask_of(cpumask_first(mask)); ret = chip->irq_set_affinity(data, mask, force); switch (ret) { case IRQ_SET_MASK_OK: @@ -372,6 +385,9 @@ int irq_setup_affinity(struct irq_desc *desc) if (cpumask_empty(&mask)) cpumask_copy(&mask, cpu_online_mask); + if (irqd_has_set(&desc->irq_data, IRQF_PERF_CRITICAL)) + cpumask_copy(&mask, cpu_perf_mask); + if (node != NUMA_NO_NODE) { const struct cpumask *nodemask = cpumask_of_node(node); @@ -1123,6 
+1139,121 @@ setup_irq_thread(struct irqaction *new, unsigned int irq, bool secondary) return 0; } +static void add_desc_to_perf_list(struct irq_desc *desc) +{ + struct irq_desc_list *item; + + item = kmalloc(sizeof(*item), GFP_ATOMIC | __GFP_NOFAIL); + item->desc = desc; + + raw_spin_lock(&perf_irqs_lock); + list_add(&item->list, &perf_crit_irqs.list); + raw_spin_unlock(&perf_irqs_lock); +} + +static void affine_one_perf_thread(struct task_struct *t) +{ + t->flags |= PF_PERF_CRITICAL; + set_cpus_allowed_ptr(t, cpu_perf_mask); +} + +static void unaffine_one_perf_thread(struct task_struct *t) +{ + t->flags &= ~PF_PERF_CRITICAL; + set_cpus_allowed_ptr(t, cpu_all_mask); +} + +static void affine_one_perf_irq(struct irq_desc *desc) +{ + int cpu; + + /* + * If for some reason all perf cores are offline, + * then affine the IRQ to the cores that are left online. + */ + if (!cpumask_intersects(cpu_perf_mask, cpu_online_mask)) { + irq_set_affinity_locked(&desc->irq_data, cpu_online_mask, true); + perf_cpu_index = -1; + return; + } + + /* Balance the performance-critical IRQs across all perf CPUs */ + while (1) { + cpu = cpumask_next_and(perf_cpu_index, cpu_perf_mask, + cpu_online_mask); + if (cpu < nr_cpu_ids) + break; + perf_cpu_index = -1; + } + irq_set_affinity_locked(&desc->irq_data, cpumask_of(cpu), true); + + perf_cpu_index = cpu; +} + +static void setup_perf_irq_locked(struct irq_desc *desc) +{ + add_desc_to_perf_list(desc); + raw_spin_lock(&perf_irqs_lock); + affine_one_perf_irq(desc); + raw_spin_unlock(&perf_irqs_lock); +} + +void irq_set_perf_affinity(unsigned int irq) +{ + struct irq_desc *desc = irq_to_desc(irq); + struct irqaction *action; + unsigned long flags; + + if (!desc) + return; + + raw_spin_lock_irqsave(&desc->lock, flags); + action = desc->action; + while (action) { + action->flags |= IRQF_PERF_CRITICAL; + action = action->next; + } + setup_perf_irq_locked(desc); + raw_spin_unlock_irqrestore(&desc->lock, flags); +} + +void unaffine_perf_irqs(void) +{ 
+ struct irq_desc_list *data; + unsigned long flags; + + raw_spin_lock_irqsave(&perf_irqs_lock, flags); + list_for_each_entry(data, &perf_crit_irqs.list, list) { + struct irq_desc *desc = data->desc; + + raw_spin_lock(&desc->lock); + irq_set_affinity_locked(&desc->irq_data, cpu_all_mask, true); + if (desc->action->thread) + unaffine_one_perf_thread(desc->action->thread); + raw_spin_unlock(&desc->lock); + } + perf_cpu_index = -1; + raw_spin_unlock_irqrestore(&perf_irqs_lock, flags); +} + +void reaffine_perf_irqs(void) +{ + struct irq_desc_list *data; + unsigned long flags; + + raw_spin_lock_irqsave(&perf_irqs_lock, flags); + list_for_each_entry(data, &perf_crit_irqs.list, list) { + struct irq_desc *desc = data->desc; + + raw_spin_lock(&desc->lock); + affine_one_perf_irq(desc); + if (desc->action->thread) + affine_one_perf_thread(desc->action->thread); + raw_spin_unlock(&desc->lock); + } + raw_spin_unlock_irqrestore(&perf_irqs_lock, flags); +} + /* * Internal function to register an irqaction - typically used to * allocate special interrupts that are part of the architecture. 
@@ -1380,6 +1511,9 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new) irqd_set(&desc->irq_data, IRQD_NO_BALANCING); } + if (new->flags & IRQF_PERF_CRITICAL) + setup_perf_irq_locked(desc); + if (irq_settings_can_autoenable(desc)) { irq_startup(desc, IRQ_RESEND, IRQ_START_COND); } else { @@ -1393,7 +1527,6 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new) /* Undo nested disables: */ desc->depth = 1; } - } else if (new->flags & IRQF_TRIGGER_MASK) { unsigned int nmsk = new->flags & IRQF_TRIGGER_MASK; unsigned int omsk = irqd_get_trigger_type(&desc->irq_data); @@ -1549,6 +1682,20 @@ static struct irqaction *__free_irq(unsigned int irq, void *dev_id) action_ptr = &action->next; } + if (action->flags & IRQF_PERF_CRITICAL) { + struct irq_desc_list *data; + + raw_spin_lock(&perf_irqs_lock); + list_for_each_entry(data, &perf_crit_irqs.list, list) { + if (data->desc == desc) { + list_del(&data->list); + kfree(data); + break; + } + } + raw_spin_unlock(&perf_irqs_lock); + } + /* Found it - now remove it from the list of entries: */ *action_ptr = action->next; diff --git a/kernel/irq/proc.c b/kernel/irq/proc.c index 374108c5bbde..d7e215b6ea20 100644 --- a/kernel/irq/proc.c +++ b/kernel/irq/proc.c @@ -144,12 +144,16 @@ static ssize_t write_irq_affinity(int type, struct file *file, const char __user *buffer, size_t count, loff_t *pos) { unsigned int irq = (int)(long)PDE_DATA(file_inode(file)); + struct irq_desc *desc = irq_to_desc(irq); cpumask_var_t new_value; int err; if (!irq_can_set_affinity_usr(irq) || no_irq_affinity) return -EIO; + if (!irqd_has_set(&desc->irq_data, IRQF_PERF_CRITICAL)) + return -EIO; + if (!alloc_cpumask_var(&new_value, GFP_KERNEL)) return -ENOMEM; diff --git a/kernel/jump_label.c b/kernel/jump_label.c index 70be35a19be2..f207396c2abd 100644 --- a/kernel/jump_label.c +++ b/kernel/jump_label.c @@ -16,6 +16,7 @@ #include #include #include +#include #ifdef HAVE_JUMP_LABEL @@ -37,23 +38,43 @@ static int 
jump_label_cmp(const void *a, const void *b) const struct jump_entry *jea = a; const struct jump_entry *jeb = b; - if (jea->key < jeb->key) + if (jump_entry_key(jea) < jump_entry_key(jeb)) return -1; - if (jea->key > jeb->key) + if (jump_entry_key(jea) > jump_entry_key(jeb)) return 1; return 0; } +static void jump_label_swap(void *a, void *b, int size) +{ + long delta = (unsigned long)a - (unsigned long)b; + struct jump_entry *jea = a; + struct jump_entry *jeb = b; + struct jump_entry tmp = *jea; + + jea->code = jeb->code - delta; + jea->target = jeb->target - delta; + jea->key = jeb->key - delta; + + jeb->code = tmp.code + delta; + jeb->target = tmp.target + delta; + jeb->key = tmp.key + delta; +} + static void jump_label_sort_entries(struct jump_entry *start, struct jump_entry *stop) { unsigned long size; + void *swapfn = NULL; + + if (IS_ENABLED(CONFIG_HAVE_ARCH_JUMP_LABEL_RELATIVE)) + swapfn = jump_label_swap; size = (((unsigned long)stop - (unsigned long)start) / sizeof(struct jump_entry)); - sort(start, size, sizeof(struct jump_entry), jump_label_cmp, NULL); + sort(start, size, sizeof(struct jump_entry), jump_label_cmp, swapfn); } static void jump_label_update(struct static_key *key); @@ -84,6 +105,7 @@ void static_key_slow_inc_cpuslocked(struct static_key *key) int v, v1; STATIC_KEY_CHECK_USE(); + lockdep_assert_cpus_held(); /* * Careful if we get concurrent static_key_slow_inc() calls; @@ -129,6 +151,7 @@ EXPORT_SYMBOL_GPL(static_key_slow_inc); void static_key_enable_cpuslocked(struct static_key *key) { STATIC_KEY_CHECK_USE(); + lockdep_assert_cpus_held(); if (atomic_read(&key->enabled) > 0) { WARN_ON_ONCE(atomic_read(&key->enabled) != 1); @@ -159,6 +182,7 @@ EXPORT_SYMBOL_GPL(static_key_enable); void static_key_disable_cpuslocked(struct static_key *key) { STATIC_KEY_CHECK_USE(); + lockdep_assert_cpus_held(); if (atomic_read(&key->enabled) != 1) { WARN_ON_ONCE(atomic_read(&key->enabled) != 0); @@ -180,10 +204,14 @@ void static_key_disable(struct static_key 
*key) } EXPORT_SYMBOL_GPL(static_key_disable); -static void __static_key_slow_dec_cpuslocked(struct static_key *key, - unsigned long rate_limit, - struct delayed_work *work) +static bool static_key_slow_try_dec(struct static_key *key) { + int val; + + val = __atomic_add_unless(&key->enabled, -1, 1); + if (val == 1) + return false; + /* * The negative count check is valid even when a negative * key->enabled is in use by static_key_slow_inc(); a @@ -191,63 +219,70 @@ static void __static_key_slow_dec_cpuslocked(struct static_key *key, * returns is unbalanced, because all other static_key_slow_inc() * instances block while the update is in progress. */ - if (!atomic_dec_and_mutex_lock(&key->enabled, &jump_label_mutex)) { - WARN(atomic_read(&key->enabled) < 0, - "jump label: negative count!\n"); - return; - } + WARN(val < 0, "jump label: negative count!\n"); + return true; +} - if (rate_limit) { - atomic_inc(&key->enabled); - schedule_delayed_work(work, rate_limit); - } else { +static void __static_key_slow_dec_cpuslocked(struct static_key *key) +{ + lockdep_assert_cpus_held(); + + if (static_key_slow_try_dec(key)) + return; + + jump_label_lock(); + if (atomic_dec_and_test(&key->enabled)) jump_label_update(key); - } jump_label_unlock(); } -static void __static_key_slow_dec(struct static_key *key, - unsigned long rate_limit, - struct delayed_work *work) +static void __static_key_slow_dec(struct static_key *key) { cpus_read_lock(); - __static_key_slow_dec_cpuslocked(key, rate_limit, work); + __static_key_slow_dec_cpuslocked(key); cpus_read_unlock(); } -static void jump_label_update_timeout(struct work_struct *work) +void jump_label_update_timeout(struct work_struct *work) { struct static_key_deferred *key = container_of(work, struct static_key_deferred, work.work); - __static_key_slow_dec(&key->key, 0, NULL); + __static_key_slow_dec(&key->key); } +EXPORT_SYMBOL_GPL(jump_label_update_timeout); void static_key_slow_dec(struct static_key *key) { STATIC_KEY_CHECK_USE(); - 
__static_key_slow_dec(key, 0, NULL); + __static_key_slow_dec(key); } EXPORT_SYMBOL_GPL(static_key_slow_dec); void static_key_slow_dec_cpuslocked(struct static_key *key) { STATIC_KEY_CHECK_USE(); - __static_key_slow_dec_cpuslocked(key, 0, NULL); + __static_key_slow_dec_cpuslocked(key); } -void static_key_slow_dec_deferred(struct static_key_deferred *key) +void __static_key_slow_dec_deferred(struct static_key *key, + struct delayed_work *work, + unsigned long timeout) { STATIC_KEY_CHECK_USE(); - __static_key_slow_dec(&key->key, key->timeout, &key->work); -} -EXPORT_SYMBOL_GPL(static_key_slow_dec_deferred); -void static_key_deferred_flush(struct static_key_deferred *key) + if (static_key_slow_try_dec(key)) + return; + + schedule_delayed_work(work, timeout); +} +EXPORT_SYMBOL_GPL(__static_key_slow_dec_deferred); + +void __static_key_deferred_flush(void *key, struct delayed_work *work) { STATIC_KEY_CHECK_USE(); - flush_delayed_work(&key->work); + flush_delayed_work(work); } -EXPORT_SYMBOL_GPL(static_key_deferred_flush); +EXPORT_SYMBOL_GPL(__static_key_deferred_flush); void jump_label_rate_limit(struct static_key_deferred *key, unsigned long rl) @@ -260,8 +295,8 @@ EXPORT_SYMBOL_GPL(jump_label_rate_limit); static int addr_conflict(struct jump_entry *entry, void *start, void *end) { - if (entry->code <= (unsigned long)end && - entry->code + JUMP_LABEL_NOP_SIZE > (unsigned long)start) + if (jump_entry_code(entry) <= (unsigned long)end && + jump_entry_code(entry) + JUMP_LABEL_NOP_SIZE > (unsigned long)start) return 1; return 0; @@ -320,16 +355,6 @@ static inline void static_key_set_linked(struct static_key *key) key->type |= JUMP_TYPE_LINKED; } -static inline struct static_key *jump_entry_key(struct jump_entry *entry) -{ - return (struct static_key *)((unsigned long)entry->key & ~1UL); -} - -static bool jump_entry_branch(struct jump_entry *entry) -{ - return (unsigned long)entry->key & 1UL; -} - /*** * A 'struct static_key' uses a union such that it either points directly * 
to a table of 'struct jump_entry' or to a linked list of modules which in @@ -354,7 +379,7 @@ static enum jump_label_type jump_label_type(struct jump_entry *entry) { struct static_key *key = jump_entry_key(entry); bool enabled = static_key_enabled(key); - bool branch = jump_entry_branch(entry); + bool branch = jump_entry_is_branch(entry); /* See the comment in linux/jump_label.h */ return enabled ^ branch; @@ -362,16 +387,21 @@ static enum jump_label_type jump_label_type(struct jump_entry *entry) static void __jump_label_update(struct static_key *key, struct jump_entry *entry, - struct jump_entry *stop) + struct jump_entry *stop, + bool init) { for (; (entry < stop) && (jump_entry_key(entry) == key); entry++) { /* - * entry->code set to 0 invalidates module init text sections - * kernel_text_address() verifies we are not in core kernel - * init code, see jump_label_invalidate_module_init(). + * An entry->code of 0 indicates an entry which has been + * disabled because it was in an init text area. 
*/ - if (entry->code && kernel_text_address(entry->code)) - arch_jump_label_transform(entry, jump_label_type(entry)); + if (init || !jump_entry_is_init(entry)) { + if (kernel_text_address(jump_entry_code(entry))) + arch_jump_label_transform(entry, jump_label_type(entry)); + else + WARN_ONCE(1, "can't patch jump_label at %pS", + (void *)jump_entry_code(entry)); + } } } @@ -405,6 +435,9 @@ void __init jump_label_init(void) if (jump_label_type(iter) == JUMP_LABEL_NOP) arch_jump_label_transform_static(iter, JUMP_LABEL_NOP); + if (init_section_contains((void *)jump_entry_code(iter), 1)) + jump_entry_set_init(iter); + iterk = jump_entry_key(iter); if (iterk == key) continue; @@ -423,7 +456,7 @@ static enum jump_label_type jump_label_init_type(struct jump_entry *entry) { struct static_key *key = jump_entry_key(entry); bool type = static_key_type(key); - bool branch = jump_entry_branch(entry); + bool branch = jump_entry_is_branch(entry); /* See the comment in linux/jump_label.h */ return type ^ branch; @@ -437,7 +470,7 @@ struct static_key_mod { static inline struct static_key_mod *static_key_mod(struct static_key *key) { - WARN_ON_ONCE(!(key->type & JUMP_TYPE_LINKED)); + WARN_ON_ONCE(!static_key_linked(key)); return (struct static_key_mod *)(key->type & ~JUMP_TYPE_MASK); } @@ -496,7 +529,8 @@ static void __jump_label_mod_update(struct static_key *key) stop = __stop___jump_table; else stop = m->jump_entries + m->num_jump_entries; - __jump_label_update(key, mod->entries, stop); + __jump_label_update(key, mod->entries, stop, + m && m->state == MODULE_STATE_COMING); } } @@ -542,12 +576,15 @@ static int jump_label_add_module(struct module *mod) for (iter = iter_start; iter < iter_stop; iter++) { struct static_key *iterk; + if (within_module_init(jump_entry_code(iter), mod)) + jump_entry_set_init(iter); + iterk = jump_entry_key(iter); if (iterk == key) continue; key = iterk; - if (within_module(iter->key, mod)) { + if (within_module((unsigned long)key, mod)) { 
static_key_set_entries(key, iter); continue; } @@ -577,7 +614,7 @@ static int jump_label_add_module(struct module *mod) /* Only update if we've changed from our initial state */ if (jump_label_type(iter) != jump_label_init_type(iter)) - __jump_label_update(key, iter, iter_stop); + __jump_label_update(key, iter, iter_stop, true); } return 0; @@ -597,7 +634,7 @@ static void jump_label_del_module(struct module *mod) key = jump_entry_key(iter); - if (within_module(iter->key, mod)) + if (within_module((unsigned long)key, mod)) continue; /* No memory during module load */ @@ -633,18 +670,6 @@ static void jump_label_del_module(struct module *mod) } } -static void jump_label_invalidate_module_init(struct module *mod) -{ - struct jump_entry *iter_start = mod->jump_entries; - struct jump_entry *iter_stop = iter_start + mod->num_jump_entries; - struct jump_entry *iter; - - for (iter = iter_start; iter < iter_stop; iter++) { - if (within_module_init(iter->code, mod)) - iter->code = 0; - } -} - static int jump_label_module_notify(struct notifier_block *self, unsigned long val, void *data) @@ -659,16 +684,13 @@ jump_label_module_notify(struct notifier_block *self, unsigned long val, case MODULE_STATE_COMING: ret = jump_label_add_module(mod); if (ret) { - WARN(1, "Failed to allocatote memory: jump_label may not work properly.\n"); + WARN(1, "Failed to allocate memory: jump_label may not work properly.\n"); jump_label_del_module(mod); } break; case MODULE_STATE_GOING: jump_label_del_module(mod); break; - case MODULE_STATE_LIVE: - jump_label_invalidate_module_init(mod); - break; } jump_label_unlock(); @@ -738,7 +760,8 @@ static void jump_label_update(struct static_key *key) entry = static_key_entries(key); /* if there are no users, entry can be NULL */ if (entry) - __jump_label_update(key, entry, stop); + __jump_label_update(key, entry, stop, + system_state < SYSTEM_RUNNING); } #ifdef CONFIG_STATIC_KEYS_SELFTEST diff --git a/kernel/kprobes.c b/kernel/kprobes.c index 
66f1818d4762..a4665a65d5f1 100644 --- a/kernel/kprobes.c +++ b/kernel/kprobes.c @@ -1879,6 +1879,25 @@ void unregister_jprobes(struct jprobe **jps, int num) } EXPORT_SYMBOL_GPL(unregister_jprobes); +bool __weak arch_kprobe_on_func_entry(unsigned long offset) +{ + return !offset; +} + +bool kprobe_on_func_entry(kprobe_opcode_t *addr, const char *sym, unsigned long offset) +{ + kprobe_opcode_t *kp_addr = _kprobe_addr(addr, sym, offset); + + if (IS_ERR(kp_addr)) + return false; + + if (!kallsyms_lookup_size_offset((unsigned long)kp_addr, NULL, &offset) || + !arch_kprobe_on_func_entry(offset)) + return false; + + return true; +} + #ifdef CONFIG_KRETPROBES /* * This kprobe pre_handler is registered with every kretprobe. When probe @@ -1935,25 +1954,6 @@ static int pre_handler_kretprobe(struct kprobe *p, struct pt_regs *regs) } NOKPROBE_SYMBOL(pre_handler_kretprobe); -bool __weak arch_kprobe_on_func_entry(unsigned long offset) -{ - return !offset; -} - -bool kprobe_on_func_entry(kprobe_opcode_t *addr, const char *sym, unsigned long offset) -{ - kprobe_opcode_t *kp_addr = _kprobe_addr(addr, sym, offset); - - if (IS_ERR(kp_addr)) - return false; - - if (!kallsyms_lookup_size_offset((unsigned long)kp_addr, NULL, &offset) || - !arch_kprobe_on_func_entry(offset)) - return false; - - return true; -} - int register_kretprobe(struct kretprobe *rp) { int ret = 0; diff --git a/kernel/kthread.c b/kernel/kthread.c index f631291f71ee..a6d8ba28f5ab 100644 --- a/kernel/kthread.c +++ b/kernel/kthread.c @@ -281,18 +281,15 @@ struct task_struct *__kthread_create_on_node(int (*threadfn)(void *data), { DECLARE_COMPLETION_ONSTACK(done); struct task_struct *task; - struct kthread_create_info *create = kmalloc(sizeof(*create), - GFP_KERNEL); + struct kthread_create_info create; - if (!create) - return ERR_PTR(-ENOMEM); - create->threadfn = threadfn; - create->data = data; - create->node = node; - create->done = &done; + create.threadfn = threadfn; + create.data = data; + create.node = node; + 
create.done = &done; spin_lock(&kthread_create_lock); - list_add_tail(&create->list, &kthread_create_list); + list_add_tail(&create.list, &kthread_create_list); spin_unlock(&kthread_create_lock); wake_up_process(kthreadd_task); @@ -307,7 +304,7 @@ struct task_struct *__kthread_create_on_node(int (*threadfn)(void *data), * calls complete(), leave the cleanup of this structure to * that thread. */ - if (xchg(&create->done, NULL)) + if (xchg(&create.done, NULL)) return ERR_PTR(-EINTR); /* * kthreadd (or new kernel thread) will call complete() @@ -315,7 +312,7 @@ struct task_struct *__kthread_create_on_node(int (*threadfn)(void *data), */ wait_for_completion(&done); } - task = create->result; + task = create.result; if (!IS_ERR(task)) { static const struct sched_param param = { .sched_priority = 0 }; char name[TASK_COMM_LEN]; @@ -333,7 +330,6 @@ struct task_struct *__kthread_create_on_node(int (*threadfn)(void *data), sched_setscheduler_nocheck(task, SCHED_NORMAL, ¶m); set_cpus_allowed_ptr(task, cpu_all_mask); } - kfree(create); return task; } diff --git a/kernel/power/process.c b/kernel/power/process.c index 1ae7f93efde9..03a944409408 100644 --- a/kernel/power/process.c +++ b/kernel/power/process.c @@ -108,7 +108,7 @@ static int try_to_freeze_tasks(bool user_only) } read_unlock(&tasklist_lock); } else { - pr_cont("(elapsed %d.%03d seconds) ", elapsed_msecs / 1000, + pr_debug("(elapsed %d.%03d seconds) ", elapsed_msecs / 1000, elapsed_msecs % 1000); } @@ -137,14 +137,14 @@ int freeze_processes(void) atomic_inc(&system_freezing_cnt); pm_wakeup_clear(true); - pr_info("Freezing user space processes ... "); + pr_debug("Freezing user space processes ... 
"); pm_freezing = true; error = try_to_freeze_tasks(true); if (!error) { __usermodehelper_set_disable_depth(UMH_DISABLED); - pr_cont("done."); + pr_debug("done."); } - pr_cont("\n"); + pr_debug("\n"); BUG_ON(in_atomic()); /* @@ -173,14 +173,14 @@ int freeze_kernel_threads(void) { int error; - pr_info("Freezing remaining freezable tasks ... "); + pr_debug("Freezing remaining freezable tasks ... "); pm_nosig_freezing = true; error = try_to_freeze_tasks(false); if (!error) - pr_cont("done."); + pr_debug("done."); - pr_cont("\n"); + pr_debug("\n"); BUG_ON(in_atomic()); if (error) @@ -201,7 +201,7 @@ void thaw_processes(void) oom_killer_enable(); - pr_info("Restarting tasks ... "); + pr_debug("Restarting tasks ... "); __usermodehelper_set_disable_depth(UMH_FREEZING); thaw_workqueues(); @@ -222,7 +222,7 @@ void thaw_processes(void) usermodehelper_enable(); schedule(); - pr_cont("done.\n"); + pr_debug("done.\n"); trace_suspend_resume(TPS("thaw_processes"), 0, false); } @@ -243,5 +243,5 @@ void thaw_kernel_threads(void) read_unlock(&tasklist_lock); schedule(); - pr_cont("done.\n"); + pr_debug("done.\n"); } diff --git a/kernel/power/qos.c b/kernel/power/qos.c index adf8b2006732..a41c05b7d7b6 100644 --- a/kernel/power/qos.c +++ b/kernel/power/qos.c @@ -54,7 +54,7 @@ /* * locking rule: all changes to constraints or notifiers lists * or pm_qos_object list and pm_qos_objects need to happen with pm_qos_lock - * held, taken with _irqsave. One lock to rule them all + * held. 
One lock to rule them all */ struct pm_qos_object { struct pm_qos_constraints *constraints; @@ -199,7 +199,6 @@ static int pm_qos_dbg_show_requests(struct seq_file *s, void *unused) struct pm_qos_constraints *c; struct pm_qos_request *req; char *type; - unsigned long flags; int tot_reqs = 0; int active_reqs = 0; @@ -214,7 +213,7 @@ static int pm_qos_dbg_show_requests(struct seq_file *s, void *unused) } /* Lock to ensure we have a snapshot */ - spin_lock_irqsave(&pm_qos_lock, flags); + spin_lock(&pm_qos_lock); if (plist_head_empty(&c->list)) { seq_puts(s, "Empty!\n"); goto out; @@ -250,7 +249,7 @@ static int pm_qos_dbg_show_requests(struct seq_file *s, void *unused) type, pm_qos_get_value(c), active_reqs, tot_reqs); out: - spin_unlock_irqrestore(&pm_qos_lock, flags); + spin_unlock(&pm_qos_lock); return 0; } @@ -325,12 +324,11 @@ static inline int pm_qos_set_value_for_cpus(struct pm_qos_constraints *c, int pm_qos_update_target(struct pm_qos_constraints *c, struct plist_node *node, enum pm_qos_req_action action, int value) { - unsigned long flags; int prev_value, curr_value, new_value; struct cpumask cpus; int ret; - spin_lock_irqsave(&pm_qos_lock, flags); + spin_lock(&pm_qos_lock); prev_value = pm_qos_get_value(c); if (value == PM_QOS_DEFAULT_VALUE) new_value = c->default_value; @@ -362,10 +360,6 @@ int pm_qos_update_target(struct pm_qos_constraints *c, struct plist_node *node, pm_qos_set_value(c, curr_value); ret = pm_qos_set_value_for_cpus(c, &cpus); - spin_unlock_irqrestore(&pm_qos_lock, flags); - - trace_pm_qos_update_target(action, prev_value, curr_value); - /* * if cpu mask bits are set, call the notifier call chain * to update the new qos restriction for the cores @@ -380,6 +374,11 @@ int pm_qos_update_target(struct pm_qos_constraints *c, struct plist_node *node, } else { ret = 0; } + + spin_unlock(&pm_qos_lock); + + trace_pm_qos_update_target(action, prev_value, curr_value); + return ret; } @@ -415,10 +414,9 @@ bool pm_qos_update_flags(struct pm_qos_flags 
*pqf, struct pm_qos_flags_request *req, enum pm_qos_req_action action, s32 val) { - unsigned long irqflags; s32 prev_value, curr_value; - spin_lock_irqsave(&pm_qos_lock, irqflags); + spin_lock(&pm_qos_lock); prev_value = list_empty(&pqf->list) ? 0 : pqf->effective_flags; @@ -441,7 +439,7 @@ bool pm_qos_update_flags(struct pm_qos_flags *pqf, curr_value = list_empty(&pqf->list) ? 0 : pqf->effective_flags; - spin_unlock_irqrestore(&pm_qos_lock, irqflags); + spin_unlock(&pm_qos_lock); trace_pm_qos_update_flags(action, prev_value, curr_value); return prev_value != curr_value; @@ -476,12 +474,11 @@ EXPORT_SYMBOL_GPL(pm_qos_request_active); int pm_qos_request_for_cpumask(int pm_qos_class, struct cpumask *mask) { - unsigned long irqflags; int cpu; struct pm_qos_constraints *c = NULL; int val; - spin_lock_irqsave(&pm_qos_lock, irqflags); + spin_lock(&pm_qos_lock); c = pm_qos_array[pm_qos_class]->constraints; val = c->default_value; @@ -500,7 +497,7 @@ int pm_qos_request_for_cpumask(int pm_qos_class, struct cpumask *mask) break; } } - spin_unlock_irqrestore(&pm_qos_lock, irqflags); + spin_unlock(&pm_qos_lock); return val; } @@ -535,7 +532,6 @@ static void pm_qos_work_fn(struct work_struct *work) #ifdef CONFIG_SMP static void pm_qos_irq_release(struct kref *ref) { - unsigned long flags; struct irq_affinity_notify *notify = container_of(ref, struct irq_affinity_notify, kref); struct pm_qos_request *req = container_of(notify, @@ -543,9 +539,9 @@ static void pm_qos_irq_release(struct kref *ref) struct pm_qos_constraints *c = pm_qos_array[req->pm_qos_class]->constraints; - spin_lock_irqsave(&pm_qos_lock, flags); + spin_lock(&pm_qos_lock); cpumask_setall(&req->cpus_affine); - spin_unlock_irqrestore(&pm_qos_lock, flags); + spin_unlock(&pm_qos_lock); pm_qos_update_target(c, &req->node, PM_QOS_UPDATE_REQ, c->default_value); @@ -554,7 +550,6 @@ static void pm_qos_irq_release(struct kref *ref) static void pm_qos_irq_notify(struct irq_affinity_notify *notify, const cpumask_t 
*unused_mask) { - unsigned long flags; struct pm_qos_request *req = container_of(notify, struct pm_qos_request, irq_notify); struct pm_qos_constraints *c = @@ -564,13 +559,13 @@ static void pm_qos_irq_notify(struct irq_affinity_notify *notify, irq_data_get_effective_affinity_mask(&desc->irq_data); bool affinity_changed = false; - spin_lock_irqsave(&pm_qos_lock, flags); + spin_lock(&pm_qos_lock); if (!cpumask_equal(&req->cpus_affine, new_affinity)) { cpumask_copy(&req->cpus_affine, new_affinity); affinity_changed = true; } - spin_unlock_irqrestore(&pm_qos_lock, flags); + spin_unlock(&pm_qos_lock); if (affinity_changed) pm_qos_update_target(c, &req->node, PM_QOS_UPDATE_REQ, @@ -872,7 +867,6 @@ static ssize_t pm_qos_power_read(struct file *filp, char __user *buf, size_t count, loff_t *f_pos) { s32 value; - unsigned long flags; struct pm_qos_request *req = filp->private_data; if (!req) @@ -880,9 +874,9 @@ static ssize_t pm_qos_power_read(struct file *filp, char __user *buf, if (!pm_qos_request_active(req)) return -EINVAL; - spin_lock_irqsave(&pm_qos_lock, flags); + spin_lock(&pm_qos_lock); value = pm_qos_get_value(pm_qos_array[req->pm_qos_class]->constraints); - spin_unlock_irqrestore(&pm_qos_lock, flags); + spin_unlock(&pm_qos_lock); return simple_read_from_buffer(buf, count, f_pos, &value, sizeof(s32)); } diff --git a/kernel/power/suspend.c b/kernel/power/suspend.c index 77161381f725..3b95362327d3 100644 --- a/kernel/power/suspend.c +++ b/kernel/power/suspend.c @@ -153,11 +153,11 @@ static int suspend_monitor_kthread(void *arg) if (suspend_mon_toggle == TOGGLE_START) { timeout = msecs_to_jiffies( SUSPEND_TIMER_TIMEOUT_MS); - pr_info("Start suspend monitor\n"); + pr_debug("Start suspend monitor\n"); } else if (suspend_mon_toggle == TOGGLE_STOP) { timeout = MAX_SCHEDULE_TIMEOUT; timeout_count = 0; - pr_info("Stop suspend monitor\n"); + pr_debug("Stop suspend monitor\n"); } suspend_mon_toggle = TOGGLE_NONE; mutex_unlock(&suspend_mon_lock); @@ -790,7 +790,7 @@ static 
void pm_suspend_marker(char *annotation) getnstimeofday(&ts); rtc_time_to_tm(ts.tv_sec, &tm); - pr_info("PM: suspend %s %d-%02d-%02d %02d:%02d:%02d.%09lu UTC\n", + pr_debug("PM: suspend %s %d-%02d-%02d %02d:%02d:%02d.%09lu UTC\n", annotation, tm.tm_year + 1900, tm.tm_mon + 1, tm.tm_mday, tm.tm_hour, tm.tm_min, tm.tm_sec, ts.tv_nsec); } diff --git a/kernel/printk/printk.c b/kernel/printk/printk.c index 26af97b07d98..c5bc1549d6d8 100644 --- a/kernel/printk/printk.c +++ b/kernel/printk/printk.c @@ -820,10 +820,16 @@ static ssize_t devkmsg_write(struct kiocb *iocb, struct iov_iter *from) endp++; len -= endp - line; line = endp; + /* QG-D */ + if (strstr(line, "healthd")|| + strstr(line, "cacert") || + !strcmp(line, "CP: Couldn't")) + goto free; } } printk_emit(facility, level, NULL, 0, "%s", line); +free: kfree(buf); return ret; } @@ -1346,13 +1352,11 @@ static size_t msg_print_text(const struct printk_log *msg, bool syslog, char *bu static int syslog_print(char __user *buf, int size) { - char *text; + char text[LOG_LINE_MAX + PREFIX_MAX]; struct printk_log *msg; int len = 0; - text = kmalloc(LOG_LINE_MAX + PREFIX_MAX, GFP_KERNEL); - if (!text) - return -ENOMEM; + memset(&text, 0, LOG_LINE_MAX + PREFIX_MAX); while (size > 0) { size_t n; @@ -1401,18 +1405,15 @@ static int syslog_print(char __user *buf, int size) buf += n; } - kfree(text); return len; } static int syslog_print_all(char __user *buf, int size, bool clear) { - char *text; + char text[LOG_LINE_MAX + PREFIX_MAX]; int len = 0; - text = kmalloc(LOG_LINE_MAX + PREFIX_MAX, GFP_KERNEL); - if (!text) - return -ENOMEM; + memset(&text, 0, LOG_LINE_MAX + PREFIX_MAX); logbuf_lock_irq(); if (buf) { @@ -1483,7 +1484,6 @@ static int syslog_print_all(char __user *buf, int size, bool clear) } logbuf_unlock_irq(); - kfree(text); return len; } @@ -2207,7 +2207,7 @@ int add_preferred_console(char *name, int idx, char *options) return __add_preferred_console(name, idx, options, NULL); } -bool console_suspend_enabled = true; 
+bool console_suspend_enabled = false; EXPORT_SYMBOL(console_suspend_enabled); static int __init console_suspend_disable(char *str) @@ -2235,7 +2235,6 @@ void suspend_console(void) { if (!console_suspend_enabled) return; - printk("Suspending console(s) (use no_console_suspend to debug)\n"); console_lock(); console_suspended = 1; up_console_sem(); diff --git a/kernel/rcu/update.c b/kernel/rcu/update.c index 7a577bd989a4..4acfde403f44 100644 --- a/kernel/rcu/update.c +++ b/kernel/rcu/update.c @@ -718,6 +718,7 @@ static int __noreturn rcu_tasks_kthread(void *arg) struct rcu_head *list; struct rcu_head *next; LIST_HEAD(rcu_tasks_holdouts); + int fract; /* Run on housekeeping CPUs by default. Sysadm can move if desired. */ housekeeping_affine(current); @@ -799,13 +800,25 @@ static int __noreturn rcu_tasks_kthread(void *arg) * holdouts. When the list is empty, we are done. */ lastreport = jiffies; - while (!list_empty(&rcu_tasks_holdouts)) { + + /* Start off with HZ/10 wait and slowly back off to 1 HZ wait*/ + fract = 10; + + for (;;) { bool firstreport; bool needreport; int rtst; struct task_struct *t1; - schedule_timeout_interruptible(HZ); + if (list_empty(&rcu_tasks_holdouts)) + break; + + /* Slowly back off waiting for holdouts */ + schedule_timeout_interruptible(HZ/fract); + + if (fract > 1) + fract--; + rtst = READ_ONCE(rcu_task_stall_timeout); needreport = rtst > 0 && time_after(jiffies, lastreport + rtst); diff --git a/kernel/sched/core.c b/kernel/sched/core.c index d125602d43cb..390a861e169d 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -34,7 +34,6 @@ #include #include -#include #include #ifdef CONFIG_PARAVIRT #include @@ -1072,6 +1071,17 @@ void set_cpus_allowed_common(struct task_struct *p, const struct cpumask *new_ma p->nr_cpus_allowed = cpumask_weight(new_mask); } +static const struct cpumask * +adjust_cpumask(const struct task_struct *p, + const struct cpumask *orig_mask) +{ + /* Force all performance-critical kthreads onto the big cluster 
*/ + if (p->flags & PF_PERF_CRITICAL) + return cpu_perf_mask; + + return orig_mask; +} + void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask) { struct rq *rq = task_rq(p); @@ -1120,6 +1130,8 @@ static int __set_cpus_allowed_ptr(struct task_struct *p, int ret = 0; cpumask_t allowed_mask; + new_mask = adjust_cpumask(p, new_mask); + rq = task_rq_lock(p, &rf); update_rq_clock(rq); @@ -2930,7 +2942,7 @@ context_switch(struct rq *rq, struct task_struct *prev, */ rq_unpin_lock(rq, rf); spin_release(&rq->lock.dep_map, 1, _THIS_IP_); - uncached_logk(LOGK_CTXID, (void *)(u64)next->pid); + /* Here we just switch the register state and the stack. */ switch_to(prev, next, prev); barrier(); @@ -5972,7 +5984,7 @@ int sched_isolate_count(const cpumask_t *mask, bool include_offline) */ int sched_isolate_cpu(int cpu) { - struct rq *rq = cpu_rq(cpu); + struct rq *rq; cpumask_t avail_cpus; int ret_code = 0; u64 start_time = 0; @@ -5984,11 +5996,14 @@ int sched_isolate_cpu(int cpu) cpumask_andnot(&avail_cpus, cpu_online_mask, cpu_isolated_mask); - if (!cpu_online(cpu)) { + if (cpu < 0 || cpu >= nr_cpu_ids || !cpu_possible(cpu) + || !cpu_online(cpu)) { ret_code = -EINVAL; goto out; } + rq = cpu_rq(cpu); + if (++cpu_isolation_vote[cpu] > 1) goto out; @@ -6047,6 +6062,10 @@ int sched_unisolate_cpu_unlocked(int cpu) struct rq *rq = cpu_rq(cpu); u64 start_time = 0; + if (cpu < 0 || cpu >= nr_cpu_ids || !cpu_possible(cpu)) { + ret_code = -EINVAL; + goto out; + } if (trace_sched_isolate_enabled()) start_time = sched_clock(); @@ -7575,7 +7594,6 @@ void sched_exit(struct task_struct *p) enqueue_task(rq, p, 0); clear_ed_task(p, rq); task_rq_unlock(rq, p, &rf); - free_task_load_ptrs(p); } #endif /* CONFIG_SCHED_WALT */ diff --git a/kernel/sched/cpufreq.c b/kernel/sched/cpufreq.c index dbc51442ecbc..42ce32a1abad 100644 --- a/kernel/sched/cpufreq.c +++ b/kernel/sched/cpufreq.c @@ -8,6 +8,7 @@ * it under the terms of the GNU General Public License version 2 as * published 
by the Free Software Foundation. */ +#include #include "sched.h" @@ -61,3 +62,19 @@ void cpufreq_remove_update_util_hook(int cpu) rcu_assign_pointer(per_cpu(cpufreq_update_util_data, cpu), NULL); } EXPORT_SYMBOL_GPL(cpufreq_remove_update_util_hook); + +/** + * cpufreq_this_cpu_can_update - Check if cpufreq policy can be updated. + * @policy: cpufreq policy to check. + * + * Return 'true' if: + * - the local and remote CPUs share @policy, + * - dvfs_possible_from_any_cpu is set in @policy and the local CPU is not going + * offline (in which case it is not expected to run cpufreq updates any more). + */ +bool cpufreq_this_cpu_can_update(struct cpufreq_policy *policy) +{ + return cpumask_test_cpu(smp_processor_id(), policy->cpus) || + (policy->dvfs_possible_from_any_cpu && + rcu_dereference_sched(*this_cpu_ptr(&cpufreq_update_util_data))); +} diff --git a/kernel/sched/cpufreq_schedutil.c b/kernel/sched/cpufreq_schedutil.c index 58e07e6596b2..415a309b77f6 100644 --- a/kernel/sched/cpufreq_schedutil.c +++ b/kernel/sched/cpufreq_schedutil.c @@ -25,9 +25,10 @@ struct sugov_tunables { struct gov_attr_set attr_set; unsigned int up_rate_limit_us; unsigned int down_rate_limit_us; - unsigned int hispeed_load; - unsigned int hispeed_freq; - bool pl; + unsigned int hispeed_load; + unsigned int hispeed_freq; + bool pl; + unsigned int rtg_boost_freq; }; struct sugov_policy { @@ -49,6 +50,7 @@ struct sugov_policy { unsigned int next_freq; unsigned int cached_raw_freq; unsigned long hispeed_util; + unsigned long rtg_boost_util; unsigned long max; /* The next fields are only needed if fast switch cannot be used. */ @@ -107,12 +109,10 @@ static bool sugov_should_update_freq(struct sugov_policy *sg_policy, u64 time) * by the hardware, as calculating the frequency is pointless if * we cannot in fact act on it. * - * For the slow switching platforms, the kthread is always scheduled on - * the right set of CPUs and any CPU can find the next frequency and - * schedule the kthread. 
+ * This is needed on the slow switching platforms too to prevent CPUs + * going offline from leaving stale IRQ work items behind. */ - if (sg_policy->policy->fast_switch_enabled && - !cpufreq_can_do_remote_dvfs(sg_policy->policy)) + if (!cpufreq_this_cpu_can_update(sg_policy->policy)) return false; if (unlikely(sg_policy->need_freq_update)) { @@ -376,11 +376,15 @@ static inline bool sugov_cpu_is_busy(struct sugov_cpu *sg_cpu) { return false; } #define NL_RATIO 75 #define DEFAULT_HISPEED_LOAD 90 +#define DEFAULT_CPU0_RTG_BOOST_FREQ 1000000 +#define DEFAULT_CPU4_RTG_BOOST_FREQ 0 +#define DEFAULT_CPU7_RTG_BOOST_FREQ 0 static void sugov_walt_adjust(struct sugov_cpu *sg_cpu, unsigned long *util, unsigned long *max) { struct sugov_policy *sg_policy = sg_cpu->sg_policy; bool is_migration = sg_cpu->flags & SCHED_CPUFREQ_INTERCLUSTER_MIG; + bool is_rtg_boost = sg_cpu->walt_load.rtgb_active; unsigned long nl = sg_cpu->walt_load.nl; unsigned long cpu_util = sg_cpu->util; bool is_hiload; @@ -389,6 +393,9 @@ static void sugov_walt_adjust(struct sugov_cpu *sg_cpu, unsigned long *util, if (unlikely(!sysctl_sched_use_walt_cpu_util)) return; + if (is_rtg_boost) + *util = max(*util, sg_policy->rtg_boost_util); + is_hiload = (cpu_util >= mult_frac(sg_policy->avg_cap, sg_policy->tunables->hispeed_load, 100)); @@ -406,13 +413,23 @@ static void sugov_walt_adjust(struct sugov_cpu *sg_cpu, unsigned long *util, } } +static inline unsigned long target_util(struct sugov_policy *sg_policy, + unsigned int freq) +{ + unsigned long util; + + util = freq_to_util(sg_policy, freq); + util = mult_frac(util, TARGET_LOAD, 100); + return util; +} + static void sugov_update_single(struct update_util_data *hook, u64 time, unsigned int flags) { struct sugov_cpu *sg_cpu = container_of(hook, struct sugov_cpu, update_util); struct sugov_policy *sg_policy = sg_cpu->sg_policy; struct cpufreq_policy *policy = sg_policy->policy; - unsigned long util, max, hs_util; + unsigned long util, max, hs_util, boost_util; 
unsigned int next_f; bool busy; @@ -436,11 +453,14 @@ static void sugov_update_single(struct update_util_data *hook, u64 time, sugov_get_util(&util, &max, sg_cpu->cpu); if (sg_policy->max != max) { sg_policy->max = max; - hs_util = freq_to_util(sg_policy, + hs_util = target_util(sg_policy, sg_policy->tunables->hispeed_freq); - hs_util = mult_frac(hs_util, TARGET_LOAD, 100); sg_policy->hispeed_util = hs_util; - } + + boost_util = target_util(sg_policy, + sg_policy->tunables->rtg_boost_freq); + sg_policy->rtg_boost_util = boost_util; + } sg_cpu->util = util; sg_cpu->max = max; @@ -450,7 +470,8 @@ static void sugov_update_single(struct update_util_data *hook, u64 time, sg_policy->policy->cur); trace_sugov_util_update(sg_cpu->cpu, sg_cpu->util, sg_policy->avg_cap, max, sg_cpu->walt_load.nl, - sg_cpu->walt_load.pl, flags); + sg_cpu->walt_load.pl, + sg_cpu->walt_load.rtgb_active, flags); sugov_iowait_boost(sg_cpu, &util, &max); sugov_walt_adjust(sg_cpu, &util, &max); @@ -526,7 +547,7 @@ static void sugov_update_shared(struct update_util_data *hook, u64 time, { struct sugov_cpu *sg_cpu = container_of(hook, struct sugov_cpu, update_util); struct sugov_policy *sg_policy = sg_cpu->sg_policy; - unsigned long util, max, hs_util; + unsigned long util, max, hs_util, boost_util; unsigned int next_f; if (!sg_policy->tunables->pl && flags & SCHED_CPUFREQ_PL) @@ -540,10 +561,13 @@ static void sugov_update_shared(struct update_util_data *hook, u64 time, if (sg_policy->max != max) { sg_policy->max = max; - hs_util = freq_to_util(sg_policy, + hs_util = target_util(sg_policy, sg_policy->tunables->hispeed_freq); - hs_util = mult_frac(hs_util, TARGET_LOAD, 100); sg_policy->hispeed_util = hs_util; + + boost_util = target_util(sg_policy, + sg_policy->tunables->rtg_boost_freq); + sg_policy->rtg_boost_util = boost_util; } sg_cpu->util = util; @@ -558,7 +582,8 @@ static void sugov_update_shared(struct update_util_data *hook, u64 time, trace_sugov_util_update(sg_cpu->cpu, sg_cpu->util, 
sg_policy->avg_cap, max, sg_cpu->walt_load.nl, - sg_cpu->walt_load.pl, flags); + sg_cpu->walt_load.pl, + sg_cpu->walt_load.rtgb_active, flags); if (sugov_should_update_freq(sg_policy, time) && !(flags & SCHED_CPUFREQ_CONTINUE)) { @@ -637,14 +662,14 @@ static ssize_t up_rate_limit_us_show(struct gov_attr_set *attr_set, char *buf) { struct sugov_tunables *tunables = to_sugov_tunables(attr_set); - return sprintf(buf, "%u\n", tunables->up_rate_limit_us); + return scnprintf(buf, PAGE_SIZE, "%u\n", tunables->up_rate_limit_us); } static ssize_t down_rate_limit_us_show(struct gov_attr_set *attr_set, char *buf) { struct sugov_tunables *tunables = to_sugov_tunables(attr_set); - return sprintf(buf, "%u\n", tunables->down_rate_limit_us); + return scnprintf(buf, PAGE_SIZE, "%u\n", tunables->down_rate_limit_us); } static ssize_t up_rate_limit_us_store(struct gov_attr_set *attr_set, @@ -729,9 +754,8 @@ static ssize_t hispeed_freq_store(struct gov_attr_set *attr_set, tunables->hispeed_freq = val; list_for_each_entry(sg_policy, &attr_set->policy_list, tunables_hook) { raw_spin_lock_irqsave(&sg_policy->update_lock, flags); - hs_util = freq_to_util(sg_policy, + hs_util = target_util(sg_policy, sg_policy->tunables->hispeed_freq); - hs_util = mult_frac(hs_util, TARGET_LOAD, 100); sg_policy->hispeed_util = hs_util; raw_spin_unlock_irqrestore(&sg_policy->update_lock, flags); } @@ -739,6 +763,37 @@ static ssize_t hispeed_freq_store(struct gov_attr_set *attr_set, return count; } +static ssize_t rtg_boost_freq_show(struct gov_attr_set *attr_set, char *buf) +{ + struct sugov_tunables *tunables = to_sugov_tunables(attr_set); + + return scnprintf(buf, PAGE_SIZE, "%u\n", tunables->rtg_boost_freq); +} + +static ssize_t rtg_boost_freq_store(struct gov_attr_set *attr_set, + const char *buf, size_t count) +{ + struct sugov_tunables *tunables = to_sugov_tunables(attr_set); + unsigned int val; + struct sugov_policy *sg_policy; + unsigned long boost_util; + unsigned long flags; + + if (kstrtouint(buf, 
10, &val)) + return -EINVAL; + + tunables->rtg_boost_freq = val; + list_for_each_entry(sg_policy, &attr_set->policy_list, tunables_hook) { + raw_spin_lock_irqsave(&sg_policy->update_lock, flags); + boost_util = target_util(sg_policy, + sg_policy->tunables->rtg_boost_freq); + sg_policy->rtg_boost_util = boost_util; + raw_spin_unlock_irqrestore(&sg_policy->update_lock, flags); + } + + return count; +} + static ssize_t pl_show(struct gov_attr_set *attr_set, char *buf) { struct sugov_tunables *tunables = to_sugov_tunables(attr_set); @@ -761,6 +816,7 @@ static struct governor_attr up_rate_limit_us = __ATTR_RW(up_rate_limit_us); static struct governor_attr down_rate_limit_us = __ATTR_RW(down_rate_limit_us); static struct governor_attr hispeed_load = __ATTR_RW(hispeed_load); static struct governor_attr hispeed_freq = __ATTR_RW(hispeed_freq); +static struct governor_attr rtg_boost_freq = __ATTR_RW(rtg_boost_freq); static struct governor_attr pl = __ATTR_RW(pl); static struct attribute *sugov_attributes[] = { @@ -768,6 +824,7 @@ static struct attribute *sugov_attributes[] = { &down_rate_limit_us.attr, &hispeed_load.attr, &hispeed_freq.attr, + &rtg_boost_freq.attr, &pl.attr, NULL }; @@ -885,6 +942,7 @@ static void sugov_tunables_save(struct cpufreq_policy *policy, cached->pl = tunables->pl; cached->hispeed_load = tunables->hispeed_load; + cached->rtg_boost_freq = tunables->rtg_boost_freq; cached->hispeed_freq = tunables->hispeed_freq; cached->up_rate_limit_us = tunables->up_rate_limit_us; cached->down_rate_limit_us = tunables->down_rate_limit_us; @@ -909,6 +967,7 @@ static void sugov_tunables_restore(struct cpufreq_policy *policy) tunables->pl = cached->pl; tunables->hispeed_load = cached->hispeed_load; + tunables->rtg_boost_freq = cached->rtg_boost_freq; tunables->hispeed_freq = cached->hispeed_freq; tunables->up_rate_limit_us = cached->up_rate_limit_us; tunables->down_rate_limit_us = cached->down_rate_limit_us; @@ -919,6 +978,7 @@ static int sugov_init(struct 
cpufreq_policy *policy) { struct sugov_policy *sg_policy; struct sugov_tunables *tunables; + unsigned long util; int ret = 0; /* State should be equivalent to EXIT */ @@ -964,8 +1024,25 @@ static int sugov_init(struct cpufreq_policy *policy) tunables->hispeed_load = DEFAULT_HISPEED_LOAD; tunables->hispeed_freq = 0; + switch (policy->cpu) { + default: + case 0: + tunables->rtg_boost_freq = DEFAULT_CPU0_RTG_BOOST_FREQ; + break; + case 4: + tunables->rtg_boost_freq = DEFAULT_CPU4_RTG_BOOST_FREQ; + break; + case 7: + tunables->rtg_boost_freq = DEFAULT_CPU7_RTG_BOOST_FREQ; + break; + } + policy->governor_data = sg_policy; sg_policy->tunables = tunables; + + util = target_util(sg_policy, sg_policy->tunables->rtg_boost_freq); + sg_policy->rtg_boost_util = util; + stale_ns = sched_ravg_window + (sched_ravg_window >> 3); sugov_tunables_restore(policy); diff --git a/kernel/sched/debug.c b/kernel/sched/debug.c index 010d5d12f34f..03bbb74e3124 100644 --- a/kernel/sched/debug.c +++ b/kernel/sched/debug.c @@ -21,6 +21,10 @@ #include "sched.h" +#if defined(CONFIG_ANDROID) && !defined(CONFIG_DEBUG_FS) +#define CONFIG_DEBUG_FS +#endif + static DEFINE_SPINLOCK(sched_debug_lock); /* diff --git a/kernel/sched/energy.c b/kernel/sched/energy.c index b4764309dac4..9efd07394bd7 100644 --- a/kernel/sched/energy.c +++ b/kernel/sched/energy.c @@ -290,8 +290,6 @@ static int sched_energy_probe(struct platform_device *pdev) kfree(max_frequencies); - walt_map_freq_to_load(); - dev_info(&pdev->dev, "Sched-energy-costs capacity updated\n"); return 0; diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c index 4c8ce879092f..14035a2d28db 100644 --- a/kernel/sched/fair.c +++ b/kernel/sched/fair.c @@ -145,7 +145,7 @@ unsigned int sysctl_sched_child_runs_first __read_mostly; unsigned int sysctl_sched_wakeup_granularity = 1000000UL; unsigned int normalized_sysctl_sched_wakeup_granularity = 1000000UL; -const_debug unsigned int sysctl_sched_migration_cost = 500000UL; +unsigned int __read_mostly 
sysctl_sched_migration_cost = 500000UL; DEFINE_PER_CPU_READ_MOSTLY(int, sched_load_boost); #ifdef CONFIG_SCHED_WALT @@ -683,6 +683,7 @@ struct sched_entity *__pick_last_entity(struct cfs_rq *cfs_rq) return rb_entry(last, struct sched_entity, run_node); } +#endif /************************************************************** * Scheduling class statistics methods: @@ -710,7 +711,6 @@ int sched_proc_update_handler(struct ctl_table *table, int write, return 0; } -#endif /* * delta /= w diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h index 99bc9e5cf7db..823235abde4a 100644 --- a/kernel/sched/sched.h +++ b/kernel/sched/sched.h @@ -110,7 +110,6 @@ struct sched_cluster { int notifier_sent; bool wake_up_idle; u64 aggr_grp_load; - u64 coloc_boost_load; }; extern unsigned int sched_disable_window_stats; @@ -1909,7 +1908,7 @@ extern void check_preempt_curr(struct rq *rq, struct task_struct *p, int flags); extern const_debug unsigned int sysctl_sched_time_avg; extern const_debug unsigned int sysctl_sched_nr_migrate; -extern const_debug unsigned int sysctl_sched_migration_cost; +extern unsigned int __read_mostly sysctl_sched_migration_cost; static inline u64 sched_avg_period(void) { @@ -2072,6 +2071,7 @@ struct sched_walt_cpu_load { unsigned long prev_window_util; unsigned long nl; unsigned long pl; + bool rtgb_active; u64 ws; }; @@ -2096,6 +2096,7 @@ static inline unsigned long cpu_util_cum(int cpu, int delta) u64 freq_policy_load(struct rq *rq); extern u64 walt_load_reported_window; +extern bool rtgb_active; static inline unsigned long cpu_util_freq_walt(int cpu, struct sched_walt_cpu_load *walt_load) @@ -2131,6 +2132,7 @@ cpu_util_freq_walt(int cpu, struct sched_walt_cpu_load *walt_load) walt_load->nl = nl; walt_load->pl = pl; walt_load->ws = walt_load_reported_window; + walt_load->rtgb_active = rtgb_active; } return (util >= capacity) ? 
capacity : util; @@ -2985,7 +2987,6 @@ static inline enum sched_boost_policy task_boost_policy(struct task_struct *p) return policy; } -extern void walt_map_freq_to_load(void); extern void walt_update_min_max_capacity(void); static inline bool is_min_capacity_cluster(struct sched_cluster *cluster) @@ -3132,7 +3133,6 @@ static inline unsigned int power_cost(int cpu, u64 demand) #endif static inline void note_task_waking(struct task_struct *p, u64 wallclock) { } -static inline void walt_map_freq_to_load(void) { } static inline void walt_update_min_max_capacity(void) { } #endif /* CONFIG_SCHED_WALT */ diff --git a/kernel/sched/tune.c b/kernel/sched/tune.c index 51862ccdb719..404c15fbf472 100644 --- a/kernel/sched/tune.c +++ b/kernel/sched/tune.c @@ -105,7 +105,7 @@ root_schedtune = { * implementation especially for the computation of the per-CPU boost * value */ -#define BOOSTGROUPS_COUNT 6 +#define BOOSTGROUPS_COUNT 8 /* Array of configured boostgroups */ static struct schedtune *allocated_group[BOOSTGROUPS_COUNT] = { diff --git a/kernel/sched/walt.c b/kernel/sched/walt.c index 2fa92b7996b1..65f88377a778 100644 --- a/kernel/sched/walt.c +++ b/kernel/sched/walt.c @@ -514,7 +514,6 @@ u64 freq_policy_load(struct rq *rq) struct sched_cluster *cluster = rq->cluster; u64 aggr_grp_load = cluster->aggr_grp_load; u64 load, tt_load = 0; - u64 coloc_boost_load = cluster->coloc_boost_load; if (rq->ed_task != NULL) { load = sched_ravg_window; @@ -526,9 +525,6 @@ u64 freq_policy_load(struct rq *rq) else load = rq->prev_runnable_sum + rq->grp_time.prev_runnable_sum; - if (coloc_boost_load) - load = max_t(u64, load, coloc_boost_load); - tt_load = top_task_load(rq); switch (reporting_policy) { case FREQ_REPORT_MAX_CPU_LOAD_TOP_TASK: @@ -545,9 +541,7 @@ u64 freq_policy_load(struct rq *rq) done: trace_sched_load_to_gov(rq, aggr_grp_load, tt_load, sched_freq_aggr_en, - load, reporting_policy, walt_rotation_enabled, - sysctl_sched_little_cluster_coloc_fmin_khz, - coloc_boost_load); + 
load, reporting_policy, walt_rotation_enabled); return load; } @@ -2031,11 +2025,6 @@ void init_new_task_load(struct task_struct *p) memset(&p->ravg, 0, sizeof(struct ravg)); p->cpu_cycles = 0; - p->ravg.curr_window_cpu = kcalloc(nr_cpu_ids, sizeof(u32), - GFP_KERNEL | __GFP_NOFAIL); - p->ravg.prev_window_cpu = kcalloc(nr_cpu_ids, sizeof(u32), - GFP_KERNEL | __GFP_NOFAIL); - if (init_load_pct) { init_load_windows = div64_u64((u64)init_load_pct * (u64)sched_ravg_window, 100); @@ -2052,46 +2041,28 @@ void init_new_task_load(struct task_struct *p) p->misfit = false; } -/* - * kfree() may wakeup kswapd. So this function should NOT be called - * with any CPU's rq->lock acquired. - */ -void free_task_load_ptrs(struct task_struct *p) -{ - kfree(p->ravg.curr_window_cpu); - kfree(p->ravg.prev_window_cpu); - - /* - * update_task_ravg() can be called for exiting tasks. While the - * function itself ensures correct behavior, the corresponding - * trace event requires that these pointers be NULL. - */ - p->ravg.curr_window_cpu = NULL; - p->ravg.prev_window_cpu = NULL; -} - void reset_task_stats(struct task_struct *p) { - u32 sum = 0; - u32 *curr_window_ptr = NULL; - u32 *prev_window_ptr = NULL; + u32 sum; + u32 curr_window_saved[CONFIG_NR_CPUS]; + u32 prev_window_saved[CONFIG_NR_CPUS]; if (exiting_task(p)) { sum = EXITING_TASK_MARKER; + + memset(&p->ravg, 0, sizeof(struct ravg)); + + /* Retain EXITING_TASK marker */ + p->ravg.sum_history[0] = sum; } else { - curr_window_ptr = p->ravg.curr_window_cpu; - prev_window_ptr = p->ravg.prev_window_cpu; - memset(curr_window_ptr, 0, sizeof(u32) * nr_cpu_ids); - memset(prev_window_ptr, 0, sizeof(u32) * nr_cpu_ids); + memcpy(curr_window_saved, p->ravg.curr_window_cpu, sizeof(curr_window_saved)); + memcpy(prev_window_saved, p->ravg.prev_window_cpu, sizeof(prev_window_saved)); + + memset(&p->ravg, 0, sizeof(struct ravg)); + + memcpy(p->ravg.curr_window_cpu, curr_window_saved, sizeof(curr_window_saved)); + memcpy(p->ravg.prev_window_cpu, 
prev_window_saved, sizeof(prev_window_saved)); } - - memset(&p->ravg, 0, sizeof(struct ravg)); - - p->ravg.curr_window_cpu = curr_window_ptr; - p->ravg.prev_window_cpu = prev_window_ptr; - - /* Retain EXITING_TASK marker */ - p->ravg.sum_history[0] = sum; } void mark_task_starting(struct task_struct *p) @@ -2347,7 +2318,6 @@ struct sched_cluster init_cluster = { .notifier_sent = 0, .wake_up_idle = 0, .aggr_grp_load = 0, - .coloc_boost_load = 0, }; void init_clusters(void) @@ -3113,69 +3083,21 @@ static void transfer_busy_time(struct rq *rq, struct related_thread_group *grp, BUG_ON((s64)*src_nt_prev_runnable_sum < 0); } -/* Set to 1GHz by default */ -unsigned int sysctl_sched_little_cluster_coloc_fmin_khz = 1000000; -static u64 coloc_boost_load; +bool rtgb_active; -void walt_map_freq_to_load(void) -{ - struct sched_cluster *cluster; - - for_each_sched_cluster(cluster) { - if (is_min_capacity_cluster(cluster)) { - int fcpu = cluster_first_cpu(cluster); - - coloc_boost_load = div64_u64( - ((u64)sched_ravg_window * - arch_scale_cpu_capacity(NULL, fcpu) * - sysctl_sched_little_cluster_coloc_fmin_khz), - (u64)1024 * cpu_max_possible_freq(fcpu)); - coloc_boost_load = div64_u64(coloc_boost_load << 2, 5); - break; - } - } -} - -static void walt_update_coloc_boost_load(void) +static bool is_rtgb_active(void) { struct related_thread_group *grp; - struct sched_cluster *cluster; - if (!sysctl_sched_little_cluster_coloc_fmin_khz || - sched_boost() == CONSERVATIVE_BOOST) - return; + if (sched_boost() == CONSERVATIVE_BOOST) + return false; grp = lookup_related_thread_group(DEFAULT_CGROUP_COLOC_ID); if (!grp || !grp->preferred_cluster || is_min_capacity_cluster(grp->preferred_cluster)) - return; + return false; - for_each_sched_cluster(cluster) { - if (is_min_capacity_cluster(cluster)) { - cluster->coloc_boost_load = coloc_boost_load; - break; - } - } -} - -int sched_little_cluster_coloc_fmin_khz_handler(struct ctl_table *table, - int write, void __user *buffer, size_t *lenp, - 
loff_t *ppos) -{ - int ret; - static DEFINE_MUTEX(mutex); - - mutex_lock(&mutex); - - ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos); - if (ret || !write) - goto done; - - walt_map_freq_to_load(); - -done: - mutex_unlock(&mutex); - return ret; + return true; } /* @@ -3223,13 +3145,14 @@ void walt_irq_work(struct irq_work *irq_work) cluster->aggr_grp_load = aggr_grp_load; total_grp_load += aggr_grp_load; - cluster->coloc_boost_load = 0; raw_spin_unlock(&cluster->load_lock); } if (total_grp_load) - walt_update_coloc_boost_load(); + rtgb_active = is_rtgb_active(); + else + rtgb_active = false; for_each_sched_cluster(cluster) { cpumask_t cluster_online_cpus; diff --git a/kernel/signal.c b/kernel/signal.c index a067e2c8942b..4e459d7ccee2 100644 --- a/kernel/signal.c +++ b/kernel/signal.c @@ -42,8 +42,6 @@ #include #include #include -#include -#include #define CREATE_TRACE_POINTS #include @@ -1344,11 +1342,8 @@ int group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p) ret = check_kill_permission(sig, info, p); rcu_read_unlock(); - if (!ret && sig) { + if (!ret && sig) ret = do_send_sig_info(sig, info, p, true); - if (capable(CAP_KILL) && sig == SIGKILL) - add_to_oom_reaper(p); - } return ret; } diff --git a/kernel/sys.c b/kernel/sys.c index acc4c0333985..a08073ebf19a 100644 --- a/kernel/sys.c +++ b/kernel/sys.c @@ -2434,10 +2434,18 @@ SYSCALL_DEFINE5(prctl, int, option, unsigned long, arg2, unsigned long, arg3, error = SET_ENDIAN(me, arg2); break; case PR_GET_SECCOMP: +#ifdef CONFIG_SECCOMP error = prctl_get_seccomp(); +#else + error = 0; +#endif break; case PR_SET_SECCOMP: +#ifdef CONFIG_SECCOMP error = prctl_set_seccomp(arg2, (char __user *)arg3); +#else + error = 0; +#endif break; case PR_GET_TSC: error = GET_TSC_CTL(arg2); diff --git a/kernel/sysctl.c b/kernel/sysctl.c index 06ae2c717e69..e5f178c08ede 100644 --- a/kernel/sysctl.c +++ b/kernel/sysctl.c @@ -135,9 +135,6 @@ static unsigned long one_ul = 1; static unsigned long long_max 
= LONG_MAX; static int one_hundred = 100; static int one_thousand = 1000; -#ifdef CONFIG_SCHED_WALT -static int two_million = 2000000; -#endif #ifdef CONFIG_PRINTK static int ten_thousand = 10000; #endif @@ -297,7 +294,6 @@ static struct ctl_table sysctl_base_table[] = { { } }; -#ifdef CONFIG_SCHED_DEBUG static int min_sched_granularity_ns = 100000; /* 100 usecs */ static int max_sched_granularity_ns = NSEC_PER_SEC; /* 1 second */ static int min_wakeup_granularity_ns; /* 0 usecs */ @@ -306,7 +302,6 @@ static int max_wakeup_granularity_ns = NSEC_PER_SEC; /* 1 second */ static int min_sched_tunable_scaling = SCHED_TUNABLESCALING_NONE; static int max_sched_tunable_scaling = SCHED_TUNABLESCALING_END-1; #endif /* CONFIG_SMP */ -#endif /* CONFIG_SCHED_DEBUG */ #ifdef CONFIG_COMPACTION static int min_extfrag_threshold; @@ -419,15 +414,6 @@ static struct ctl_table kern_table[] = { .extra1 = &zero, .extra2 = &one_thousand, }, - { - .procname = "sched_little_cluster_coloc_fmin_khz", - .data = &sysctl_sched_little_cluster_coloc_fmin_khz, - .maxlen = sizeof(unsigned int), - .mode = 0644, - .proc_handler = sched_little_cluster_coloc_fmin_khz_handler, - .extra1 = &zero, - .extra2 = &two_million, - }, #endif { .procname = "sched_upmigrate", @@ -443,16 +429,6 @@ static struct ctl_table kern_table[] = { .mode = 0644, .proc_handler = sched_updown_migrate_handler, }, -#ifdef CONFIG_SCHED_DEBUG - { - .procname = "sched_min_granularity_ns", - .data = &sysctl_sched_min_granularity, - .maxlen = sizeof(unsigned int), - .mode = 0644, - .proc_handler = sched_proc_update_handler, - .extra1 = &min_sched_granularity_ns, - .extra2 = &max_sched_granularity_ns, - }, { .procname = "sched_latency_ns", .data = &sysctl_sched_latency, @@ -462,20 +438,6 @@ static struct ctl_table kern_table[] = { .extra1 = &min_sched_granularity_ns, .extra2 = &max_sched_granularity_ns, }, - { - .procname = "sched_sync_hint_enable", - .data = &sysctl_sched_sync_hint_enable, - .maxlen = sizeof(unsigned int), - .mode = 
0644, - .proc_handler = proc_dointvec, - }, - { - .procname = "sched_cstate_aware", - .data = &sysctl_sched_cstate_aware, - .maxlen = sizeof(unsigned int), - .mode = 0644, - .proc_handler = proc_dointvec, - }, { .procname = "sched_wakeup_granularity_ns", .data = &sysctl_sched_wakeup_granularity, @@ -502,6 +464,32 @@ static struct ctl_table kern_table[] = { .mode = 0644, .proc_handler = proc_dointvec, }, +#endif +#ifdef CONFIG_SCHED_DEBUG + { + .procname = "sched_min_granularity_ns", + .data = &sysctl_sched_min_granularity, + .maxlen = sizeof(unsigned int), + .mode = 0644, + .proc_handler = sched_proc_update_handler, + .extra1 = &min_sched_granularity_ns, + .extra2 = &max_sched_granularity_ns, + }, + { + .procname = "sched_sync_hint_enable", + .data = &sysctl_sched_sync_hint_enable, + .maxlen = sizeof(unsigned int), + .mode = 0644, + .proc_handler = proc_dointvec, + }, + { + .procname = "sched_cstate_aware", + .data = &sysctl_sched_cstate_aware, + .maxlen = sizeof(unsigned int), + .mode = 0644, + .proc_handler = proc_dointvec, + }, +#ifdef CONFIG_SMP { .procname = "sched_nr_migrate", .data = &sysctl_sched_nr_migrate, @@ -1454,13 +1442,6 @@ static struct ctl_table vm_table[] = { .mode = 0644, .proc_handler = proc_dointvec, }, - { - .procname = "reap_mem_on_sigkill", - .data = &sysctl_reap_mem_on_sigkill, - .maxlen = sizeof(sysctl_reap_mem_on_sigkill), - .mode = 0644, - .proc_handler = proc_dointvec, - }, { .procname = "overcommit_ratio", .data = &sysctl_overcommit_ratio, diff --git a/kernel/time/ntp.c b/kernel/time/ntp.c index 17ce25a49835..d0eaf99c3ef1 100644 --- a/kernel/time/ntp.c +++ b/kernel/time/ntp.c @@ -560,7 +560,7 @@ static void sync_cmos_clock(struct work_struct *work) if (!fail || fail == -ENODEV) next.tv_sec = 659; else - next.tv_sec = 0; + next.tv_sec = 10; if (next.tv_nsec >= NSEC_PER_SEC) { next.tv_sec++; diff --git a/kernel/time/time.c b/kernel/time/time.c index 319935af02fb..241c25955fdd 100644 --- a/kernel/time/time.c +++ b/kernel/time/time.c @@ 
-335,543 +335,6 @@ COMPAT_SYSCALL_DEFINE1(adjtimex, struct compat_timex __user *, utp) } #endif -/* - * Convert jiffies to milliseconds and back. - * - * Avoid unnecessary multiplications/divisions in the - * two most common HZ cases: - */ -unsigned int jiffies_to_msecs(const unsigned long j) -{ -#if HZ <= MSEC_PER_SEC && !(MSEC_PER_SEC % HZ) - return (MSEC_PER_SEC / HZ) * j; -#elif HZ > MSEC_PER_SEC && !(HZ % MSEC_PER_SEC) - return (j + (HZ / MSEC_PER_SEC) - 1)/(HZ / MSEC_PER_SEC); -#else -# if BITS_PER_LONG == 32 - return (HZ_TO_MSEC_MUL32 * j + (1ULL << HZ_TO_MSEC_SHR32) - 1) >> - HZ_TO_MSEC_SHR32; -# else - return DIV_ROUND_UP(j * HZ_TO_MSEC_NUM, HZ_TO_MSEC_DEN); -# endif -#endif -} -EXPORT_SYMBOL(jiffies_to_msecs); - -unsigned int jiffies_to_usecs(const unsigned long j) -{ - /* - * Hz usually doesn't go much further MSEC_PER_SEC. - * jiffies_to_usecs() and usecs_to_jiffies() depend on that. - */ - BUILD_BUG_ON(HZ > USEC_PER_SEC); - -#if !(USEC_PER_SEC % HZ) - return (USEC_PER_SEC / HZ) * j; -#else -# if BITS_PER_LONG == 32 - return (HZ_TO_USEC_MUL32 * j) >> HZ_TO_USEC_SHR32; -# else - return (j * HZ_TO_USEC_NUM) / HZ_TO_USEC_DEN; -# endif -#endif -} -EXPORT_SYMBOL(jiffies_to_usecs); - -/** - * timespec_trunc - Truncate timespec to a granularity - * @t: Timespec - * @gran: Granularity in ns. - * - * Truncate a timespec to a granularity. Always rounds down. gran must - * not be 0 nor greater than a second (NSEC_PER_SEC, or 10^9 ns). - */ -struct timespec timespec_trunc(struct timespec t, unsigned gran) -{ - /* Avoid division in the common cases 1 ns and 1 s. */ - if (gran == 1) { - /* nothing */ - } else if (gran == NSEC_PER_SEC) { - t.tv_nsec = 0; - } else if (gran > 1 && gran < NSEC_PER_SEC) { - t.tv_nsec -= t.tv_nsec % gran; - } else { - WARN(1, "illegal file time granularity: %u", gran); - } - return t; -} -EXPORT_SYMBOL(timespec_trunc); - -/* - * mktime64 - Converts date to seconds. - * Converts Gregorian date to seconds since 1970-01-01 00:00:00. 
- * Assumes input in normal date format, i.e. 1980-12-31 23:59:59 - * => year=1980, mon=12, day=31, hour=23, min=59, sec=59. - * - * [For the Julian calendar (which was used in Russia before 1917, - * Britain & colonies before 1752, anywhere else before 1582, - * and is still in use by some communities) leave out the - * -year/100+year/400 terms, and add 10.] - * - * This algorithm was first published by Gauss (I think). - * - * A leap second can be indicated by calling this function with sec as - * 60 (allowable under ISO 8601). The leap second is treated the same - * as the following second since they don't exist in UNIX time. - * - * An encoding of midnight at the end of the day as 24:00:00 - ie. midnight - * tomorrow - (allowable under ISO 8601) is supported. - */ -time64_t mktime64(const unsigned int year0, const unsigned int mon0, - const unsigned int day, const unsigned int hour, - const unsigned int min, const unsigned int sec) -{ - unsigned int mon = mon0, year = year0; - - /* 1..12 -> 11,12,1..10 */ - if (0 >= (int) (mon -= 2)) { - mon += 12; /* Puts Feb last since it has leap day */ - year -= 1; - } - - return ((((time64_t) - (year/4 - year/100 + year/400 + 367*mon/12 + day) + - year*365 - 719499 - )*24 + hour /* now have hours - midnight tomorrow handled here */ - )*60 + min /* now have minutes */ - )*60 + sec; /* finally seconds */ -} -EXPORT_SYMBOL(mktime64); - -/** - * set_normalized_timespec - set timespec sec and nsec parts and normalize - * - * @ts: pointer to timespec variable to be set - * @sec: seconds to set - * @nsec: nanoseconds to set - * - * Set seconds and nanoseconds field of a timespec variable and - * normalize to the timespec storage format - * - * Note: The tv_nsec part is always in the range of - * 0 <= tv_nsec < NSEC_PER_SEC - * For negative values only the tv_sec field is negative ! 
- */ -void set_normalized_timespec(struct timespec *ts, time_t sec, s64 nsec) -{ - while (nsec >= NSEC_PER_SEC) { - /* - * The following asm() prevents the compiler from - * optimising this loop into a modulo operation. See - * also __iter_div_u64_rem() in include/linux/time.h - */ - asm("" : "+rm"(nsec)); - nsec -= NSEC_PER_SEC; - ++sec; - } - while (nsec < 0) { - asm("" : "+rm"(nsec)); - nsec += NSEC_PER_SEC; - --sec; - } - ts->tv_sec = sec; - ts->tv_nsec = nsec; -} -EXPORT_SYMBOL(set_normalized_timespec); - -/** - * ns_to_timespec - Convert nanoseconds to timespec - * @nsec: the nanoseconds value to be converted - * - * Returns the timespec representation of the nsec parameter. - */ -struct timespec ns_to_timespec(const s64 nsec) -{ - struct timespec ts; - s32 rem; - - if (!nsec) - return (struct timespec) {0, 0}; - - ts.tv_sec = div_s64_rem(nsec, NSEC_PER_SEC, &rem); - if (unlikely(rem < 0)) { - ts.tv_sec--; - rem += NSEC_PER_SEC; - } - ts.tv_nsec = rem; - - return ts; -} -EXPORT_SYMBOL(ns_to_timespec); - -/** - * ns_to_timeval - Convert nanoseconds to timeval - * @nsec: the nanoseconds value to be converted - * - * Returns the timeval representation of the nsec parameter. - */ -struct timeval ns_to_timeval(const s64 nsec) -{ - struct timespec ts = ns_to_timespec(nsec); - struct timeval tv; - - tv.tv_sec = ts.tv_sec; - tv.tv_usec = (suseconds_t) ts.tv_nsec / 1000; - - return tv; -} -EXPORT_SYMBOL(ns_to_timeval); - -#if BITS_PER_LONG == 32 -/** - * set_normalized_timespec - set timespec sec and nsec parts and normalize - * - * @ts: pointer to timespec variable to be set - * @sec: seconds to set - * @nsec: nanoseconds to set - * - * Set seconds and nanoseconds field of a timespec variable and - * normalize to the timespec storage format - * - * Note: The tv_nsec part is always in the range of - * 0 <= tv_nsec < NSEC_PER_SEC - * For negative values only the tv_sec field is negative ! 
- */ -void set_normalized_timespec64(struct timespec64 *ts, time64_t sec, s64 nsec) -{ - while (nsec >= NSEC_PER_SEC) { - /* - * The following asm() prevents the compiler from - * optimising this loop into a modulo operation. See - * also __iter_div_u64_rem() in include/linux/time.h - */ - asm("" : "+rm"(nsec)); - nsec -= NSEC_PER_SEC; - ++sec; - } - while (nsec < 0) { - asm("" : "+rm"(nsec)); - nsec += NSEC_PER_SEC; - --sec; - } - ts->tv_sec = sec; - ts->tv_nsec = nsec; -} -EXPORT_SYMBOL(set_normalized_timespec64); - -/** - * ns_to_timespec64 - Convert nanoseconds to timespec64 - * @nsec: the nanoseconds value to be converted - * - * Returns the timespec64 representation of the nsec parameter. - */ -struct timespec64 ns_to_timespec64(const s64 nsec) -{ - struct timespec64 ts; - s32 rem; - - if (!nsec) - return (struct timespec64) {0, 0}; - - ts.tv_sec = div_s64_rem(nsec, NSEC_PER_SEC, &rem); - if (unlikely(rem < 0)) { - ts.tv_sec--; - rem += NSEC_PER_SEC; - } - ts.tv_nsec = rem; - - return ts; -} -EXPORT_SYMBOL(ns_to_timespec64); -#endif -/** - * msecs_to_jiffies: - convert milliseconds to jiffies - * @m: time in milliseconds - * - * conversion is done as follows: - * - * - negative values mean 'infinite timeout' (MAX_JIFFY_OFFSET) - * - * - 'too large' values [that would result in larger than - * MAX_JIFFY_OFFSET values] mean 'infinite timeout' too. - * - * - all other values are converted to jiffies by either multiplying - * the input value by a factor or dividing it with a factor and - * handling any 32-bit overflows. - * for the details see __msecs_to_jiffies() - * - * msecs_to_jiffies() checks for the passed in value being a constant - * via __builtin_constant_p() allowing gcc to eliminate most of the - * code, __msecs_to_jiffies() is called if the value passed does not - * allow constant folding and the actual conversion must be done at - * runtime. 
- * the _msecs_to_jiffies helpers are the HZ dependent conversion - * routines found in include/linux/jiffies.h - */ -unsigned long __msecs_to_jiffies(const unsigned int m) -{ - /* - * Negative value, means infinite timeout: - */ - if ((int)m < 0) - return MAX_JIFFY_OFFSET; - return _msecs_to_jiffies(m); -} -EXPORT_SYMBOL(__msecs_to_jiffies); - -unsigned long __usecs_to_jiffies(const unsigned int u) -{ - if (u > jiffies_to_usecs(MAX_JIFFY_OFFSET)) - return MAX_JIFFY_OFFSET; - return _usecs_to_jiffies(u); -} -EXPORT_SYMBOL(__usecs_to_jiffies); - -/* - * The TICK_NSEC - 1 rounds up the value to the next resolution. Note - * that a remainder subtract here would not do the right thing as the - * resolution values don't fall on second boundries. I.e. the line: - * nsec -= nsec % TICK_NSEC; is NOT a correct resolution rounding. - * Note that due to the small error in the multiplier here, this - * rounding is incorrect for sufficiently large values of tv_nsec, but - * well formed timespecs should have tv_nsec < NSEC_PER_SEC, so we're - * OK. - * - * Rather, we just shift the bits off the right. - * - * The >> (NSEC_JIFFIE_SC - SEC_JIFFIE_SC) converts the scaled nsec - * value to a scaled second value. 
- */ -static unsigned long -__timespec64_to_jiffies(u64 sec, long nsec) -{ - nsec = nsec + TICK_NSEC - 1; - - if (sec >= MAX_SEC_IN_JIFFIES){ - sec = MAX_SEC_IN_JIFFIES; - nsec = 0; - } - return ((sec * SEC_CONVERSION) + - (((u64)nsec * NSEC_CONVERSION) >> - (NSEC_JIFFIE_SC - SEC_JIFFIE_SC))) >> SEC_JIFFIE_SC; - -} - -static unsigned long -__timespec_to_jiffies(unsigned long sec, long nsec) -{ - return __timespec64_to_jiffies((u64)sec, nsec); -} - -unsigned long -timespec64_to_jiffies(const struct timespec64 *value) -{ - return __timespec64_to_jiffies(value->tv_sec, value->tv_nsec); -} -EXPORT_SYMBOL(timespec64_to_jiffies); - -void -jiffies_to_timespec64(const unsigned long jiffies, struct timespec64 *value) -{ - /* - * Convert jiffies to nanoseconds and separate with - * one divide. - */ - u32 rem; - value->tv_sec = div_u64_rem((u64)jiffies * TICK_NSEC, - NSEC_PER_SEC, &rem); - value->tv_nsec = rem; -} -EXPORT_SYMBOL(jiffies_to_timespec64); - -/* - * We could use a similar algorithm to timespec_to_jiffies (with a - * different multiplier for usec instead of nsec). But this has a - * problem with rounding: we can't exactly add TICK_NSEC - 1 to the - * usec value, since it's not necessarily integral. - * - * We could instead round in the intermediate scaled representation - * (i.e. in units of 1/2^(large scale) jiffies) but that's also - * perilous: the scaling introduces a small positive error, which - * combined with a division-rounding-upward (i.e. adding 2^(scale) - 1 - * units to the intermediate before shifting) leads to accidental - * overflow and overestimates. - * - * At the cost of one additional multiplication by a constant, just - * use the timespec implementation. 
- */ -unsigned long -timeval_to_jiffies(const struct timeval *value) -{ - return __timespec_to_jiffies(value->tv_sec, - value->tv_usec * NSEC_PER_USEC); -} -EXPORT_SYMBOL(timeval_to_jiffies); - -void jiffies_to_timeval(const unsigned long jiffies, struct timeval *value) -{ - /* - * Convert jiffies to nanoseconds and separate with - * one divide. - */ - u32 rem; - - value->tv_sec = div_u64_rem((u64)jiffies * TICK_NSEC, - NSEC_PER_SEC, &rem); - value->tv_usec = rem / NSEC_PER_USEC; -} -EXPORT_SYMBOL(jiffies_to_timeval); - -/* - * Convert jiffies/jiffies_64 to clock_t and back. - */ -clock_t jiffies_to_clock_t(unsigned long x) -{ -#if (TICK_NSEC % (NSEC_PER_SEC / USER_HZ)) == 0 -# if HZ < USER_HZ - return x * (USER_HZ / HZ); -# else - return x / (HZ / USER_HZ); -# endif -#else - return div_u64((u64)x * TICK_NSEC, NSEC_PER_SEC / USER_HZ); -#endif -} -EXPORT_SYMBOL(jiffies_to_clock_t); - -unsigned long clock_t_to_jiffies(unsigned long x) -{ -#if (HZ % USER_HZ)==0 - if (x >= ~0UL / (HZ / USER_HZ)) - return ~0UL; - return x * (HZ / USER_HZ); -#else - /* Don't worry about loss of precision here .. */ - if (x >= ~0UL / HZ * USER_HZ) - return ~0UL; - - /* .. but do try to contain it here */ - return div_u64((u64)x * HZ, USER_HZ); -#endif -} -EXPORT_SYMBOL(clock_t_to_jiffies); - -u64 jiffies_64_to_clock_t(u64 x) -{ -#if (TICK_NSEC % (NSEC_PER_SEC / USER_HZ)) == 0 -# if HZ < USER_HZ - x = div_u64(x * USER_HZ, HZ); -# elif HZ > USER_HZ - x = div_u64(x, HZ / USER_HZ); -# else - /* Nothing to do */ -# endif -#else - /* - * There are better ways that don't overflow early, - * but even this doesn't overflow in hundreds of years - * in 64 bits, so.. 
- */ - x = div_u64(x * TICK_NSEC, (NSEC_PER_SEC / USER_HZ)); -#endif - return x; -} -EXPORT_SYMBOL(jiffies_64_to_clock_t); - -u64 nsec_to_clock_t(u64 x) -{ -#if (NSEC_PER_SEC % USER_HZ) == 0 - return div_u64(x, NSEC_PER_SEC / USER_HZ); -#elif (USER_HZ % 512) == 0 - return div_u64(x * USER_HZ / 512, NSEC_PER_SEC / 512); -#else - /* - * max relative error 5.7e-8 (1.8s per year) for USER_HZ <= 1024, - * overflow after 64.99 years. - * exact for HZ=60, 72, 90, 120, 144, 180, 300, 600, 900, ... - */ - return div_u64(x * 9, (9ull * NSEC_PER_SEC + (USER_HZ / 2)) / USER_HZ); -#endif -} - -u64 jiffies64_to_nsecs(u64 j) -{ -#if !(NSEC_PER_SEC % HZ) - return (NSEC_PER_SEC / HZ) * j; -# else - return div_u64(j * HZ_TO_NSEC_NUM, HZ_TO_NSEC_DEN); -#endif -} -EXPORT_SYMBOL(jiffies64_to_nsecs); - -/** - * nsecs_to_jiffies64 - Convert nsecs in u64 to jiffies64 - * - * @n: nsecs in u64 - * - * Unlike {m,u}secs_to_jiffies, type of input is not unsigned int but u64. - * And this doesn't return MAX_JIFFY_OFFSET since this function is designed - * for scheduler, not for use in device drivers to calculate timeout value. - * - * note: - * NSEC_PER_SEC = 10^9 = (5^9 * 2^9) = (1953125 * 512) - * ULLONG_MAX ns = 18446744073.709551615 secs = about 584 years - */ -u64 nsecs_to_jiffies64(u64 n) -{ -#if (NSEC_PER_SEC % HZ) == 0 - /* Common case, HZ = 100, 128, 200, 250, 256, 500, 512, 1000 etc. */ - return div_u64(n, NSEC_PER_SEC / HZ); -#elif (HZ % 512) == 0 - /* overflow after 292 years if HZ = 1024 */ - return div_u64(n * HZ / 512, NSEC_PER_SEC / 512); -#else - /* - * Generic case - optimized for cases where HZ is a multiple of 3. - * overflow after 64.99 years, exact for HZ = 60, 72, 90, 120 etc. - */ - return div_u64(n * 9, (9ull * NSEC_PER_SEC + HZ / 2) / HZ); -#endif -} -EXPORT_SYMBOL(nsecs_to_jiffies64); - -/** - * nsecs_to_jiffies - Convert nsecs in u64 to jiffies - * - * @n: nsecs in u64 - * - * Unlike {m,u}secs_to_jiffies, type of input is not unsigned int but u64. 
- * And this doesn't return MAX_JIFFY_OFFSET since this function is designed - * for scheduler, not for use in device drivers to calculate timeout value. - * - * note: - * NSEC_PER_SEC = 10^9 = (5^9 * 2^9) = (1953125 * 512) - * ULLONG_MAX ns = 18446744073.709551615 secs = about 584 years - */ -unsigned long nsecs_to_jiffies(u64 n) -{ - return (unsigned long)nsecs_to_jiffies64(n); -} -EXPORT_SYMBOL_GPL(nsecs_to_jiffies); - -/* - * Add two timespec values and do a safety check for overflow. - * It's assumed that both values are valid (>= 0) - */ -struct timespec timespec_add_safe(const struct timespec lhs, - const struct timespec rhs) -{ - struct timespec res; - - set_normalized_timespec(&res, lhs.tv_sec + rhs.tv_sec, - lhs.tv_nsec + rhs.tv_nsec); - - if (res.tv_sec < lhs.tv_sec || res.tv_sec < rhs.tv_sec) - res.tv_sec = TIME_T_MAX; - - return res; -} - /* * Add two timespec64 values and do a safety check for overflow. * It's assumed that both values are valid (>= 0). diff --git a/kernel/time/timer.c b/kernel/time/timer.c index c4f2116ef470..86815151b8cc 100644 --- a/kernel/time/timer.c +++ b/kernel/time/timer.c @@ -1730,9 +1730,20 @@ void run_local_timers(void) raise_softirq(TIMER_SOFTIRQ); } -static void process_timeout(unsigned long __data) +/* + * Since schedule_timeout()'s timer is defined on the stack, it must store + * the target task on the stack as well. 
+ */ +struct process_timer { + struct timer_list timer; + struct task_struct *task; +}; + +static void process_timeout(struct timer_list *t) { - wake_up_process((struct task_struct *)__data); + struct process_timer *timeout = from_timer(timeout, t, timer); + + wake_up_process(timeout->task); } /** @@ -1766,7 +1777,7 @@ static void process_timeout(unsigned long __data) */ signed long __sched schedule_timeout(signed long timeout) { - struct timer_list timer; + struct process_timer timer; unsigned long expire; switch (timeout) @@ -1800,13 +1811,14 @@ signed long __sched schedule_timeout(signed long timeout) expire = timeout + jiffies; - setup_timer_on_stack(&timer, process_timeout, (unsigned long)current); - __mod_timer(&timer, expire, false); + timer.task = current; + timer_setup_on_stack(&timer.timer, process_timeout, 0); + __mod_timer(&timer.timer, expire, false); schedule(); - del_singleshot_timer_sync(&timer); + del_singleshot_timer_sync(&timer.timer); /* Remove the timer from the object tracker */ - destroy_timer_on_stack(&timer); + destroy_timer_on_stack(&timer.timer); timeout = expire - jiffies; diff --git a/kernel/trace/Kconfig b/kernel/trace/Kconfig index dede59d579c7..b262db2b39ea 100644 --- a/kernel/trace/Kconfig +++ b/kernel/trace/Kconfig @@ -96,24 +96,6 @@ config IPC_LOGGING If in doubt, say no. -config QCOM_RTB - bool "Register tracing" - help - Enable the kernel to trace every kernel function. This is done - Add support for logging different events to a small uncached - region. This is designed to aid in debugging reset cases where the - caches may not be flushed before the target resets. - -config QCOM_RTB_SEPARATE_CPUS - bool "Separate entries for each cpu" - depends on QCOM_RTB - depends on SMP - help - Under some circumstances, it may be beneficial to give dedicated space - for each cpu to log accesses. Selecting this option will log each cpu - separately. 
This will guarantee that the last acesses for each cpu - will be logged but there will be fewer entries per cpu - # All tracer options should select GENERIC_TRACER. For those options that are # enabled by all tracers (context switch and event tracer) they select TRACING. # This allows those options to appear when no other tracer is selected. But the @@ -135,6 +117,20 @@ config GENERIC_TRACER bool select TRACING +if TRACING + +config DISABLE_TRACE_PRINTK + bool "Force disable trace_printk() usage" + default y + help + When trace_printk() is used in any of the kernel source, it enables + debugging functions which are not desired for production kernel. + Enabling this option will replace trace_printk() with pr_debug(). + + If in doubt, say Y. + +endif + # # Minimum requirements an architecture has to meet for us to # be able to offer generic tracing facilities: diff --git a/kernel/trace/Makefile b/kernel/trace/Makefile index 6883b7951cb8..f3ff62da1f7e 100644 --- a/kernel/trace/Makefile +++ b/kernel/trace/Makefile @@ -73,7 +73,6 @@ obj-$(CONFIG_GPU_TRACEPOINTS) += gpu-traces.o obj-$(CONFIG_TRACEPOINT_BENCHMARK) += trace_benchmark.o -obj-$(CONFIG_QCOM_RTB) += msm_rtb.o obj-$(CONFIG_IPC_LOGGING) += ipc_logging.o ifdef CONFIG_DEBUG_FS obj-$(CONFIG_IPC_LOGGING) += ipc_logging_debug.o diff --git a/kernel/trace/msm_rtb.c b/kernel/trace/msm_rtb.c deleted file mode 100644 index abfa9c95d1de..000000000000 --- a/kernel/trace/msm_rtb.c +++ /dev/null @@ -1,347 +0,0 @@ -/* - * Copyright (c) 2013-2017, The Linux Foundation. All rights reserved. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 and - * only version 2 as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the - * GNU General Public License for more details. - */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#define SENTINEL_BYTE_1 0xFF -#define SENTINEL_BYTE_2 0xAA -#define SENTINEL_BYTE_3 0xFF - -#define RTB_COMPAT_STR "qcom,msm-rtb" - -/* Write - * 1) 3 bytes sentinel - * 2) 1 bytes of log type - * 3) 8 bytes of where the caller came from - * 4) 4 bytes index - * 4) 8 bytes extra data from the caller - * 5) 8 bytes of timestamp - * 6) 8 bytes of cyclecount - * - * Total = 40 bytes. - */ -struct msm_rtb_layout { - unsigned char sentinel[3]; - unsigned char log_type; - uint32_t idx; - uint64_t caller; - uint64_t data; - uint64_t timestamp; - uint64_t cycle_count; -} __attribute__ ((__packed__)); - - -struct msm_rtb_state { - struct msm_rtb_layout *rtb; - phys_addr_t phys; - int nentries; - int size; - int enabled; - int initialized; - uint32_t filter; - int step_size; -}; - -#if defined(CONFIG_QCOM_RTB_SEPARATE_CPUS) -DEFINE_PER_CPU(atomic_t, msm_rtb_idx_cpu); -#else -static atomic_t msm_rtb_idx; -#endif - -static struct msm_rtb_state msm_rtb = { - .filter = 1 << LOGK_LOGBUF, - .enabled = 1, -}; - -module_param_named(filter, msm_rtb.filter, uint, 0644); -module_param_named(enable, msm_rtb.enabled, int, 0644); - -static int msm_rtb_panic_notifier(struct notifier_block *this, - unsigned long event, void *ptr) -{ - msm_rtb.enabled = 0; - return NOTIFY_DONE; -} - -static struct notifier_block msm_rtb_panic_blk = { - .notifier_call = msm_rtb_panic_notifier, - .priority = INT_MAX, -}; - -int notrace msm_rtb_event_should_log(enum logk_event_type log_type) -{ - return msm_rtb.initialized && msm_rtb.enabled && - ((1 << (log_type & ~LOGTYPE_NOPC)) & msm_rtb.filter); -} -EXPORT_SYMBOL(msm_rtb_event_should_log); - -static void msm_rtb_emit_sentinel(struct msm_rtb_layout *start) -{ - start->sentinel[0] = SENTINEL_BYTE_1; - 
start->sentinel[1] = SENTINEL_BYTE_2; - start->sentinel[2] = SENTINEL_BYTE_3; -} - -static void msm_rtb_write_type(enum logk_event_type log_type, - struct msm_rtb_layout *start) -{ - start->log_type = (char)log_type; -} - -static void msm_rtb_write_caller(uint64_t caller, struct msm_rtb_layout *start) -{ - start->caller = caller; -} - -static void msm_rtb_write_idx(uint32_t idx, - struct msm_rtb_layout *start) -{ - start->idx = idx; -} - -static void msm_rtb_write_data(uint64_t data, struct msm_rtb_layout *start) -{ - start->data = data; -} - -static void msm_rtb_write_timestamp(struct msm_rtb_layout *start) -{ - start->timestamp = sched_clock(); -} - -static void msm_rtb_write_cyclecount(struct msm_rtb_layout *start) -{ - start->cycle_count = get_cycles(); -} - -static void uncached_logk_pc_idx(enum logk_event_type log_type, uint64_t caller, - uint64_t data, int idx) -{ - struct msm_rtb_layout *start; - - start = &msm_rtb.rtb[idx & (msm_rtb.nentries - 1)]; - - msm_rtb_emit_sentinel(start); - msm_rtb_write_type(log_type, start); - msm_rtb_write_caller(caller, start); - msm_rtb_write_idx(idx, start); - msm_rtb_write_data(data, start); - msm_rtb_write_timestamp(start); - msm_rtb_write_cyclecount(start); - mb(); - -} - -static void uncached_logk_timestamp(int idx) -{ - unsigned long long timestamp; - - timestamp = sched_clock(); - uncached_logk_pc_idx(LOGK_TIMESTAMP|LOGTYPE_NOPC, - (uint64_t)lower_32_bits(timestamp), - (uint64_t)upper_32_bits(timestamp), idx); -} - -#if defined(CONFIG_QCOM_RTB_SEPARATE_CPUS) -static int msm_rtb_get_idx(void) -{ - int cpu, i, offset; - atomic_t *index; - - /* - * ideally we would use get_cpu but this is a close enough - * approximation for our purposes. 
- */ - cpu = raw_smp_processor_id(); - - index = &per_cpu(msm_rtb_idx_cpu, cpu); - - i = atomic_add_return(msm_rtb.step_size, index); - i -= msm_rtb.step_size; - - /* Check if index has wrapped around */ - offset = (i & (msm_rtb.nentries - 1)) - - ((i - msm_rtb.step_size) & (msm_rtb.nentries - 1)); - if (offset < 0) { - uncached_logk_timestamp(i); - i = atomic_add_return(msm_rtb.step_size, index); - i -= msm_rtb.step_size; - } - - return i; -} -#else -static int msm_rtb_get_idx(void) -{ - int i, offset; - - i = atomic_inc_return(&msm_rtb_idx); - i--; - - /* Check if index has wrapped around */ - offset = (i & (msm_rtb.nentries - 1)) - - ((i - 1) & (msm_rtb.nentries - 1)); - if (offset < 0) { - uncached_logk_timestamp(i); - i = atomic_inc_return(&msm_rtb_idx); - i--; - } - - return i; -} -#endif - -int notrace uncached_logk_pc(enum logk_event_type log_type, void *caller, - void *data) -{ - int i; - - if (!msm_rtb_event_should_log(log_type)) - return 0; - - i = msm_rtb_get_idx(); - uncached_logk_pc_idx(log_type, (uint64_t)((unsigned long) caller), - (uint64_t)((unsigned long) data), i); - - return 1; -} -EXPORT_SYMBOL(uncached_logk_pc); - -noinline int notrace uncached_logk(enum logk_event_type log_type, void *data) -{ - return uncached_logk_pc(log_type, __builtin_return_address(0), data); -} -EXPORT_SYMBOL(uncached_logk); - -static int msm_rtb_probe(struct platform_device *pdev) -{ - struct msm_rtb_platform_data *d = pdev->dev.platform_data; - struct md_region md_entry; -#if defined(CONFIG_QCOM_RTB_SEPARATE_CPUS) - unsigned int cpu; -#endif - int ret; - - if (!pdev->dev.of_node) { - msm_rtb.size = d->size; - } else { - u64 size; - struct device_node *pnode; - - pnode = of_parse_phandle(pdev->dev.of_node, - "linux,contiguous-region", 0); - if (pnode != NULL) { - const u32 *addr; - - addr = of_get_address(pnode, 0, &size, NULL); - if (!addr) { - of_node_put(pnode); - return -EINVAL; - } - of_node_put(pnode); - } else { - ret = of_property_read_u32(pdev->dev.of_node, - 
"qcom,rtb-size", - (u32 *)&size); - if (ret < 0) - return ret; - - } - - msm_rtb.size = size; - } - - if (msm_rtb.size <= 0 || msm_rtb.size > SZ_1M) - return -EINVAL; - - msm_rtb.rtb = dma_alloc_coherent(&pdev->dev, msm_rtb.size, - &msm_rtb.phys, - GFP_KERNEL); - - if (!msm_rtb.rtb) - return -ENOMEM; - - msm_rtb.nentries = msm_rtb.size / sizeof(struct msm_rtb_layout); - - /* Round this down to a power of 2 */ - msm_rtb.nentries = __rounddown_pow_of_two(msm_rtb.nentries); - - memset(msm_rtb.rtb, 0, msm_rtb.size); - - strlcpy(md_entry.name, "KRTB_BUF", sizeof(md_entry.name)); - md_entry.virt_addr = (uintptr_t)msm_rtb.rtb; - md_entry.phys_addr = msm_rtb.phys; - md_entry.size = msm_rtb.size; - if (msm_minidump_add_region(&md_entry)) - pr_info("Failed to add RTB in Minidump\n"); - -#if defined(CONFIG_QCOM_RTB_SEPARATE_CPUS) - for_each_possible_cpu(cpu) { - atomic_t *a = &per_cpu(msm_rtb_idx_cpu, cpu); - - atomic_set(a, cpu); - } - msm_rtb.step_size = num_possible_cpus(); -#else - atomic_set(&msm_rtb_idx, 0); - msm_rtb.step_size = 1; -#endif - - atomic_notifier_chain_register(&panic_notifier_list, - &msm_rtb_panic_blk); - msm_rtb.initialized = 1; - return 0; -} - -static const struct of_device_id msm_match_table[] = { - {.compatible = RTB_COMPAT_STR}, - {}, -}; - -static struct platform_driver msm_rtb_driver = { - .probe = msm_rtb_probe, - .driver = { - .name = "msm_rtb", - .owner = THIS_MODULE, - .of_match_table = msm_match_table - }, -}; - -static int __init msm_rtb_init(void) -{ - return platform_driver_register(&msm_rtb_driver); -} - -static void __exit msm_rtb_exit(void) -{ - platform_driver_unregister(&msm_rtb_driver); -} -module_init(msm_rtb_init) -module_exit(msm_rtb_exit) diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug index 572440dd35c0..ee7157e76df9 100644 --- a/lib/Kconfig.debug +++ b/lib/Kconfig.debug @@ -351,6 +351,20 @@ config HEADERS_CHECK exported to $(INSTALL_HDR_PATH) (usually 'usr/include' in your build tree), to make sure they're suitable. 
+config OPTIMIZE_INLINING + bool "Allow compiler to uninline functions marked 'inline'" + help + This option determines if the kernel forces gcc to inline the functions + developers have marked 'inline'. Doing so takes away freedom from gcc to + do what it thinks is best, which is desirable for the gcc 3.x series of + compilers. The gcc 4.x series have a rewritten inlining algorithm and + enabling this option will generate a smaller kernel there. Hopefully + this algorithm is so good that allowing gcc 4.x and above to make the + decision will become the default in the future. Until then this option + is there to test gcc for this. + + If unsure, say N. + config DEBUG_SECTION_MISMATCH bool "Enable full Section mismatch analysis" help @@ -1039,7 +1053,7 @@ config PANIC_TIMEOUT config SCHED_DEBUG bool "Collect scheduler debugging info" - depends on DEBUG_KERNEL && PROC_FS + depends on PROC_FS default y help If you say Y here, the /proc/sched_debug file will be provided diff --git a/lib/Makefile b/lib/Makefile index f29a816df3ec..4cfeba73334b 100644 --- a/lib/Makefile +++ b/lib/Makefile @@ -37,6 +37,7 @@ lib-y := ctype.o string.o vsprintf.o cmdline.o \ earlycpio.o seq_buf.o siphash.o \ nmi_backtrace.o nodemask.o win_minmax.o +CFLAGS_kobject_uevent.o += -Wframe-larger-than=3072 lib-$(CONFIG_MMU) += ioremap.o lib-$(CONFIG_SMP) += cpumask.o lib-$(CONFIG_DMA_NOOP_OPS) += dma-noop.o diff --git a/lib/bitmap.c b/lib/bitmap.c index fbe38a83acb3..c2d548131662 100644 --- a/lib/bitmap.c +++ b/lib/bitmap.c @@ -606,7 +606,7 @@ static int __bitmap_parselist(const char *buf, unsigned int buflen, /* if no digit is after '-', it's wrong*/ if (at_start && in_range) return -EINVAL; - if (!(a <= b) || group_size == 0 || !(used_size <= group_size)) + if (!(a <= b) || !(used_size <= group_size)) return -EINVAL; if (b >= nmaskbits) return -ERANGE; diff --git a/lib/crc32.c b/lib/crc32.c index cd0e32c1889d..be999d91428b 100644 --- a/lib/crc32.c +++ b/lib/crc32.c @@ -183,31 +183,38 @@ static 
inline u32 __pure crc32_le_generic(u32 crc, unsigned char const *p, } #if CRC_LE_BITS == 1 -u32 __pure __weak crc32_le(u32 crc, unsigned char const *p, size_t len) +u32 __pure crc32_le_base(u32 crc, unsigned char const *p, size_t len) { return crc32_le_generic(crc, p, len, NULL, CRC32_POLY_LE); } -u32 __pure __weak __crc32c_le(u32 crc, unsigned char const *p, size_t len) +u32 __pure __crc32c_le_base(u32 crc, unsigned char const *p, size_t len) { return crc32_le_generic(crc, p, len, NULL, CRC32C_POLY_LE); } #else -u32 __pure __weak crc32_le(u32 crc, unsigned char const *p, size_t len) +u32 __pure crc32_le_base(u32 crc, unsigned char const *p, size_t len) { return crc32_le_generic(crc, p, len, (const u32 (*)[256])crc32table_le, CRC32_POLY_LE); } -u32 __pure __weak __crc32c_le(u32 crc, unsigned char const *p, size_t len) +u32 __pure __crc32c_le_base(u32 crc, unsigned char const *p, size_t len) { return crc32_le_generic(crc, p, len, (const u32 (*)[256])crc32ctable_le, CRC32C_POLY_LE); } #endif -EXPORT_SYMBOL(crc32_le); -EXPORT_SYMBOL(__crc32c_le); -u32 __pure crc32_le_base(u32, unsigned char const *, size_t) __alias(crc32_le); -u32 __pure __crc32c_le_base(u32, unsigned char const *, size_t) __alias(__crc32c_le); +u32 __pure __weak crc32_le(u32 crc, unsigned char const *p, size_t len) +{ + return crc32_le_base(crc, p, len); +} +EXPORT_SYMBOL(crc32_le); + +u32 __pure __weak __crc32c_le(u32 crc, unsigned char const *p, size_t len) +{ + return __crc32c_le_base(crc, p, len); +} +EXPORT_SYMBOL(__crc32c_le); /* * This multiplies the polynomials x and y modulo the given modulus. diff --git a/lib/debug_locks.c b/lib/debug_locks.c index 124fdf238b3d..73b861e03d4b 100644 --- a/lib/debug_locks.c +++ b/lib/debug_locks.c @@ -21,7 +21,7 @@ * that would just muddy the log. So we report the first one and * shut up after that. 
*/ -int debug_locks = 1; +int debug_locks = 0; EXPORT_SYMBOL_GPL(debug_locks); /* diff --git a/lib/iomap.c b/lib/iomap.c index f45ea96c77a2..541d926da95e 100644 --- a/lib/iomap.c +++ b/lib/iomap.c @@ -6,7 +6,6 @@ */ #include #include -#include #include @@ -72,31 +71,26 @@ static void bad_io_access(unsigned long port, const char *access) unsigned int ioread8(void __iomem *addr) { - uncached_logk_pc(LOGK_READL, __builtin_return_address(0), addr); - IO_COND(addr, return inb(port), return readb_no_log(addr)); + IO_COND(addr, return inb(port), return readb(addr)); return 0xff; } unsigned int ioread16(void __iomem *addr) { - uncached_logk_pc(LOGK_READL, __builtin_return_address(0), addr); - IO_COND(addr, return inw(port), return readw_no_log(addr)); + IO_COND(addr, return inw(port), return readw(addr)); return 0xffff; } unsigned int ioread16be(void __iomem *addr) { - uncached_logk_pc(LOGK_READL, __builtin_return_address(0), addr); IO_COND(addr, return pio_read16be(port), return mmio_read16be(addr)); return 0xffff; } unsigned int ioread32(void __iomem *addr) { - uncached_logk_pc(LOGK_READL, __builtin_return_address(0), addr); - IO_COND(addr, return inl(port), return readl_no_log(addr)); + IO_COND(addr, return inl(port), return readl(addr)); return 0xffffffff; } unsigned int ioread32be(void __iomem *addr) { - uncached_logk_pc(LOGK_READL, __builtin_return_address(0), addr); IO_COND(addr, return pio_read32be(port), return mmio_read32be(addr)); return 0xffffffff; } @@ -118,27 +112,22 @@ EXPORT_SYMBOL(ioread32be); void iowrite8(u8 val, void __iomem *addr) { - uncached_logk_pc(LOGK_WRITEL, __builtin_return_address(0), addr); - IO_COND(addr, outb(val, port), writeb_no_log(val, addr)); + IO_COND(addr, outb(val,port), writeb(val, addr)); } void iowrite16(u16 val, void __iomem *addr) { - uncached_logk_pc(LOGK_WRITEL, __builtin_return_address(0), addr); - IO_COND(addr, outw(val, port), writew_no_log(val, addr)); + IO_COND(addr, outw(val,port), writew(val, addr)); } void 
iowrite16be(u16 val, void __iomem *addr) { - uncached_logk_pc(LOGK_WRITEL, __builtin_return_address(0), addr); IO_COND(addr, pio_write16be(val,port), mmio_write16be(val, addr)); } void iowrite32(u32 val, void __iomem *addr) { - uncached_logk_pc(LOGK_WRITEL, __builtin_return_address(0), addr); - IO_COND(addr, outl(val, port), writel_no_log(val, addr)); + IO_COND(addr, outl(val,port), writel(val, addr)); } void iowrite32be(u32 val, void __iomem *addr) { - uncached_logk_pc(LOGK_WRITEL, __builtin_return_address(0), addr); IO_COND(addr, pio_write32be(val,port), mmio_write32be(val, addr)); } EXPORT_SYMBOL(iowrite8); diff --git a/lib/kobject_uevent.c b/lib/kobject_uevent.c index 3916cf0e2f0a..a8551aec5e53 100644 --- a/lib/kobject_uevent.c +++ b/lib/kobject_uevent.c @@ -327,7 +327,7 @@ static void zap_modalias_env(struct kobj_uevent_env *env) int kobject_uevent_env(struct kobject *kobj, enum kobject_action action, char *envp_ext[]) { - struct kobj_uevent_env *env; + struct kobj_uevent_env env; const char *action_string = kobject_actions[action]; const char *devpath = NULL; const char *subsystem; @@ -393,11 +393,6 @@ int kobject_uevent_env(struct kobject *kobj, enum kobject_action action, return 0; } - /* environment buffer */ - env = kzalloc(sizeof(struct kobj_uevent_env), GFP_KERNEL); - if (!env) - return -ENOMEM; - /* complete object path */ devpath = kobject_get_path(kobj, GFP_KERNEL); if (!devpath) { @@ -405,21 +400,23 @@ int kobject_uevent_env(struct kobject *kobj, enum kobject_action action, goto exit; } + memset(&env, 0, sizeof(env)); + /* default keys */ - retval = add_uevent_var(env, "ACTION=%s", action_string); + retval = add_uevent_var(&env, "ACTION=%s", action_string); if (retval) goto exit; - retval = add_uevent_var(env, "DEVPATH=%s", devpath); + retval = add_uevent_var(&env, "DEVPATH=%s", devpath); if (retval) goto exit; - retval = add_uevent_var(env, "SUBSYSTEM=%s", subsystem); + retval = add_uevent_var(&env, "SUBSYSTEM=%s", subsystem); if (retval) goto 
exit; /* keys passed in from the caller */ if (envp_ext) { for (i = 0; envp_ext[i]; i++) { - retval = add_uevent_var(env, "%s", envp_ext[i]); + retval = add_uevent_var(&env, "%s", envp_ext[i]); if (retval) goto exit; } @@ -427,7 +424,7 @@ int kobject_uevent_env(struct kobject *kobj, enum kobject_action action, /* let the kset specific function add its stuff */ if (uevent_ops && uevent_ops->uevent) { - retval = uevent_ops->uevent(kset, kobj, env); + retval = uevent_ops->uevent(kset, kobj, &env); if (retval) { pr_debug("kobject: '%s' (%p): %s: uevent() returned " "%d\n", kobject_name(kobj), kobj, @@ -449,7 +446,7 @@ int kobject_uevent_env(struct kobject *kobj, enum kobject_action action, break; case KOBJ_UNBIND: - zap_modalias_env(env); + zap_modalias_env(&env); break; default: @@ -458,7 +455,7 @@ int kobject_uevent_env(struct kobject *kobj, enum kobject_action action, mutex_lock(&uevent_sock_mutex); /* we will send an event, so request a new sequence number */ - retval = add_uevent_var(env, "SEQNUM=%llu", (unsigned long long)++uevent_seqnum); + retval = add_uevent_var(&env, "SEQNUM=%llu", (unsigned long long)++uevent_seqnum); if (retval) { mutex_unlock(&uevent_sock_mutex); goto exit; @@ -476,7 +473,7 @@ int kobject_uevent_env(struct kobject *kobj, enum kobject_action action, /* allocate message with the maximum possible size */ len = strlen(action_string) + strlen(devpath) + 2; - skb = alloc_skb(len + env->buflen, GFP_KERNEL); + skb = alloc_skb(len + env.buflen, GFP_KERNEL); if (skb) { char *scratch; @@ -485,10 +482,10 @@ int kobject_uevent_env(struct kobject *kobj, enum kobject_action action, sprintf(scratch, "%s@%s", action_string, devpath); /* copy keys to our continuous event payload buffer */ - for (i = 0; i < env->envp_idx; i++) { - len = strlen(env->envp[i]) + 1; + for (i = 0; i < env.envp_idx; i++) { + len = strlen(env.envp[i]) + 1; scratch = skb_put(skb, len); - strcpy(scratch, env->envp[i]); + strcpy(scratch, env.envp[i]); } NETLINK_CB(skb).dst_group = 1; 
@@ -510,31 +507,28 @@ int kobject_uevent_env(struct kobject *kobj, enum kobject_action action, if (uevent_helper[0] && !kobj_usermode_filter(kobj)) { struct subprocess_info *info; - retval = add_uevent_var(env, "HOME=/"); + retval = add_uevent_var(&env, "HOME=/"); if (retval) goto exit; - retval = add_uevent_var(env, + retval = add_uevent_var(&env, "PATH=/sbin:/bin:/usr/sbin:/usr/bin"); if (retval) goto exit; - retval = init_uevent_argv(env, subsystem); + retval = init_uevent_argv(&env, subsystem); if (retval) goto exit; retval = -ENOMEM; - info = call_usermodehelper_setup(env->argv[0], env->argv, - env->envp, GFP_KERNEL, - NULL, cleanup_uevent_env, env); - if (info) { + info = call_usermodehelper_setup(env.argv[0], env.argv, + env.envp, GFP_KERNEL, + NULL, cleanup_uevent_env, &env); + if (info) retval = call_usermodehelper_exec(info, UMH_NO_WAIT); - env = NULL; /* freed by cleanup_uevent_env */ - } } #endif exit: kfree(devpath); - kfree(env); return retval; } EXPORT_SYMBOL_GPL(kobject_uevent_env); diff --git a/lib/scatterlist.c b/lib/scatterlist.c index 834c846c5af8..a11127d47b33 100644 --- a/lib/scatterlist.c +++ b/lib/scatterlist.c @@ -163,30 +163,12 @@ EXPORT_SYMBOL(sg_init_one); */ static struct scatterlist *sg_kmalloc(unsigned int nents, gfp_t gfp_mask) { - if (nents == SG_MAX_SINGLE_ALLOC) { - /* - * Kmemleak doesn't track page allocations as they are not - * commonly used (in a raw form) for kernel data structures. - * As we chain together a list of pages and then a normal - * kmalloc (tracked by kmemleak), in order to for that last - * allocation not to become decoupled (and thus a - * false-positive) we need to inform kmemleak of all the - * intermediate allocations. 
- */ - void *ptr = (void *) __get_free_page(gfp_mask); - kmemleak_alloc(ptr, PAGE_SIZE, 1, gfp_mask); - return ptr; - } else - return kmalloc(nents * sizeof(struct scatterlist), gfp_mask); + return kmalloc(nents * sizeof(struct scatterlist), gfp_mask); } static void sg_kfree(struct scatterlist *sg, unsigned int nents) { - if (nents == SG_MAX_SINGLE_ALLOC) { - kmemleak_free(sg); - free_page((unsigned long) sg); - } else - kfree(sg); + kfree(sg); } /** diff --git a/lib/test_bitmap.c b/lib/test_bitmap.c index 0a6f492fb9d9..ae8a830e4e54 100644 --- a/lib/test_bitmap.c +++ b/lib/test_bitmap.c @@ -218,10 +218,6 @@ static const struct test_bitmap_parselist parselist_tests[] __initconst = { {-EINVAL, "-1", NULL, 8, 0}, {-EINVAL, "-0", NULL, 8, 0}, {-EINVAL, "10-1", NULL, 8, 0}, - {-EINVAL, "0-31:", NULL, 8, 0}, - {-EINVAL, "0-31:0", NULL, 8, 0}, - {-EINVAL, "0-31:0/0", NULL, 8, 0}, - {-EINVAL, "0-31:1/0", NULL, 8, 0}, {-EINVAL, "0-31:10/1", NULL, 8, 0}, }; diff --git a/mm/compaction.c b/mm/compaction.c index 4455fc8afdbd..bfbf9a4fb771 100644 --- a/mm/compaction.c +++ b/mm/compaction.c @@ -23,6 +23,10 @@ #include #include #include +#include +#include +#include +#include #include "internal.h" #ifdef CONFIG_COMPACTION @@ -761,13 +765,15 @@ isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn, /* * Periodically drop the lock (if held) regardless of its - * contention, to give chance to IRQs. Abort async compaction - * if contended. + * contention, to give chance to IRQs. Abort completely if + * a fatal signal is pending. 
*/ if (!(low_pfn % SWAP_CLUSTER_MAX) && compact_unlock_should_abort(zone_lru_lock(zone), flags, - &locked, cc)) - break; + &locked, cc)) { + low_pfn = 0; + goto fatal_pending; + } if (!pfn_valid_within(low_pfn)) goto isolate_fail; @@ -960,6 +966,7 @@ isolate_fail: trace_mm_compaction_isolate_migratepages(start_pfn, low_pfn, nr_scanned, nr_isolated); +fatal_pending: cc->total_migrate_scanned += nr_scanned; if (nr_isolated) count_compact_events(COMPACTISOLATED, nr_isolated); @@ -1825,6 +1832,53 @@ enum compact_result try_to_compact_pages(gfp_t gfp_mask, unsigned int order, return rc; } +static struct workqueue_struct *compaction_wq; +static struct delayed_work compaction_work; +static bool screen_on = true; +static int compaction_timeout_ms = 900000; +module_param_named(compaction_forced_timeout_ms, compaction_timeout_ms, int, + 0644); +static int compaction_soff_delay_ms = 3000; +module_param_named(compaction_screen_off_delay_ms, compaction_soff_delay_ms, int, + 0644); +static unsigned long compaction_forced_timeout; + + +static int msm_drm_notifier_callback(struct notifier_block *self, unsigned long event, void *data) +{ + struct msm_drm_notifier *evdata = data; + int *blank; + + if (event != MSM_DRM_EVENT_BLANK) + return 0; + + if (evdata->id != MSM_DRM_PRIMARY_DISPLAY) + return 0; + + if (evdata && evdata->data) { + blank = evdata->data; + + switch (*blank) { + case MSM_DRM_BLANK_POWERDOWN: + screen_on = false; + if (time_after(jiffies, compaction_forced_timeout) && !delayed_work_busy(&compaction_work)) { + compaction_forced_timeout = jiffies + msecs_to_jiffies(compaction_timeout_ms); + queue_delayed_work(compaction_wq, &compaction_work, + msecs_to_jiffies(compaction_soff_delay_ms)); + } + break; + case MSM_DRM_BLANK_UNBLANK: + screen_on = true; + break; + } + } + + return 0; +} + +static struct notifier_block compaction_notifier_block = { + .notifier_call = msm_drm_notifier_callback, +}; /* Compact all zones within a node */ static void compact_node(int nid) @@ 
-1868,6 +1922,23 @@ static void compact_nodes(void) compact_node(nid); } +static void do_compaction(struct work_struct *work) +{ + /* Return early if the screen is on */ + if (screen_on) + return; + + pr_info("Scheduled memory compaction is starting\n"); + + /* Do full compaction */ + compact_nodes(); + + /* Force compaction timeout */ + compaction_forced_timeout = jiffies + msecs_to_jiffies(compaction_timeout_ms); + + pr_info("Scheduled memory compaction is completed\n"); +} + /* The written value is actually unused, all memory is compacted */ int sysctl_compact_memory; @@ -2153,4 +2224,19 @@ static int __init kcompactd_init(void) } subsys_initcall(kcompactd_init) +static int __init scheduled_compaction_init(void) +{ + compaction_wq = create_freezable_workqueue("compaction_wq"); + + if (!compaction_wq) + return -EFAULT; + + INIT_DELAYED_WORK(&compaction_work, do_compaction); + + msm_drm_register_client(&compaction_notifier_block); + + return 0; +} +late_initcall(scheduled_compaction_init); + #endif /* CONFIG_COMPACTION */ diff --git a/mm/kmemleak.c b/mm/kmemleak.c index 560852d10c7b..e334ddf76b77 100644 --- a/mm/kmemleak.c +++ b/mm/kmemleak.c @@ -589,11 +589,10 @@ static struct kmemleak_object *create_object(unsigned long ptr, size_t size, struct kmemleak_object *object, *parent; struct rb_node **link, *rb_parent; - object = mem_pool_alloc(gfp); - if (!object) { - pr_warn("Cannot allocate a kmemleak_object structure\n"); - kmemleak_disable(); - return NULL; + while (1) { + object = kmem_cache_alloc(object_cache, gfp_kmemleak_mask(gfp)); + if (object) + break; } INIT_LIST_HEAD(&object->object_list); diff --git a/mm/oom_kill.c b/mm/oom_kill.c index 8622bd16d1a0..e709b06ce36a 100644 --- a/mm/oom_kill.c +++ b/mm/oom_kill.c @@ -53,7 +53,6 @@ int sysctl_panic_on_oom; int sysctl_oom_kill_allocating_task; int sysctl_oom_dump_tasks = 1; -int sysctl_reap_mem_on_sigkill; DEFINE_MUTEX(oom_lock); @@ -605,21 +604,13 @@ void wake_oom_reaper(struct task_struct *tsk) if 
(!oom_reaper_th) return; - /* - * Move the lock here to avoid scenario of queuing - * the same task by both OOM killer and any other SIGKILL - * path. - */ - spin_lock(&oom_reaper_lock); - /* mm is already queued? */ - if (test_and_set_bit(MMF_OOM_REAP_QUEUED, &tsk->signal->oom_mm->flags)) { - spin_unlock(&oom_reaper_lock); + if (test_and_set_bit(MMF_OOM_REAP_QUEUED, &tsk->signal->oom_mm->flags)) return; - } get_task_struct(tsk); + spin_lock(&oom_reaper_lock); tsk->oom_reaper_list = oom_reaper_list; oom_reaper_list = tsk; spin_unlock(&oom_reaper_lock); @@ -644,16 +635,6 @@ static inline void wake_oom_reaper(struct task_struct *tsk) } #endif /* CONFIG_MMU */ -static void __mark_oom_victim(struct task_struct *tsk) -{ - struct mm_struct *mm = tsk->mm; - - if (!cmpxchg(&tsk->signal->oom_mm, NULL, mm)) { - mmgrab(tsk->signal->oom_mm); - set_bit(MMF_OOM_VICTIM, &mm->flags); - } -} - /** * mark_oom_victim - mark the given task as OOM victim * @tsk: task to mark @@ -666,13 +647,18 @@ static void __mark_oom_victim(struct task_struct *tsk) */ static void mark_oom_victim(struct task_struct *tsk) { + struct mm_struct *mm = tsk->mm; + WARN_ON(oom_killer_disabled); /* OOM killer might race with memcg OOM */ if (test_and_set_tsk_thread_flag(tsk, TIF_MEMDIE)) return; /* oom_mm is bound to the signal struct life time. 
*/ - __mark_oom_victim(tsk); + if (!cmpxchg(&tsk->signal->oom_mm, NULL, mm)) { + mmgrab(tsk->signal->oom_mm); + set_bit(MMF_OOM_VICTIM, &mm->flags); + } /* * Make sure that the task is woken up from uninterruptible sleep @@ -1114,58 +1100,3 @@ void pagefault_out_of_memory(void) out_of_memory(&oc); mutex_unlock(&oom_lock); } - -/* Call this function with task_lock being held as we're accessing ->mm */ -void dump_killed_info(struct task_struct *selected) -{ - int selected_tasksize = get_mm_rss(selected->mm); - - pr_info_ratelimited("Killing '%s' (%d), adj %hd,\n" - " to free %ldkB on behalf of '%s' (%d)\n" - " Free CMA is %ldkB\n" - " Total reserve is %ldkB\n" - " Total free pages is %ldkB\n" - " Total file cache is %ldkB\n", - selected->comm, selected->pid, - selected->signal->oom_score_adj, - selected_tasksize * (long)(PAGE_SIZE / 1024), - current->comm, current->pid, - global_zone_page_state(NR_FREE_CMA_PAGES) * - (long)(PAGE_SIZE / 1024), - totalreserve_pages * (long)(PAGE_SIZE / 1024), - global_zone_page_state(NR_FREE_PAGES) * - (long)(PAGE_SIZE / 1024), - global_node_page_state(NR_FILE_PAGES) * - (long)(PAGE_SIZE / 1024)); -} - -void add_to_oom_reaper(struct task_struct *p) -{ - static DEFINE_RATELIMIT_STATE(reaper_rs, DEFAULT_RATELIMIT_INTERVAL, - DEFAULT_RATELIMIT_BURST); - - if (!sysctl_reap_mem_on_sigkill) - return; - - p = find_lock_task_mm(p); - if (!p) - return; - - get_task_struct(p); - if (task_will_free_mem(p)) { - __mark_oom_victim(p); - wake_oom_reaper(p); - } - - dump_killed_info(p); - task_unlock(p); - - if (__ratelimit(&reaper_rs) && p->signal->oom_score_adj == 0) { - show_mem(SHOW_MEM_FILTER_NODES, NULL); - show_mem_call_notifiers(); - if (sysctl_oom_dump_tasks) - dump_tasks(NULL, NULL); - } - - put_task_struct(p); -} diff --git a/mm/page_alloc.c b/mm/page_alloc.c index 87fa2336b76e..cb85ab62135f 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -4576,8 +4576,14 @@ void page_frag_free(void *addr) { struct page *page = 
virt_to_head_page(addr); - if (unlikely(put_page_testzero(page))) - __free_pages_ok(page, compound_order(page)); + if (unlikely(put_page_testzero(page))) { + unsigned int order = compound_order(page); + + if (order == 0) /* Via pcp? */ + free_hot_cold_page(page, false); + else + __free_pages_ok(page, order); + } } EXPORT_SYMBOL(page_frag_free); diff --git a/mm/slab_common.c b/mm/slab_common.c index 20da89561fd2..386898fb0e70 100644 --- a/mm/slab_common.c +++ b/mm/slab_common.c @@ -462,6 +462,9 @@ kmem_cache_create(const char *name, size_t size, size_t align, */ flags &= CACHE_CREATE_MASK; + /* Embrace davem */ + flags |= SLAB_HWCACHE_ALIGN; + s = __kmem_cache_alias(name, size, align, flags, ctor); if (s) goto out_unlock; diff --git a/mm/swap_state.c b/mm/swap_state.c index f8b76b0b2639..1f757b703927 100644 --- a/mm/swap_state.c +++ b/mm/swap_state.c @@ -583,6 +583,10 @@ struct page *swap_cluster_readahead(swp_entry_t entry, gfp_t gfp_mask, if (!mask) goto skip; + /* If exiting, don't do swap readahead. */ + if (current->flags & PF_EXITING) + goto skip; + do_poll = false; /* Read a page_cluster sized and aligned cluster around offset. */ start_offset = offset & ~mask; diff --git a/net/ipv4/fib_trie.c b/net/ipv4/fib_trie.c index 3f9509679f0e..00f7af43c89c 100644 --- a/net/ipv4/fib_trie.c +++ b/net/ipv4/fib_trie.c @@ -347,12 +347,18 @@ static struct tnode *tnode_alloc(int bits) static inline void empty_child_inc(struct key_vector *n) { - ++tn_info(n)->empty_children ? : ++tn_info(n)->full_children; + tn_info(n)->empty_children++; + + if (!tn_info(n)->empty_children) + tn_info(n)->full_children++; } static inline void empty_child_dec(struct key_vector *n) { - tn_info(n)->empty_children-- ? 
: tn_info(n)->full_children--; + if (!tn_info(n)->empty_children) + tn_info(n)->full_children--; + + tn_info(n)->empty_children--; } static struct key_vector *leaf_new(t_key key, struct fib_alias *fa) diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c index 8098e5f4426a..d0192e877b0b 100644 --- a/net/netfilter/nf_conntrack_core.c +++ b/net/netfilter/nf_conntrack_core.c @@ -1236,6 +1236,8 @@ __nf_conntrack_alloc(struct net *net, if (ct == NULL) goto out; + kmemleak_not_leak(ct); + spin_lock_init(&ct->lock); ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple = *orig; ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode.pprev = NULL; diff --git a/net/netfilter/nf_conntrack_extend.c b/net/netfilter/nf_conntrack_extend.c index 9fe0ddc333fb..62fc32fae1c9 100644 --- a/net/netfilter/nf_conntrack_extend.c +++ b/net/netfilter/nf_conntrack_extend.c @@ -75,6 +75,8 @@ void *nf_ct_ext_add(struct nf_conn *ct, enum nf_ct_ext_id id, gfp_t gfp) if (!new) return NULL; + kmemleak_not_leak(new); + if (!old) { memset(new->offset, 0, sizeof(new->offset)); ct->ext = new; diff --git a/net/xfrm/xfrm_interface.c b/net/xfrm/xfrm_interface.c index 31acc6f33d98..e10ffe370400 100644 --- a/net/xfrm/xfrm_interface.c +++ b/net/xfrm/xfrm_interface.c @@ -119,7 +119,7 @@ static void xfrmi_dev_free(struct net_device *dev) free_percpu(dev->tstats); } -static int xfrmi_create2(struct net_device *dev) +static int xfrmi_create(struct net_device *dev) { struct xfrm_if *xi = netdev_priv(dev); struct net *net = dev_net(dev); @@ -142,54 +142,7 @@ out: return err; } -static struct xfrm_if *xfrmi_create(struct net *net, struct xfrm_if_parms *p) -{ - struct net_device *dev; - struct xfrm_if *xi; - char name[IFNAMSIZ]; - int err; - - if (p->name[0]) { - strlcpy(name, p->name, IFNAMSIZ); - } else { - err = -EINVAL; - goto failed; - } - - dev = alloc_netdev(sizeof(*xi), name, NET_NAME_UNKNOWN, xfrmi_dev_setup); - if (!dev) { - err = -EAGAIN; - goto failed; - } - - dev_net_set(dev, net); - - xi = 
netdev_priv(dev); - xi->p = *p; - xi->net = net; - xi->dev = dev; - xi->phydev = dev_get_by_index(net, p->link); - if (!xi->phydev) { - err = -ENODEV; - goto failed_free; - } - - err = xfrmi_create2(dev); - if (err < 0) - goto failed_dev_put; - - return xi; - -failed_dev_put: - dev_put(xi->phydev); -failed_free: - free_netdev(dev); -failed: - return ERR_PTR(err); -} - -static struct xfrm_if *xfrmi_locate(struct net *net, struct xfrm_if_parms *p, - int create) +static struct xfrm_if *xfrmi_locate(struct net *net, struct xfrm_if_parms *p) { struct xfrm_if __rcu **xip; struct xfrm_if *xi; @@ -197,17 +150,11 @@ static struct xfrm_if *xfrmi_locate(struct net *net, struct xfrm_if_parms *p, for (xip = &xfrmn->xfrmi[0]; (xi = rtnl_dereference(*xip)) != NULL; - xip = &xi->next) { - if (xi->p.if_id == p->if_id) { - if (create) - return ERR_PTR(-EEXIST); - + xip = &xi->next) + if (xi->p.if_id == p->if_id) return xi; - } - } - if (!create) - return ERR_PTR(-ENODEV); - return xfrmi_create(net, p); + + return NULL; } static void xfrmi_dev_uninit(struct net_device *dev) @@ -675,21 +622,33 @@ static int xfrmi_newlink(struct net *src_net, struct net_device *dev, struct netlink_ext_ack *extack) { struct net *net = dev_net(dev); - struct xfrm_if_parms *p; + struct xfrm_if_parms p; struct xfrm_if *xi; + int err; - xi = netdev_priv(dev); - p = &xi->p; - - xfrmi_netlink_parms(data, p); + xfrmi_netlink_parms(data, &p); if (!tb[IFLA_IFNAME]) return -EINVAL; - nla_strlcpy(p->name, tb[IFLA_IFNAME], IFNAMSIZ); + nla_strlcpy(p.name, tb[IFLA_IFNAME], IFNAMSIZ); - xi = xfrmi_locate(net, p, 1); - return PTR_ERR_OR_ZERO(xi); + xi = xfrmi_locate(net, &p); + if (xi) + return -EEXIST; + + xi = netdev_priv(dev); + xi->p = p; + xi->net = net; + xi->dev = dev; + xi->phydev = dev_get_by_index(net, p.link); + if (!xi->phydev) + return -ENODEV; + + err = xfrmi_create(dev); + if (err < 0) + dev_put(xi->phydev); + return err; } static void xfrmi_dellink(struct net_device *dev, struct list_head *head) @@ 
-706,9 +665,8 @@ static int xfrmi_changelink(struct net_device *dev, struct nlattr *tb[], xfrmi_netlink_parms(data, &xi->p); - xi = xfrmi_locate(net, &xi->p, 0); - - if (IS_ERR_OR_NULL(xi)) { + xi = xfrmi_locate(net, &xi->p); + if (!xi) { xi = netdev_priv(dev); } else { if (xi->dev != dev) diff --git a/security/selinux/Kconfig b/security/selinux/Kconfig index 8297e48a283d..2728517339a1 100644 --- a/security/selinux/Kconfig +++ b/security/selinux/Kconfig @@ -1,6 +1,6 @@ config SECURITY_SELINUX bool "NSA SELinux Support" - depends on SECURITY_NETWORK && AUDIT && NET && INET + depends on SECURITY_NETWORK && NET && INET select NETWORK_SECMARK default n help diff --git a/security/selinux/avc.c b/security/selinux/avc.c index 58959b793297..d10b03f36823 100644 --- a/security/selinux/avc.c +++ b/security/selinux/avc.c @@ -128,7 +128,7 @@ static inline int avc_hash(u32 ssid, u32 tsid, u16 tclass) { return (ssid ^ (tsid<<2) ^ (tclass<<4)) & (AVC_CACHE_SLOTS - 1); } - +#ifdef CONFIG_AUDIT /** * avc_dump_av - Display an access vector in human-readable form. * @tclass: target security class @@ -197,6 +197,7 @@ static void avc_dump_query(struct audit_buffer *ab, struct selinux_state *state, BUG_ON(!tclass || tclass >= ARRAY_SIZE(secclass_map)); audit_log_format(ab, " tclass=%s", secclass_map[tclass-1].name); } +#endif /** * avc_init - Initialize the AVC. 
@@ -491,6 +492,7 @@ static inline int avc_xperms_audit(struct selinux_state *state, u8 perm, int result, struct common_audit_data *ad) { +#ifdef CONFIG_AUDIT u32 audited, denied; audited = avc_xperms_audit_required( @@ -499,6 +501,9 @@ static inline int avc_xperms_audit(struct selinux_state *state, return 0; return slow_avc_audit(state, ssid, tsid, tclass, requested, audited, denied, result, ad, 0); +#else + return 0; +#endif } static void avc_node_free(struct rcu_head *rhead) @@ -726,6 +731,7 @@ found: return node; } +#ifdef CONFIG_AUDIT /** * avc_audit_pre_callback - SELinux specific information * will be called by generic audit code @@ -762,6 +768,7 @@ static void avc_audit_post_callback(struct audit_buffer *ab, void *a) } } + /* This is the slow part of avc audit with big stack footprint */ noinline int slow_avc_audit(struct selinux_state *state, u32 ssid, u32 tsid, u16 tclass, @@ -772,6 +779,10 @@ noinline int slow_avc_audit(struct selinux_state *state, struct common_audit_data stack_data; struct selinux_audit_data sad; + /* Only log permissive=1 messages for SECURITY_SELINUX_DEVELOP */ + if (denied && !result) + return 0; + if (!a) { a = &stack_data; a->type = LSM_AUDIT_DATA_NONE; @@ -802,6 +813,7 @@ noinline int slow_avc_audit(struct selinux_state *state, common_lsm_audit(a, avc_audit_pre_callback, avc_audit_post_callback); return 0; } +#endif /** * avc_add_callback - Register a callback for security events. 
diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c index f4b416af3b90..bd10409531c6 100644 --- a/security/selinux/hooks.c +++ b/security/selinux/hooks.c @@ -3158,6 +3158,7 @@ static noinline int audit_inode_permission(struct inode *inode, int result, unsigned flags) { +#ifdef CONFIG_AUDIT struct common_audit_data ad; struct inode_security_struct *isec = inode->i_security; int rc; @@ -3170,6 +3171,7 @@ static noinline int audit_inode_permission(struct inode *inode, audited, denied, result, &ad, flags); if (rc) return rc; +#endif return 0; } diff --git a/security/selinux/include/avc.h b/security/selinux/include/avc.h index ef899bcfd2cb..e6a4c1f66636 100644 --- a/security/selinux/include/avc.h +++ b/security/selinux/include/avc.h @@ -131,6 +131,7 @@ static inline int avc_audit(struct selinux_state *state, struct common_audit_data *a, int flags) { +#ifdef CONFIG_AUDIT u32 audited, denied; audited = avc_audit_required(requested, avd, result, 0, &denied); if (likely(!audited)) @@ -138,6 +139,9 @@ static inline int avc_audit(struct selinux_state *state, return slow_avc_audit(state, ssid, tsid, tclass, requested, audited, denied, result, a, flags); +#else + return 0; +#endif } #define AVC_STRICT 1 /* Ignore permissive mode. 
*/ diff --git a/security/selinux/ss/services.c b/security/selinux/ss/services.c index 98c418060032..7974a1cc1927 100644 --- a/security/selinux/ss/services.c +++ b/security/selinux/ss/services.c @@ -3530,6 +3530,7 @@ out: return match; } +#ifdef CONFIG_AUDIT static int (*aurule_callback)(void) = audit_update_lsm_rules; static int aurule_avc_callback(u32 event) @@ -3552,6 +3553,7 @@ static int __init aurule_init(void) return err; } __initcall(aurule_init); +#endif #ifdef CONFIG_NETLABEL /** diff --git a/sound/core/control_compat.c b/sound/core/control_compat.c index 507fd5210c1c..ee150f8988a9 100644 --- a/sound/core/control_compat.c +++ b/sound/core/control_compat.c @@ -94,55 +94,52 @@ struct snd_ctl_elem_info32 { static int snd_ctl_elem_info_compat(struct snd_ctl_file *ctl, struct snd_ctl_elem_info32 __user *data32) { - struct snd_ctl_elem_info *data; + struct snd_ctl_elem_info data; int err; - data = kzalloc(sizeof(*data), GFP_KERNEL); - if (! data) - return -ENOMEM; - + memset(&data, 0, sizeof(data)); err = -EFAULT; /* copy id */ - if (copy_from_user(&data->id, &data32->id, sizeof(data->id))) + if (copy_from_user(&data.id, &data32->id, sizeof(data.id))) goto error; /* we need to copy the item index. * hope this doesn't break anything.. 
*/ - if (get_user(data->value.enumerated.item, &data32->value.enumerated.item)) + if (get_user(data.value.enumerated.item, &data32->value.enumerated.item)) goto error; err = snd_power_wait(ctl->card, SNDRV_CTL_POWER_D0); if (err < 0) goto error; - err = snd_ctl_elem_info(ctl, data); + err = snd_ctl_elem_info(ctl, &data); if (err < 0) goto error; /* restore info to 32bit */ err = -EFAULT; /* id, type, access, count */ - if (copy_to_user(&data32->id, &data->id, sizeof(data->id)) || - copy_to_user(&data32->type, &data->type, 3 * sizeof(u32))) + if (copy_to_user(&data32->id, &data.id, sizeof(data.id)) || + copy_to_user(&data32->type, &data.type, 3 * sizeof(u32))) goto error; - if (put_user(data->owner, &data32->owner)) + if (put_user(data.owner, &data32->owner)) goto error; - switch (data->type) { + switch (data.type) { case SNDRV_CTL_ELEM_TYPE_BOOLEAN: case SNDRV_CTL_ELEM_TYPE_INTEGER: - if (put_user(data->value.integer.min, &data32->value.integer.min) || - put_user(data->value.integer.max, &data32->value.integer.max) || - put_user(data->value.integer.step, &data32->value.integer.step)) + if (put_user(data.value.integer.min, &data32->value.integer.min) || + put_user(data.value.integer.max, &data32->value.integer.max) || + put_user(data.value.integer.step, &data32->value.integer.step)) goto error; break; case SNDRV_CTL_ELEM_TYPE_INTEGER64: if (copy_to_user(&data32->value.integer64, - &data->value.integer64, - sizeof(data->value.integer64))) + &data.value.integer64, + sizeof(data.value.integer64))) goto error; break; case SNDRV_CTL_ELEM_TYPE_ENUMERATED: if (copy_to_user(&data32->value.enumerated, - &data->value.enumerated, - sizeof(data->value.enumerated))) + &data.value.enumerated, + sizeof(data.value.enumerated))) goto error; break; default: @@ -150,7 +147,6 @@ static int snd_ctl_elem_info_compat(struct snd_ctl_file *ctl, } err = 0; error: - kfree(data); return err; } @@ -187,7 +183,7 @@ static int get_ctl_type(struct snd_card *card, struct snd_ctl_elem_id *id, int 
*countp) { struct snd_kcontrol *kctl; - struct snd_ctl_elem_info *info; + struct snd_ctl_elem_info info; int err; down_read(&card->controls_rwsem); @@ -196,19 +192,13 @@ static int get_ctl_type(struct snd_card *card, struct snd_ctl_elem_id *id, up_read(&card->controls_rwsem); return -ENOENT; } - info = kzalloc(sizeof(*info), GFP_KERNEL); - if (info == NULL) { - up_read(&card->controls_rwsem); - return -ENOMEM; - } - info->id = *id; - err = kctl->info(kctl, info); + info = (typeof(info)){ .id = *id }; + err = kctl->info(kctl, &info); up_read(&card->controls_rwsem); if (err >= 0) { - err = info->type; - *countp = info->count; + err = info.type; + *countp = info.count; } - kfree(info); return err; } @@ -301,14 +291,11 @@ static int copy_ctl_value_to_user(void __user *userdata, static int ctl_elem_read_user(struct snd_card *card, void __user *userdata, void __user *valuep) { - struct snd_ctl_elem_value *data; + struct snd_ctl_elem_value data; int err, type, count; - data = kzalloc(sizeof(*data), GFP_KERNEL); - if (data == NULL) - return -ENOMEM; - - err = copy_ctl_value_from_user(card, data, userdata, valuep, + memset(&data, 0, sizeof(data)); + err = copy_ctl_value_from_user(card, &data, userdata, valuep, &type, &count); if (err < 0) goto error; @@ -316,27 +303,23 @@ static int ctl_elem_read_user(struct snd_card *card, err = snd_power_wait(card, SNDRV_CTL_POWER_D0); if (err < 0) goto error; - err = snd_ctl_elem_read(card, data); + err = snd_ctl_elem_read(card, &data); if (err < 0) goto error; - err = copy_ctl_value_to_user(userdata, valuep, data, type, count); + err = copy_ctl_value_to_user(userdata, valuep, &data, type, count); error: - kfree(data); return err; } static int ctl_elem_write_user(struct snd_ctl_file *file, void __user *userdata, void __user *valuep) { - struct snd_ctl_elem_value *data; + struct snd_ctl_elem_value data; struct snd_card *card = file->card; int err, type, count; - data = kzalloc(sizeof(*data), GFP_KERNEL); - if (data == NULL) - return 
-ENOMEM; - - err = copy_ctl_value_from_user(card, data, userdata, valuep, + memset(&data, 0, sizeof(data)); + err = copy_ctl_value_from_user(card, &data, userdata, valuep, &type, &count); if (err < 0) goto error; @@ -344,12 +327,11 @@ static int ctl_elem_write_user(struct snd_ctl_file *file, err = snd_power_wait(card, SNDRV_CTL_POWER_D0); if (err < 0) goto error; - err = snd_ctl_elem_write(card, file, data); + err = snd_ctl_elem_write(card, file, &data); if (err < 0) goto error; - err = copy_ctl_value_to_user(userdata, valuep, data, type, count); + err = copy_ctl_value_to_user(userdata, valuep, &data, type, count); error: - kfree(data); return err; } @@ -384,48 +366,44 @@ static int snd_ctl_elem_add_compat(struct snd_ctl_file *file, struct snd_ctl_elem_info32 __user *data32, int replace) { - struct snd_ctl_elem_info *data; + struct snd_ctl_elem_info data; int err; - data = kzalloc(sizeof(*data), GFP_KERNEL); - if (! data) - return -ENOMEM; - + memset(&data, 0, sizeof(data)); err = -EFAULT; /* id, type, access, count */ \ - if (copy_from_user(&data->id, &data32->id, sizeof(data->id)) || - copy_from_user(&data->type, &data32->type, 3 * sizeof(u32))) + if (copy_from_user(&data.id, &data32->id, sizeof(data.id)) || + copy_from_user(&data.type, &data32->type, 3 * sizeof(u32))) goto error; - if (get_user(data->owner, &data32->owner)) + if (get_user(data.owner, &data32->owner)) goto error; - switch (data->type) { + switch (data.type) { case SNDRV_CTL_ELEM_TYPE_BOOLEAN: case SNDRV_CTL_ELEM_TYPE_INTEGER: - if (get_user(data->value.integer.min, &data32->value.integer.min) || - get_user(data->value.integer.max, &data32->value.integer.max) || - get_user(data->value.integer.step, &data32->value.integer.step)) + if (get_user(data.value.integer.min, &data32->value.integer.min) || + get_user(data.value.integer.max, &data32->value.integer.max) || + get_user(data.value.integer.step, &data32->value.integer.step)) goto error; break; case SNDRV_CTL_ELEM_TYPE_INTEGER64: - if 
(copy_from_user(&data->value.integer64, + if (copy_from_user(&data.value.integer64, &data32->value.integer64, - sizeof(data->value.integer64))) + sizeof(data.value.integer64))) goto error; break; case SNDRV_CTL_ELEM_TYPE_ENUMERATED: - if (copy_from_user(&data->value.enumerated, + if (copy_from_user(&data.value.enumerated, &data32->value.enumerated, - sizeof(data->value.enumerated))) + sizeof(data.value.enumerated))) goto error; - data->value.enumerated.names_ptr = - (uintptr_t)compat_ptr(data->value.enumerated.names_ptr); + data.value.enumerated.names_ptr = + (uintptr_t)compat_ptr(data.value.enumerated.names_ptr); break; default: break; } - err = snd_ctl_elem_add(file, data, replace); + err = snd_ctl_elem_add(file, &data, replace); error: - kfree(data); return err; } diff --git a/techpack/audio/asoc/Kbuild b/techpack/audio/asoc/Kbuild index 5e0106a2f345..bbc5f2c8a104 100644 --- a/techpack/audio/asoc/Kbuild +++ b/techpack/audio/asoc/Kbuild @@ -199,7 +199,6 @@ ifeq ($(TARGET_PRODUCT), $(filter $(TARGET_PRODUCT),cepheus hercules orion)) CDEFINES += -DCONFIG_SND_SOC_CS35L41_FOR_CEPH endif - KBUILD_CPPFLAGS += $(CDEFINES) # Currently, for versions of gcc which support it, the kernel Makefile diff --git a/techpack/audio/asoc/codecs/tfa98xx/src/tfa98xx.c b/techpack/audio/asoc/codecs/tfa98xx/src/tfa98xx.c index 4530012b2793..73b203b7aec7 100644 --- a/techpack/audio/asoc/codecs/tfa98xx/src/tfa98xx.c +++ b/techpack/audio/asoc/codecs/tfa98xx/src/tfa98xx.c @@ -745,14 +745,7 @@ static ssize_t tfa98xx_dbgfs_fw_state_get(struct file *file, return simple_read_from_buffer(user_buf, count, ppos, str, strlen(str)); } -#ifdef TFA_NON_DSP_SOLUTION extern int send_tfa_cal_apr(void *buf, int cmd_size, bool bRead); -#else -int send_tfa_cal_apr(void *buf, int cmd_size, bool bRead) -{ - return 0; -} -#endif static ssize_t tfa98xx_dbgfs_rpc_read(struct file *file, char __user *user_buf, size_t count, @@ -3558,6 +3551,8 @@ static int tfa98xx_misc_device_rpc_open(struct inode *inode, struct 
file *file) } } +extern int send_tfa_cal_apr(void *buf, int cmd_size, bool bRead); + static ssize_t tfa98xx_misc_device_rpc_read(struct file *file, char __user *user_buf, size_t count, loff_t *ppos) { diff --git a/techpack/audio/asoc/codecs/wcd-spi.c b/techpack/audio/asoc/codecs/wcd-spi.c index ef955dd866be..c73bc2ce2814 100644 --- a/techpack/audio/asoc/codecs/wcd-spi.c +++ b/techpack/audio/asoc/codecs/wcd-spi.c @@ -1392,7 +1392,7 @@ static int wcd_spi_component_bind(struct device *dev, } if (wcd_spi_debugfs_init(spi)) - dev_err(&spi->dev, "%s: Failed debugfs init\n", __func__); + dev_dbg(&spi->dev, "%s: Failed debugfs init\n", __func__); spi_message_init(&wcd_spi->msg1); spi_message_add_tail(&wcd_spi->xfer1, &wcd_spi->msg1); diff --git a/techpack/audio/config/sm8150auto.conf b/techpack/audio/config/sm8150auto.conf index 3bb8de2653c1..40686af82fd6 100644 --- a/techpack/audio/config/sm8150auto.conf +++ b/techpack/audio/config/sm8150auto.conf @@ -38,7 +38,7 @@ CONFIG_DTS_SRS_TM=y CONFIG_SND_SOC_MSM_STUB=y CONFIG_MSM_AVTIMER=y CONFIG_SND_SOC_MSM_HDMI_CODEC_RX=y -CONFIG_VOICE_MHI=y +# CONFIG_VOICE_MHI is not set CONFIG_SND_SOC_TFA9874=y CONFIG_SND_SOC_TAS2557=y CONFIG_TAS2557_REGMAP=y diff --git a/techpack/audio/config/sm8150autoconf.h b/techpack/audio/config/sm8150autoconf.h index 57c664d7979b..65dccbeb2ae1 100644 --- a/techpack/audio/config/sm8150autoconf.h +++ b/techpack/audio/config/sm8150autoconf.h @@ -51,11 +51,11 @@ #define CONFIG_SND_SOC_MSM_STUB 1 #define CONFIG_MSM_AVTIMER 1 #define CONFIG_SND_SOC_MSM_HDMI_CODEC_RX 1 -#define CONFIG_VOICE_MHI 1 +#define CONFIG_VOICE_MHI 0 +#define CONFIG_SND_SOC_TFA9874 1 #define CONFIG_SND_SOC_TAS2557 1 #define CONFIG_TAS2557_REGMAP 1 #define CONFIG_TAS2557_CODEC 1 #define CONFIG_TAS2557_MISC 1 #define CONFIG_SND_SOC_CS35L41 1 -#define CONFIG_SND_SOC_TFA9874 1 #define CONFIG_MSM_CSPL 1 diff --git a/techpack/audio/dsp/codecs/audio_alac.c b/techpack/audio/dsp/codecs/audio_alac.c index 1bc1a3eb530a..74effc293927 100644 --- 
a/techpack/audio/dsp/codecs/audio_alac.c +++ b/techpack/audio/dsp/codecs/audio_alac.c @@ -19,6 +19,7 @@ static struct miscdevice audio_alac_misc; static struct ws_mgr audio_alac_ws_mgr; +#ifdef CONFIG_DEBUG_FS static const struct file_operations audio_alac_debug_fops = { .read = audio_aio_debug_read, .open = audio_aio_debug_open, @@ -29,6 +30,7 @@ static struct dentry *config_debugfs_create_file(const char *name, void *data) return debugfs_create_file(name, S_IFREG | 0444, NULL, (void *)data, &audio_alac_debug_fops); } +#endif static int alac_channel_map(u8 *channel_mapping, uint32_t channels); @@ -268,8 +270,10 @@ static int audio_open(struct inode *inode, struct file *file) struct q6audio_aio *audio = NULL; int rc = 0; +#ifdef CONFIG_DEBUG_FS /* 4 bytes represents decoder number, 1 byte for terminate string */ char name[sizeof "msm_alac_" + 5]; +#endif audio = kzalloc(sizeof(struct q6audio_aio), GFP_KERNEL); if (!audio) @@ -330,7 +334,7 @@ static int audio_open(struct inode *inode, struct file *file) rc = -EACCES; goto fail; } - +#ifdef CONFIG_DEBUG_FS snprintf(name, sizeof(name), "msm_alac_%04x", audio->ac->session); audio->dentry = config_debugfs_create_file(name, (void *)audio); @@ -339,6 +343,7 @@ static int audio_open(struct inode *inode, struct file *file) pr_debug("%s:alacdec success mode[%d]session[%d]\n", __func__, audio->feedback, audio->ac->session); +#endif return rc; fail: q6asm_audio_client_free(audio->ac); diff --git a/techpack/audio/dsp/codecs/audio_ape.c b/techpack/audio/dsp/codecs/audio_ape.c index 3f81b535dad6..70432512870f 100644 --- a/techpack/audio/dsp/codecs/audio_ape.c +++ b/techpack/audio/dsp/codecs/audio_ape.c @@ -19,6 +19,7 @@ static struct miscdevice audio_ape_misc; static struct ws_mgr audio_ape_ws_mgr; +#ifdef CONFIG_DEBUG_FS static const struct file_operations audio_ape_debug_fops = { .read = audio_aio_debug_read, .open = audio_aio_debug_open, @@ -28,6 +29,7 @@ static struct dentry *config_debugfs_create_file(const char *name, void 
*data) return debugfs_create_file(name, S_IFREG | 0444, NULL, (void *)data, &audio_ape_debug_fops); } +#endif static long audio_ioctl_shared(struct file *file, unsigned int cmd, void *arg) @@ -250,8 +252,10 @@ static int audio_open(struct inode *inode, struct file *file) struct q6audio_aio *audio = NULL; int rc = 0; +#ifdef CONFIG_DEBUG_FS /* 4 bytes represents decoder number, 1 byte for terminate string */ char name[sizeof "msm_ape_" + 5]; +#endif audio = kzalloc(sizeof(struct q6audio_aio), GFP_KERNEL); if (!audio) @@ -313,6 +317,7 @@ static int audio_open(struct inode *inode, struct file *file) goto fail; } +#ifdef CONFIG_DEBUG_FS snprintf(name, sizeof(name), "msm_ape_%04x", audio->ac->session); audio->dentry = config_debugfs_create_file(name, (void *)audio); @@ -321,6 +326,7 @@ static int audio_open(struct inode *inode, struct file *file) pr_debug("%s:apedec success mode[%d]session[%d]\n", __func__, audio->feedback, audio->ac->session); +#endif return rc; fail: q6asm_audio_client_free(audio->ac); diff --git a/techpack/audio/dsp/codecs/audio_g711alaw.c b/techpack/audio/dsp/codecs/audio_g711alaw.c index fec46d24e2e8..56c6e3e05030 100644 --- a/techpack/audio/dsp/codecs/audio_g711alaw.c +++ b/techpack/audio/dsp/codecs/audio_g711alaw.c @@ -19,6 +19,7 @@ static struct miscdevice audio_g711alaw_misc; static struct ws_mgr audio_g711_ws_mgr; +#ifdef CONFIG_DEBUG_FS static const struct file_operations audio_g711_debug_fops = { .read = audio_aio_debug_read, .open = audio_aio_debug_open, @@ -29,6 +30,7 @@ static struct dentry *config_debugfs_create_file(const char *name, void *data) return debugfs_create_file(name, S_IFREG | 0444, NULL, (void *)data, &audio_g711_debug_fops); } +#endif static int g711_channel_map(u8 *channel_mapping, uint32_t channels); @@ -221,8 +223,11 @@ static int audio_open(struct inode *inode, struct file *file) { struct q6audio_aio *audio = NULL; int rc = 0; + +#ifdef CONFIG_DEBUG_FS /* 4 bytes represents decoder number, 1 byte for terminate string */ 
char name[sizeof "msm_g711_" + 5]; +#endif audio = kzalloc(sizeof(struct q6audio_aio), GFP_KERNEL); @@ -286,6 +291,7 @@ static int audio_open(struct inode *inode, struct file *file) goto fail; } +#ifdef CONFIG_DEBUG_FS snprintf(name, sizeof(name), "msm_g711_%04x", audio->ac->session); audio->dentry = config_debugfs_create_file(name, (void *)audio); @@ -294,6 +300,7 @@ static int audio_open(struct inode *inode, struct file *file) pr_debug("%s: g711dec success mode[%d]session[%d]\n", __func__, audio->feedback, audio->ac->session); +#endif return rc; fail: q6asm_audio_client_free(audio->ac); diff --git a/techpack/audio/dsp/codecs/audio_g711mlaw.c b/techpack/audio/dsp/codecs/audio_g711mlaw.c index c27768ae269f..ed7cf699d408 100644 --- a/techpack/audio/dsp/codecs/audio_g711mlaw.c +++ b/techpack/audio/dsp/codecs/audio_g711mlaw.c @@ -19,6 +19,7 @@ static struct miscdevice audio_g711mlaw_misc; static struct ws_mgr audio_g711_ws_mgr; +#ifdef CONFIG_DEBUG_FS static const struct file_operations audio_g711_debug_fops = { .read = audio_aio_debug_read, .open = audio_aio_debug_open, @@ -29,6 +30,7 @@ static struct dentry *config_debugfs_create_file(const char *name, void *data) return debugfs_create_file(name, S_IFREG | 0444, NULL, (void *)data, &audio_g711_debug_fops); } +#endif static int g711_channel_map(u8 *channel_mapping, uint32_t channels); @@ -220,8 +222,11 @@ static int audio_open(struct inode *inode, struct file *file) { struct q6audio_aio *audio = NULL; int rc = 0; + +#ifdef CONFIG_DEBUG_FS /* 4 bytes represents decoder number, 1 byte for terminate string */ char name[sizeof "msm_g711_" + 5]; +#endif audio = kzalloc(sizeof(struct q6audio_aio), GFP_KERNEL); @@ -285,6 +290,7 @@ static int audio_open(struct inode *inode, struct file *file) goto fail; } +#ifdef CONFIG_DEBUG_FS snprintf(name, sizeof(name), "msm_g711_%04x", audio->ac->session); audio->dentry = config_debugfs_create_file(name, (void *)audio); @@ -293,6 +299,7 @@ static int audio_open(struct inode *inode, 
struct file *file) pr_debug("%s: g711dec success mode[%d]session[%d]\n", __func__, audio->feedback, audio->ac->session); +#endif return rc; fail: q6asm_audio_client_free(audio->ac); diff --git a/techpack/audio/dsp/q6_init.c b/techpack/audio/dsp/q6_init.c index b2cb5a17a9d9..66643a044bf0 100644 --- a/techpack/audio/dsp/q6_init.c +++ b/techpack/audio/dsp/q6_init.c @@ -36,7 +36,9 @@ static int __init audio_q6_init(void) crus_sp_init(); #endif msm_mdf_init(); +#if CONFIG_VOICE_MHI voice_mhi_init(); +#endif elliptic_driver_init(); /* for mius start */ #ifdef CONFIG_US_PROXIMITY @@ -65,7 +67,9 @@ static void __exit audio_q6_exit(void) rtac_exit(); audio_cal_exit(); adsp_err_exit(); +#if CONFIG_VOICE_MHI voice_mhi_exit(); +#endif elliptic_driver_exit(); /* for mius start */ #ifdef CONFIG_US_PROXIMITY diff --git a/techpack/audio/dsp/q6afe.c b/techpack/audio/dsp/q6afe.c index d53d60cf149c..3eac0d4b66ac 100644 --- a/techpack/audio/dsp/q6afe.c +++ b/techpack/audio/dsp/q6afe.c @@ -580,7 +580,7 @@ static int32_t afe_callback(struct apr_client_data *data, void *priv) uint32_t *payload = data->payload; uint32_t param_id; -#ifdef CONFIG_MSM_CSPL +#if CONFIG_MSM_CSPL if (crus_afe_callback(data->payload, data->payload_size) == 0) return 0; #endif @@ -8647,7 +8647,7 @@ static void afe_release_uevent_data(struct kobject *kobj) #ifdef CONFIG_SND_SOC_TFA9874_FOR_DAVI -int send_tfa_cal_apr(void *buf, int cmd_size, bool bRead) +extern int send_tfa_cal_apr(void *buf, int cmd_size, bool bRead) { int32_t result = 0, port_id = AFE_PORT_ID_TFADSP_RX; uint32_t port_index = 0, payload_size = 0; @@ -8769,7 +8769,6 @@ int send_tfa_cal_apr(void *buf, int cmd_size, bool bRead) err: return result; } -EXPORT_SYMBOL(send_tfa_cal_apr); void send_tfa_cal_unmap_memory(void) { diff --git a/techpack/audio/dsp/q6asm.c b/techpack/audio/dsp/q6asm.c index 957a8546c708..6fc8694172d7 100644 --- a/techpack/audio/dsp/q6asm.c +++ b/techpack/audio/dsp/q6asm.c @@ -146,10 +146,10 @@ struct generic_get_data_ { }; static 
struct generic_get_data_ *generic_get_data; -#ifdef CONFIG_DEBUG_FS #define OUT_BUFFER_SIZE 56 #define IN_BUFFER_SIZE 24 +#ifdef CONFIG_DEBUG_FS static struct timeval out_cold_tv; static struct timeval out_warm_tv; static struct timeval out_cont_tv; @@ -163,6 +163,7 @@ static int in_cont_index; static int out_cold_index; static char *out_buffer; static char *in_buffer; +#endif static uint32_t adsp_reg_event_opcode[] = { ASM_STREAM_CMD_REGISTER_PP_EVENTS, @@ -278,6 +279,7 @@ uint8_t q6asm_get_stream_id_from_token(uint32_t token) } EXPORT_SYMBOL(q6asm_get_stream_id_from_token); +#ifdef CONFIG_DEBUG_FS static int audio_output_latency_dbgfs_open(struct inode *inode, struct file *file) { diff --git a/techpack/audio/dsp/q6voice.c b/techpack/audio/dsp/q6voice.c index fa42543a2532..bf51c888062b 100644 --- a/techpack/audio/dsp/q6voice.c +++ b/techpack/audio/dsp/q6voice.c @@ -29,7 +29,9 @@ #include #include #include "adsp_err.h" +#if CONFIG_VOICE_MHI #include +#endif #define TIMEOUT_MS 300 @@ -6892,11 +6894,12 @@ int voc_end_voice_call(uint32_t session_id) voice_destroy_mvm_cvs_session(v); +#if CONFIG_VOICE_MHI ret = voice_mhi_end(); if (ret < 0) pr_debug("%s: voice_mhi_end failed! %d\n", __func__, ret); - +#endif v->voc_state = VOC_RELEASE; } else { pr_err("%s: Error: End voice called in state %d\n", @@ -7232,12 +7235,14 @@ int voc_start_voice_call(uint32_t session_id) __func__, ret); } +#if CONFIG_VOICE_MHI ret = voice_mhi_start(); if (ret < 0) { pr_debug("%s: voice_mhi_start failed! 
%d\n", __func__, ret); goto fail; } +#endif ret = voice_create_mvm_cvs_session(v); if (ret < 0) { diff --git a/techpack/audio/include/elliptic/elliptic_device.h b/techpack/audio/include/elliptic/elliptic_device.h index cd113c5cad69..b48184c4808f 100644 --- a/techpack/audio/include/elliptic/elliptic_device.h +++ b/techpack/audio/include/elliptic/elliptic_device.h @@ -48,7 +48,7 @@ extern struct class *elliptic_class; pr_warn("[ELUS] : (%s) : " string "\n", __func__, ##arg) #define EL_PRINT_I(string, arg...) \ - pr_info("[ELUS] : (%s) : " string "\n", __func__, ##arg) + pr_debug("[ELUS] : (%s) : " string "\n", __func__, ##arg) #define EL_PRINT_D(string, arg...) \ pr_debug("[ELUS] : (%s) : " string "\n", __func__, ##arg) diff --git a/techpack/audio/ipc/apr.c b/techpack/audio/ipc/apr.c index b66bd6e63cb6..92e68d229552 100644 --- a/techpack/audio/ipc/apr.c +++ b/techpack/audio/ipc/apr.c @@ -41,7 +41,7 @@ static struct apr_q6 q6; static struct apr_client client[APR_DEST_MAX][APR_CLIENT_MAX]; -static void *apr_pkt_ctx; +static void __maybe_unused *apr_pkt_ctx; static wait_queue_head_t modem_wait; static bool is_modem_up; static char *subsys_name = NULL; @@ -89,12 +89,7 @@ static const struct file_operations apr_debug_ops = { }; #endif -#define APR_PKT_INFO(x...) \ -do { \ - if (apr_pkt_ctx) \ - ipc_log_string(apr_pkt_ctx, ": "x); \ -} while (0) - +#define APR_PKT_INFO(x...) 
((void)0) struct apr_svc_table { char name[64]; @@ -733,7 +728,7 @@ void apr_cb_func(void *buf, int len, void *priv) if (unlikely(apr_cf_debug)) { if (hdr->opcode == APR_BASIC_RSP_RESULT && data.payload) { - uint32_t *ptr = data.payload; + uint32_t __maybe_unused *ptr = data.payload; APR_PKT_INFO( "Rx: src_addr[0x%X] dest_addr[0x%X] opcode[0x%X] token[0x%X] rc[0x%X]", @@ -1124,9 +1119,9 @@ static int __init apr_debug_init(void) } #else static int __init apr_debug_init(void) -( +{ return 0; -) +} #endif static void apr_cleanup(void) @@ -1147,7 +1142,9 @@ static void apr_cleanup(void) mutex_destroy(&client[i][j].svc[k].m_lock); } } +#ifdef CONFIG_DEBUG_FS debugfs_remove(debugfs_apr_debug); +#endif } static int apr_probe(struct platform_device *pdev) @@ -1183,7 +1180,7 @@ static int apr_probe(struct platform_device *pdev) apr_pkt_ctx = ipc_log_context_create(APR_PKT_IPC_LOG_PAGE_CNT, "apr", 0); if (!apr_pkt_ctx) - pr_err("%s: Unable to create ipc log context\n", __func__); + pr_debug("%s: Unable to create ipc log context\n", __func__); spin_lock(&apr_priv->apr_lock); apr_priv->is_initial_boot = true; diff --git a/techpack/data/drivers/generic-sw-bridge/gsb_debugfs.h b/techpack/data/drivers/generic-sw-bridge/gsb_debugfs.h index 9ad59d8b4be4..748ac570cb2d 100644 --- a/techpack/data/drivers/generic-sw-bridge/gsb_debugfs.h +++ b/techpack/data/drivers/generic-sw-bridge/gsb_debugfs.h @@ -28,7 +28,7 @@ static void *ipc_gsb_log_ctxt_low; /* * Debug output verbosity level. */ -#define DEBUG_LEVEL 3 +#define DEBUG_LEVEL 0 #if (DEBUG_LEVEL < 1) #define DEBUG_ERROR(s, ...) 
diff --git a/techpack/data/drivers/rmnet/perf/rmnet_perf_config.c b/techpack/data/drivers/rmnet/perf/rmnet_perf_config.c index be245d33d9e6..7eb64b14f300 100644 --- a/techpack/data/drivers/rmnet/perf/rmnet_perf_config.c +++ b/techpack/data/drivers/rmnet/perf/rmnet_perf_config.c @@ -26,7 +26,7 @@ MODULE_LICENSE("GPL v2"); -unsigned int temp_debug __read_mostly = 1; +unsigned int temp_debug __read_mostly = 0; module_param(temp_debug, uint, 0644); MODULE_PARM_DESC(temp_debug, "temp_debug"); @@ -397,7 +397,7 @@ static int rmnet_perf_config_notify_cb(struct notifier_block *nb, switch (event) { case NETDEV_UNREGISTER: - pr_info("%s(): rmnet_perf netdevice unregister, name = %s\n", + pr_debug("%s(): rmnet_perf netdevice unregister, name = %s\n", __func__, dev->name); if (perf && rmnet_is_real_dev_registered(dev) && rmnet_perf_config_hook_registered() && @@ -419,7 +419,7 @@ static int rmnet_perf_config_notify_cb(struct notifier_block *nb, } break; case NETDEV_REGISTER: - pr_info("%s(): rmnet_perf netdevice register, name = %s\n", + pr_debug("%s(): rmnet_perf netdevice register, name = %s\n", __func__, dev->name); /* Check prevents us from allocating resources for every * interface