Synchronize code for OnePlus 8 IN2013_11_F.13/OnePlus 8Pro IN2023_11_F.13 and OnePlus 8T KB2003_11_F.13

Change-Id: I7432dcd8d139edcb8ea1a5e6503950a528acc647
This commit is contained in:
pswbuild
2022-12-13 20:06:33 +08:00
committed by Michael Bestas
parent 4ca330d46a
commit cdf57c4ee1
864 changed files with 300617 additions and 1736 deletions

7
.gitignore vendored
View File

@@ -140,3 +140,10 @@ kernel/configs/android-*.cfg
#Ignoring Android.bp link file
#
Android.bp
# ln the wakelock_profiler
drivers/soc/oplus/owakelock
include/soc/oplus/oplus_wakelock_profiler.h
# ignore net/oplus_modules
net/oplus_modules

View File

@@ -208,7 +208,7 @@ $(KERNEL_HEADERS_INSTALL): $(KERNEL_OUT)
# Creating a dtb.img once the kernel is compiled if TARGET_KERNEL_APPEND_DTB is set to be false
$(INSTALLED_DTBIMAGE_TARGET): $(TARGET_PREBUILT_INT_KERNEL)
cat $(KERNEL_OUT)/arch/$(KERNEL_ARCH)/boot/dts/vendor/qcom/*.dtb > $@
cat $(KERNEL_OUT)/arch/$(KERNEL_ARCH)/boot/dts/vendor/*/*.dtb > $@
.PHONY: kerneltags
kerneltags: $(KERNEL_OUT) $(KERNEL_CONFIG)

79
Kconfig
View File

@@ -7,6 +7,85 @@ mainmenu "Linux/$(ARCH) $(KERNELVERSION) Kernel Configuration"
comment "Compiler: $(CC_VERSION_TEXT)"
#ifdef CONFIG_OPLUS_SYSTEM_KERNEL
# Platform-discrimination switches: QCOM defaults on and MTK defaults off,
# so a Qualcomm tree selects OPLUS_SYSTEM_KERNEL_QCOM with no extra input.
config OPLUS_SYSTEM_KERNEL_QCOM
bool "config oplus system feature "
default y
help
define this config to Distinguish between qcom and mtk platform
config OPLUS_SYSTEM_KERNEL_MTK
bool "config oplus system feature "
default n
help
define this config to Distinguish between qcom and mtk platform
# The five string symbols below mirror make/environment variables of the
# same name, exported by the top-level Makefile (driven by AGING_DEBUG_MASK
# and TARGET_MEMLEAK_DETECT_TEST).  A value of "true" enables the matching
# *_DEBUG bool further down through its 'default y if ... = "true"' clause.
config OPLUS_SLUB_TEST
string
default "$(OPLUS_SLUB_TEST)"
config OPLUS_KASAN_TEST
string
default "$(OPLUS_KASAN_TEST)"
config OPLUS_KMEMLEAK_TEST
string
default "$(OPLUS_KMEMLEAK_TEST)"
config OPLUS_AGING_TEST
string
default "$(OPLUS_AGING_TEST)"
config OPLUS_PAGEOWNER_TEST
string
default "$(OPLUS_PAGEOWNER_TEST)"
# Aging (long-run stress) builds: pull in RTB/IPC logging, SCSI/UFS command
# logging, kprobes and MHI debug used by stability testing.
config OPLUS_AGING_DEBUG
bool "oplus aging add config"
select IPC_LOGGING
select QCOM_RTB
select QCOM_RTB_SEPARATE_CPUS
select SCSI_LOGGING
select SCSI_UFSHCD_CMD_LOGGING
select KPROBES
select MHI_DEBUG
default y if OPLUS_AGING_TEST = "true"
default n
# KASAN build (generic mode); the KCOV selects are intentionally left
# disabled here.
config OPLUS_KASAN_DEBUG
bool "oplus kasan debug"
select KASAN
select KASAN_GENERIC
#select KCOV
#select KCOV_ENABLE_COMPARISONS
default y if OPLUS_KASAN_TEST = "true"
default n
# kmemleak build: also enables SLUB debugging and slab tracing.
config OPLUS_KMEMLEAK_DEBUG
bool "oplus kmemleak debug"
select DEBUG_KMEMLEAK
select SLUB_DEBUG
select SLABTRACE_DEBUG
default y if OPLUS_KMEMLEAK_TEST = "true"
default n
# SLUB-only debug build (debug on by default, panic on corruption).
config OPLUS_SLUB_DEBUG
bool "oplus slub debug"
select SLUB_DEBUG
select SLUB_DEBUG_ON
select SLUB_DEBUG_PANIC_ON
default y if OPLUS_SLUB_TEST = "true"
default n
# page_owner build for tracking page allocation call sites.
config OPLUS_PAGEOWNER_DEBUG
bool "oplus pageowner debug"
select PAGE_OWNER
select PAGE_OWNER_ENABLE_DEFAULT
default y if OPLUS_PAGEOWNER_TEST = "true"
default n
#endif /*CONFIG_OPLUS_SYSTEM_KERNEL*/
source "scripts/Kconfig.include"
source "init/Kconfig"

View File

@@ -450,7 +450,82 @@ KBUILD_LDFLAGS :=
GCC_PLUGINS_CFLAGS :=
CLANG_FLAGS :=
export ARCH SRCARCH CONFIG_SHELL HOSTCC KBUILD_HOSTCFLAGS CROSS_COMPILE LD CC
# ifdef VENDOR_EDIT
# Define VENDOR_EDIT for every compilation context (kernel objects, modules,
# and the preprocessor) so #ifdef VENDOR_EDIT blocks compile in.
KBUILD_CFLAGS += -DVENDOR_EDIT
KBUILD_CPPFLAGS += -DVENDOR_EDIT
CFLAGS_KERNEL += -DVENDOR_EDIT
CFLAGS_MODULE += -DVENDOR_EDIT
# endif
ifeq ($(BRAND_SHOW_FLAG),oneplus)
KBUILD_CFLAGS += -DOPLUS_CUSTOM_OP_DEF
endif
# ifdef OPLUS_FEATURE_POWER_EFFICIENCY
KBUILD_CFLAGS += -DOPLUS_FEATURE_POWER_EFFICIENCY
KBUILD_CPPFLAGS += -DOPLUS_FEATURE_POWER_EFFICIENCY
CFLAGS_KERNEL += -DOPLUS_FEATURE_POWER_EFFICIENCY
CFLAGS_MODULE += -DOPLUS_FEATURE_POWER_EFFICIENCY
# endif
# Optional feature-macro configuration; "-include" ignores a missing file.
-include OplusKernelEnvConfig.mk
#ifdef VENDOR_EDIT
# Aging (long-run stress) builds carry "Aging" in SPECIAL_VERSION.
ifneq (,$(findstring Aging,$(SPECIAL_VERSION)))
OPLUS_F2FS_DEBUG := true
endif
export OPLUS_F2FS_DEBUG
#endif /* VENDOR_EDIT */
#ifdef OPLUS_BUG_STABILITY
#Add for Debug Config, slub/kmemleak/kasan config
# AGING_DEBUG_MASK selects exactly one debug profile:
#   1 = RTB + memleak detect, 2 = KASAN, 3 = kmemleak,
#   4 = RTB + SLUB debug,     5 = RTB + page owner.
ifeq ($(AGING_DEBUG_MASK),1)
#Agingtest enable rtb
OPLUS_MEMLEAK_DETECT := true
OPLUS_AGING_TEST := true
endif
ifeq ($(AGING_DEBUG_MASK),2)
#enable kasan
OPLUS_KASAN_TEST := true
endif
ifeq ($(AGING_DEBUG_MASK),3)
#enable kmemleak
OPLUS_KMEMLEAK_TEST := true
endif
ifeq ($(AGING_DEBUG_MASK),4)
#enable rtb
OPLUS_AGING_TEST := true
#enable slub debug
OPLUS_SLUB_TEST := true
endif
ifeq ($(AGING_DEBUG_MASK),5)
#enable rtb
OPLUS_AGING_TEST := true
#enable page owner
OPLUS_PAGEOWNER_TEST := true
endif
# Exported so Kconfig's $(OPLUS_*_TEST) string defaults can read them.
export OPLUS_AGING_TEST OPLUS_KASAN_TEST OPLUS_KMEMLEAK_TEST OPLUS_SLUB_TEST OPLUS_PAGEOWNER_TEST
#endif
#ifdef OPLUS_FEATURE_MEMLEAK_DETECT
#Add for memleak test
# TARGET_MEMLEAK_DETECT_TEST: 0 forces the detector off, 1 forces it on
# (plus SLUB debug); any other value leaves OPLUS_MEMLEAK_DETECT as set by
# the AGING_DEBUG_MASK handling above.
ifeq ($(TARGET_MEMLEAK_DETECT_TEST),0)
OPLUS_MEMLEAK_DETECT := false
else ifeq ($(TARGET_MEMLEAK_DETECT_TEST),1)
OPLUS_MEMLEAK_DETECT := true
OPLUS_SLUB_TEST := true
endif
#Add for memleak test
export OPLUS_MEMLEAK_DETECT
#endif
export ARCH SRCARCH CONFIG_SHELL HOSTCC KBUILD_HOSTCFLAGS CROSS_COMPILE AS LD CC
export CPP AR NM STRIP OBJCOPY OBJDUMP OBJSIZE READELF KBUILD_HOSTLDFLAGS KBUILD_HOSTLDLIBS
export MAKE LEX YACC AWK GENKSYMS INSTALLKERNEL PERL PYTHON PYTHON2 PYTHON3 UTS_MACHINE
export HOSTCXX KBUILD_HOSTCXXFLAGS LDFLAGS_MODULE CHECK CHECKFLAGS

189
OplusKernelEnvConfig.mk Normal file
View File

@@ -0,0 +1,189 @@
# Copyright (C), 2008-2030, OPLUS Mobile Comm Corp., Ltd
### All rights reserved.
###
### File: - OplusKernelEnvConfig.mk
### Description:
### you can get the oplus feature variables set in android side in this file
### this file will add global macro for common oplus added feature
### BSP team can do customization by referring the feature variables
### Version: 1.0
### Date: 2020-03-18
### Author: Liang.Sun
###
### ------------------------------- Revision History: ----------------------------
### <author> <date> <version> <desc>
### ------------------------------------------------------------------------------
##################################################################################
# Optional per-product feature list handed over from the Android build;
# "-include" does not fail if the file is absent.
-include oplus_native_features.mk
###ifdef OPLUS_ARCH_INJECT
OPLUS_CONNECTIVITY_NATIVE_FEATURE_SET :=
##Add OPLUS Debug/Feature Macro Support for kernel/driver
##ifeq ($(OPLUS_FEATURE_TEST), yes)
## OPLUS_CONNECTIVITY_NATIVE_FEATURE_SET += OPLUS_FEATURE_TEST
##endif
ifeq ($(OPLUS_FEATURE_WIFI_MTUDETECT), yes)
OPLUS_CONNECTIVITY_NATIVE_FEATURE_SET += OPLUS_FEATURE_WIFI_MTUDETECT
endif
# Append -D<feature> to all four compiler-flag variables for every collected
# connectivity feature.  The $(eval ...) calls do the work as parse-time side
# effects; the surrounding "$( ... )" expands to nothing (each eval expands
# empty) and effectively serves only as grouping — the second foreach below
# omits it.  NOTE(review): harmless but confusing; candidate for cleanup.
$(foreach myfeature,$(OPLUS_CONNECTIVITY_NATIVE_FEATURE_SET),\
$( \
$(eval KBUILD_CFLAGS += -D$(myfeature)) \
$(eval KBUILD_CPPFLAGS += -D$(myfeature)) \
$(eval CFLAGS_KERNEL += -D$(myfeature)) \
$(eval CFLAGS_MODULE += -D$(myfeature)) \
) \
)
###endif OPLUS_ARCH_INJECT
# Whitelist of feature macros that are unconditionally defined for every
# kernel and module compilation unit.  ("MCROS" is a long-standing typo for
# "MACROS"; the name is kept since other makefiles may reference it.)
ALLOWED_MCROS := OPLUS_FEATURE_FG_IO_OPT \
OPLUS_FEATURE_SPECIALOPT \
OPLUS_FEATURE_PERFORMANCE \
OPLUS_FEATURE_STORAGE_TOOL \
OPLUS_FEATURE_UFS_DRIVER \
OPLUS_FEATURE_UFS_SHOW_LATENCY \
OPLUS_FEATURE_UFSPLUS \
OPLUS_FEATURE_PADL_STATISTICS \
OPLUS_FEATURE_EMMC_SDCARD_OPTIMIZE \
OPLUS_FEATURE_EMMC_DRIVER \
OPLUS_FEATURE_HEALTHINFO \
OPLUS_FEATURE_TASK_CPUSTATS \
OPLUS_FEATURE_HANS_FREEZE \
OPLUS_FEATURE_SCHED_ASSIST \
OPLUS_FEATURE_IOMONITOR \
OPLUS_FEATURE_TP_BSPFWUPDATE \
OPLUS_FEATURE_LOWMEM_DBG \
OPLUS_FEATURE_QCOM_PMICWD \
OPLUS_FEATURE_CHG_BASIC \
OPLUS_FEATURE_NWPOWER \
OPLUS_FEATURE_WIFI_BDF \
OPLUS_FEATURE_CONNFCSOFT \
OPLUS_FEATURE_AGINGTEST \
OPLUS_FEATURE_SENSOR_SMEM \
OPLUS_FEATURE_SSR \
OPLUS_FEATURE_TP_BASIC \
OPLUS_FEATURE_EDTASK_IMPROVE \
OPLUS_FEATURE_WIFI_SLA \
OPLUS_FEATURE_WIFI_ROUTERBOOST \
OPLUS_FEATURE_IPV6_OPTIMIZE \
OPLUS_FEATURE_DATA_EVAL \
OPLUS_FEATURE_DHCP \
OPLUS_FEATURE_PHOENIX \
OPLUS_FEATURE_PHOENIX_REBOOT_SPEED \
OPLUS_FEATURE_KMSG_WB \
OPLUS_FEATURE_SHUTDOWN_SPEED \
OPLUS_FEATURE_OLC \
OPLUS_FEATURE_DUMPDEVICE \
OPLUS_FEATURE_SAUPWK \
OPLUS_FEATURE_MEMORY_ISOLATE \
OPLUS_FEATURE_MULTI_KSWAPD \
OPLUS_FEATURE_WIFI_MTUDETECT \
OPLUS_FEATURE_XTOWNER_INPUT \
OPLUS_FEATURE_SELINUX_CONTROL_LOG \
OPLUS_FEATURE_PXLW_IRIS5 \
OPLUS_FEATURE_MULTI_FREEAREA \
OPLUS_FEATURE_VIRTUAL_RESERVE_MEMORY \
OPLUS_FEATURE_GPU_MINIDUMP \
OPLUS_FEATURE_PROCESS_RECLAIM \
OPLUS_FEATURE_ZRAM_OPT \
OPLUS_FEATURE_AUDIO_FTM \
OPLUS_FEATURE_SPEAKER_MUTE \
OPLUS_FEATURE_MM_FEEDBACK \
OPLUS_FEATURE_MI2S_SLAVE \
OPLUS_FEATURE_KTV \
OPLUS_FEATURE_QCOM_WATCHDOG \
OPLUS_FEATURE_MEMLEAK_DETECT \
OPLUS_FEATURE_EXFAT_SUPPORT \
OPLUS_FEATURE_SDCARDFS_SUPPORT \
OPLUS_FEATURE_CAMERA_OIS \
OPLUS_BUG_COMPATIBILITY \
OPLUS_BUG_STABILITY \
OPLUS_BUG_DEBUG \
OPLUS_ARCH_INJECT \
OPLUS_ARCH_EXTENDS \
OPLUS_FEATURE_AUDIODETECT \
VENDOR_EDIT \
OPLUS_FEATURE_DC \
OPLUS_FEATURE_POWERINFO_STANDBY \
OPLUS_FEATURE_POWERINFO_RPMH \
OPLUS_FEATURE_CAMERA_COMMON \
OPLUS_FEATURE_ADSP_RECOVERY \
OPLUS_FEATURE_SMARTPA_PM \
OPLUS_FEATURE_IMPEDANCE_MATCH \
OPLUS_FEATURE_MODEM_MINIDUMP \
OPLUS_FEATURE_THEIA \
OPLUS_FEATURE_POWER_CPUFREQ \
OPLUS_FEATURE_MIDAS \
OPLUS_FEATURE_WIFI_OPLUSWFD \
OPLUS_FEATURE_WIFI_DUALSTA_AP_BLACKLIST \
OPLUS_FEATURE_WIFI_DCS_SWITCH \
OPLUS_FEATURE_IM \
OPLUS_FEATURE_TPD \
OPLUS_FEATURE_APP_MONITOR\
OPLUS_FEATURE_RT_INFO \
OPLUS_FEATURE_MIC_VA_MIC_CLK_SWITCH
# Display features whitelisted only when the Android side opts in.
ifeq ($(OPLUS_FEATURE_ADFR_KERNEL), yes)
$(warning add OPLUS_FEATURE_ADFR in kernel)
ALLOWED_MCROS += OPLUS_FEATURE_ADFR
endif
ifeq ($(OPLUS_FEATURE_GAMMA_SWITCH_KERNEL), yes)
$(warning add OPLUS_FEATURE_GAMMA_SWITCH_KERNEL in kernel)
ALLOWED_MCROS += OPLUS_FEATURE_GAMMA_SWITCH
endif
# Turn every whitelisted name into a -D macro on all four flag variables;
# the $(warning) makes each addition visible in the build log.
$(foreach myfeature,$(ALLOWED_MCROS),\
$(warning make $(myfeature) to be a macro here) \
$(eval KBUILD_CFLAGS += -D$(myfeature)) \
$(eval KBUILD_CPPFLAGS += -D$(myfeature)) \
$(eval CFLAGS_KERNEL += -D$(myfeature)) \
$(eval CFLAGS_MODULE += -D$(myfeature)) \
)
# BSP team can do customization by referring the feature variables
# Security-guard features: each one exports a CONFIG_* variable for
# sub-makefiles and defines the matching preprocessor macro.
ifeq ($(OPLUS_FEATURE_SECURE_GUARD),yes)
export CONFIG_OPLUS_SECURE_GUARD=y
KBUILD_CFLAGS += -DCONFIG_OPLUS_SECURE_GUARD
endif
ifeq ($(OPLUS_FEATURE_SECURE_ROOTGUARD),yes)
export CONFIG_OPLUS_ROOT_CHECK=y
KBUILD_CFLAGS += -DCONFIG_OPLUS_ROOT_CHECK
endif
ifeq ($(OPLUS_FEATURE_SECURE_MOUNTGUARD),yes)
export CONFIG_OPLUS_MOUNT_BLOCK=y
KBUILD_CFLAGS += -DCONFIG_OPLUS_MOUNT_BLOCK
endif
ifeq ($(OPLUS_FEATURE_SECURE_EXECGUARD),yes)
export CONFIG_OPLUS_EXECVE_BLOCK=y
KBUILD_CFLAGS += -DCONFIG_OPLUS_EXECVE_BLOCK
KBUILD_CFLAGS += -DCONFIG_OPLUS_EXECVE_REPORT
endif
ifeq ($(OPLUS_FEATURE_SECURE_KEVENTUPLOAD),yes)
export CONFIG_OPLUS_KEVENT_UPLOAD=y
KBUILD_CFLAGS += -DCONFIG_OPLUS_KEVENT_UPLOAD
endif
ifeq ($(OPLUS_FEATURE_SECURE_KEYINTERFACESGUARD),yes)
KBUILD_CFLAGS += -DOPLUS_DISALLOW_KEY_INTERFACES
endif
# Display/audio feature macros gated on Android-side switches.
ifeq ($(OPLUS_FEATURE_AOD_RAMLESS),yes)
KBUILD_CFLAGS += -DOPLUS_FEATURE_AOD_RAMLESS
KBUILD_CPPFLAGS += -DOPLUS_FEATURE_AOD_RAMLESS
CFLAGS_KERNEL += -DOPLUS_FEATURE_AOD_RAMLESS
CFLAGS_MODULE += -DOPLUS_FEATURE_AOD_RAMLESS
endif
ifeq ($(OPLUS_FEATURE_OP_SPECIFIC_AUDIO_KERNEL),yes)
KBUILD_CFLAGS += -DOPLUS_FEATURE_OP_SPECIFIC_AUDIO_KERNEL
endif

View File

@@ -1008,6 +1008,15 @@ config RELR
well as compatible NM and OBJCOPY utilities (llvm-nm and llvm-objcopy
are compatible).
config ARCH_HAS_NONLEAF_PMD_YOUNG
bool
depends on PGTABLE_LEVELS > 2
help
Architectures that select this option are capable of setting the
accessed bit in non-leaf PMD entries when using them as part of linear
address translations. Page table walkers that clear the accessed bit
may use this capability to reduce their search space.
source "kernel/gcov/Kconfig"
source "scripts/gcc-plugins/Kconfig"

View File

@@ -1421,6 +1421,7 @@ config RELOCATABLE
config RANDOMIZE_BASE
bool "Randomize the address of the kernel image"
depends on !KASAN
select ARM64_MODULE_PLTS if MODULES
select RELOCATABLE
help

View File

@@ -378,7 +378,9 @@ CONFIG_SMB1355_SLAVE_CHARGER=y
CONFIG_QPNP_QG=y
CONFIG_SMB1398_CHARGER=y
CONFIG_THERMAL=y
CONFIG_THERMAL_STATISTICS=y
#ifndef OPLUS_FEATURE_THERMAL_STATISTICS
#CONFIG_THERMAL_STATISTICS=y
#endif
CONFIG_THERMAL_EMERGENCY_POWEROFF_DELAY_MS=10000
CONFIG_THERMAL_WRITABLE_TRIPS=y
CONFIG_THERMAL_GOV_USER_SPACE=y
@@ -632,6 +634,10 @@ CONFIG_OVERLAY_FS=y
CONFIG_INCREMENTAL_FS=y
CONFIG_MSDOS_FS=y
CONFIG_VFAT_FS=y
#ifdef OPLUS_FEATURE_EXFAT_SUPPORT
CONFIG_NLS_UTF8=y
CONFIG_EXFAT_FS=y
# endif
CONFIG_TMPFS=y
CONFIG_TMPFS_POSIX_ACL=y
CONFIG_EFIVAR_FS=y

View File

@@ -391,7 +391,9 @@ CONFIG_SMB1355_SLAVE_CHARGER=y
CONFIG_QPNP_QG=y
CONFIG_SMB1398_CHARGER=y
CONFIG_THERMAL=y
CONFIG_THERMAL_STATISTICS=y
#ifndef OPLUS_FEATURE_THERMAL_STATISTICS
#CONFIG_THERMAL_STATISTICS=y
#endif
CONFIG_THERMAL_EMERGENCY_POWEROFF_DELAY_MS=10000
CONFIG_THERMAL_WRITABLE_TRIPS=y
CONFIG_THERMAL_GOV_USER_SPACE=y
@@ -661,6 +663,10 @@ CONFIG_OVERLAY_FS=y
CONFIG_INCREMENTAL_FS=y
CONFIG_MSDOS_FS=y
CONFIG_VFAT_FS=y
#ifdef OPLUS_FEATURE_EXFAT_SUPPORT
CONFIG_NLS_UTF8=y
CONFIG_EXFAT_FS=y
# endif
CONFIG_TMPFS=y
CONFIG_TMPFS_POSIX_ACL=y
CONFIG_EFIVAR_FS=y

View File

@@ -654,6 +654,10 @@ CONFIG_OVERLAY_FS=y
CONFIG_INCREMENTAL_FS=y
CONFIG_MSDOS_FS=y
CONFIG_VFAT_FS=y
#ifdef OPLUS_FEATURE_EXFAT_SUPPORT
CONFIG_NLS_UTF8=y
CONFIG_EXFAT_FS=y
# endif
CONFIG_TMPFS=y
CONFIG_TMPFS_POSIX_ACL=y
CONFIG_ECRYPT_FS=y

View File

@@ -685,6 +685,10 @@ CONFIG_OVERLAY_FS=y
CONFIG_INCREMENTAL_FS=y
CONFIG_MSDOS_FS=y
CONFIG_VFAT_FS=y
#ifdef OPLUS_FEATURE_EXFAT_SUPPORT
CONFIG_NLS_UTF8=y
CONFIG_EXFAT_FS=y
# endif
CONFIG_TMPFS=y
CONFIG_TMPFS_POSIX_ACL=y
CONFIG_EFIVAR_FS=y

View File

@@ -5,6 +5,7 @@ CONFIG_HIGH_RES_TIMERS=y
CONFIG_PREEMPT=y
CONFIG_IRQ_TIME_ACCOUNTING=y
CONFIG_SCHED_WALT=y
CONFIG_MMAP_LOCK_OPT=y
CONFIG_TASKSTATS=y
CONFIG_TASK_XACCT=y
CONFIG_TASK_IO_ACCOUNTING=y
@@ -21,6 +22,10 @@ CONFIG_MEMCG_SWAP=y
CONFIG_BLK_CGROUP=y
CONFIG_CGROUP_FREEZER=y
CONFIG_CPUSETS=y
#ifdef OPLUS_FEATURE_TASK_CPUSTATS
CONFIG_OPLUS_SCHED=y
CONFIG_OPLUS_CTP=y
#endif /* OPLUS_FEATURE_TASK_CPUSTATS */
CONFIG_CGROUP_CPUACCT=y
CONFIG_CGROUP_BPF=y
CONFIG_SCHED_CORE_CTL=y
@@ -53,7 +58,7 @@ CONFIG_SCHED_MC=y
CONFIG_NR_CPUS=8
CONFIG_SECCOMP=y
CONFIG_OKL4_GUEST=y
# CONFIG_UNMAP_KERNEL_AT_EL0 is not set
CONFIG_UNMAP_KERNEL_AT_EL0=y
CONFIG_ARM64_SSBD=y
CONFIG_ARMV8_DEPRECATED=y
CONFIG_SWP_EMULATION=y
@@ -82,6 +87,9 @@ CONFIG_CPU_FREQ_GOV_ONDEMAND=y
CONFIG_CPU_FREQ_GOV_CONSERVATIVE=y
CONFIG_CPU_BOOST=y
CONFIG_CPU_FREQ_GOV_SCHEDUTIL=y
#ifdef VENDOR_EDIT
CONFIG_CPU_FREQ_STAT=y
#endif
CONFIG_ARM_QCOM_CPUFREQ_HW=y
CONFIG_MSM_TZ_LOG=y
CONFIG_ARM64_CRYPTO=y
@@ -92,6 +100,10 @@ CONFIG_CRYPTO_AES_ARM64_CE_CCM=y
CONFIG_CRYPTO_AES_ARM64_CE_BLK=y
CONFIG_CRYPTO_AES_ARM64_NEON_BLK=y
CONFIG_PANIC_ON_REFCOUNT_ERROR=y
#ifdef OPLUS_FEATURE_UID_PERF
CONFIG_OPLUS_FEATURE_UID_PERF=y
#endif
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
CONFIG_MODVERSIONS=y
@@ -105,7 +117,10 @@ CONFIG_CFQ_GROUP_IOSCHED=y
CONFIG_IOSCHED_BFQ=y
CONFIG_BFQ_GROUP_IOSCHED=y
# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
CONFIG_MEMORY_HOTPLUG=y
#ifdef OPLUS_DEBUG_STABILITY
##CONFIG_MEMORY_HOTPLUG=y
# CONFIG_MEMORY_HOTPLUG is not set
# endif
CONFIG_MEMORY_HOTPLUG_DEFAULT_ONLINE=y
CONFIG_MEMORY_HOTPLUG_MOVABLE_NODE=y
CONFIG_MEMORY_HOTREMOVE=y
@@ -283,6 +298,10 @@ CONFIG_MHI_NETDEV=y
CONFIG_MHI_UCI=y
CONFIG_MHI_SATELLITE=y
CONFIG_ZRAM=y
#ifdef OPLUS_FEATURE_ZRAM_WRITEBACK
#CONFIG_ZRAM_WRITEBACK=y
#CONFIG_ZWB_HANDLE=y
#endif /* OPLUS_FEATURE_ZRAM_WRITEBACK */
CONFIG_BLK_DEV_LOOP=y
CONFIG_BLK_DEV_LOOP_MIN_COUNT=16
CONFIG_BLK_DEV_RAM=y
@@ -305,6 +324,16 @@ CONFIG_SCSI_SCAN_ASYNC=y
CONFIG_SCSI_UFSHCD=y
CONFIG_SCSI_UFSHCD_PLATFORM=y
CONFIG_SCSI_UFS_QCOM=y
CONFIG_SCSI_UFS_QCOM_ICE=y
#ifdef OPLUS_FEATURE_UFSPLUS
CONFIG_UFSFEATURE=y
# CONFIG_UFSHPB is not set
CONFIG_UFSTW=y
CONFIG_UFSTW_IGNORE_GUARANTEE_BIT=y
CONFIG_UFSTW_BOOT_ENABLED=y
CONFIG_UFSHID=y
CONFIG_UFSHID_POC=y
#endif /*OPLUS_FEATURE_UFSPLUS*/
CONFIG_SCSI_UFS_CRYPTO=y
CONFIG_SCSI_UFS_CRYPTO_QTI=y
CONFIG_MD=y
@@ -357,6 +386,47 @@ CONFIG_TABLET_USB_HANWANG=y
CONFIG_TABLET_USB_KBTAB=y
CONFIG_INPUT_TOUCHSCREEN=y
CONFIG_TOUCHSCREEN_FTS=y
#ifdef OPLUS_FEATURE_TP_BASIC
CONFIG_TOUCHPANEL_SAMSUNG_S6SY791=y
CONFIG_TOUCHPANEL_SAMSUNG_S6SY792=y
CONFIG_TOUCHPANEL_SAMSUNG_S6SY771=y
CONFIG_TOUCHPANEL_SYNAPTICS=y
CONFIG_TOUCHPANEL_SYNAPTICS_TCM_ONCELL=y
CONFIG_TOUCHPANEL_SYNAPTICS_S3706=y
CONFIG_TOUCHPANEL_SAMSUNG=y
CONFIG_TOUCHPANEL_OPLUS=y
CONFIG_TOUCHPANEL_GOODIX=y
CONFIG_TOUCHPANEL_GOODIX_GT9886=y
CONFIG_TOUCHPANEL_FOCAL=y
CONFIG_TOUCHPANEL_FOCAL_FT3658U=y
CONFIG_TOUCHIRQ_UPDATE_QOS=y
CONFIG_TOUCHPANEL_NEW_SET_IRQ_WAKE=y
CONFIG_OPLUS_TP_APK=y
CONFIG_TOUCHPANEL_ALGORITHM=y
#endif /* OPLUS_FEATURE_TP_BASIC */
CONFIG_OPLUS_TRIKEY=y
CONFIG_OPLUS_TRIKEY_MAIN=y
CONFIG_OPLUS_TRIKEY_HALL=y
CONFIG_IST_UP=y
CONFIG_IST_DOWN=y
CONFIG_MXM_UP=y
CONFIG_MXM_DOWN=y
#ifdef OPLUS_FEATURE_TP_BASIC
CONFIG_OPLUS_FW_UPDATE=y
CONFIG_TOUCHPANEL_NOVA=y
CONFIG_TOUCHPANEL_NOVA_NT36523_NOFLASH=y
CONFIG_TOUCHPANEL_NT_PEN_SUPPORT=y
CONFIG_TOUCHPANEL_NT_DIGITALNOISE_TEST=y
#endif /*OPLUS_FEATURE_TP_BASIC*/
#ifdef OPLUS_FEATURE_TP_BASIC
CONFIG_COLOR_CTRL=y
#endif /* OPLUS_FEATURE_TP_BASIC */
#ifdef OPLUS_FEATURE_TP_BASIC
CONFIG_COLOR_CTRL_V2=y
#endif /* OPLUS_FEATURE_TP_BASIC */
CONFIG_INPUT_MISC=y
CONFIG_INPUT_QPNP_POWER_ON=y
CONFIG_INPUT_QTI_HAPTICS=y
@@ -365,6 +435,7 @@ CONFIG_INPUT_UINPUT=y
# CONFIG_LEGACY_PTYS is not set
# CONFIG_DEVMEM is not set
CONFIG_SERIAL_MSM_GENI=y
CONFIG_SERIAL_MSM_GENI_CONSOLE=y
CONFIG_SERIAL_MSM_GENI_HALF_SAMPLING=y
CONFIG_HW_RANDOM=y
CONFIG_HW_RANDOM_MSM_LEGACY=y
@@ -390,11 +461,43 @@ CONFIG_POWER_RESET_QCOM=y
CONFIG_POWER_RESET_XGENE=y
CONFIG_POWER_RESET_SYSCON=y
CONFIG_QPNP_SMB5=y
CONFIG_SMB1390_CHARGE_PUMP_PSY=y
CONFIG_SMB1355_SLAVE_CHARGER=y
CONFIG_QPNP_QNOVO5=y
#ifdef OPLUS_FEATURE_CHG_BASIC//tongfeng.Huang@ProDrv.CHG,add 2019/06/22 for charger
CONFIG_OPLUS_SM8250_CHARGER=y
CONFIG_OPLUS_CHIP_SOC_NODE=y
CONFIG_QPNP_FG_GEN4=y
CONFIG_HL6111R=y
CONFIG_OPLUS_CHARGER_WIRELESS_PEN=y
CONFIG_OPLUS_CHARGER_WIRELESS_RA9530=y
#else
#CONFIG_SMB1390_CHARGE_PUMP_PSY=y
#CONFIG_SMB1355_SLAVE_CHARGER=y
#CONFIG_QPNP_QNOVO5=y
#CONFIG_QPNP_FG_GEN4=y
#endif
#ifdef OPLUS_FEATURE_CHG_BASIC
CONFIG_OPLUS_SHORT_HW_CHECK=y
CONFIG_OPLUS_SHORT_USERSPACE=y
CONFIG_OPLUS_SHIP_MODE_SUPPORT=y
CONFIG_OPLUS_SMART_CHARGER_SUPPORT=y
CONFIG_OPLUS_CALL_MODE_SUPPORT=y
CONFIG_OPLUS_SHORT_C_BATT_CHECK=y
CONFIG_OPLUS_CHECK_CHARGERID_VOLT=y
CONFIG_OPLUS_SHORT_IC_CHECK=y
CONFIG_SONY_FF=y
CONFIG_HID_BETOP_FF=y
CONFIG_HID_PLAYSTATION=y
CONFIG_PLAYSTATION_FF=y
#endif
#ifndef OPLUS_FEATURE_CHG_BASIC//tongfeng.Huang@ProDrv.CHG,add 2019/12/22 for charger
#CONFIG_HL6111R=y
#endif
#ifdef OPLUS_FEATURE_CHG_BASIC
CONFIG_INPUT_JOYSTICK=y
CONFIG_JOYSTICK_XPAD=y
CONFIG_JOYSTICK_XPAD_FF=y
CONFIG_JOYSTICK_XPAD_LEDS=y
#endif /* OPLUS_FEATURE_CHG_BASIC */
CONFIG_THERMAL=y
CONFIG_THERMAL_WRITABLE_TRIPS=y
CONFIG_THERMAL_GOV_USER_SPACE=y
@@ -442,6 +545,9 @@ CONFIG_DVB_MPQ_SW=y
CONFIG_VIDEO_V4L2_VIDEOBUF2_CORE=y
CONFIG_I2C_RTC6226_QCA=y
CONFIG_DRM=y
#ifdef VENDOR_EDIT
CONFIG_DRM_MSM=y
#endif /* VENDOR_EDIT */
CONFIG_DRM_LONTIUM_LT9611UXC=y
CONFIG_BACKLIGHT_LCD_SUPPORT=y
CONFIG_BACKLIGHT_CLASS_DEVICE=y
@@ -570,8 +676,10 @@ CONFIG_MSM_QMP=y
CONFIG_IOMMU_IO_PGTABLE_FAST=y
CONFIG_ARM_SMMU=y
CONFIG_QCOM_LAZY_MAPPING=y
CONFIG_IOMMU_DEBUG=y
CONFIG_IOMMU_TESTS=y
#ifdef VENDOR_EDIT
#CONFIG_IOMMU_DEBUG=y
#CONFIG_IOMMU_TESTS=y
#endif
CONFIG_RPMSG_CHAR=y
CONFIG_RPMSG_QCOM_GLINK_SMEM=y
CONFIG_RPMSG_QCOM_GLINK_SPSS=y
@@ -673,11 +781,13 @@ CONFIG_ESOC_CLIENT=y
CONFIG_ESOC_MDM_4x=y
CONFIG_ESOC_MDM_DRV=y
CONFIG_SENSORS_SSC=y
CONFIG_SENSORS_SIMULATED_HALL=y
CONFIG_QCOM_KGSL=y
CONFIG_EXT4_FS=y
CONFIG_EXT4_FS_POSIX_ACL=y
CONFIG_EXT4_FS_SECURITY=y
CONFIG_EXT4_ENCRYPTION=y
CONFIG_OPLUS_FEATURE_OEXT4=y
CONFIG_F2FS_FS=y
CONFIG_F2FS_FS_SECURITY=y
CONFIG_FS_ENCRYPTION_INLINE_CRYPT=y
@@ -691,6 +801,13 @@ CONFIG_OVERLAY_FS=y
CONFIG_INCREMENTAL_FS=y
CONFIG_MSDOS_FS=y
CONFIG_VFAT_FS=y
#ifdef OPLUS_FEATURE_EXFAT_SUPPORT
CONFIG_NLS_UTF8=y
CONFIG_EXFAT_FS=y
# endif
#ifdef OPLUS_FEATURE_EMMC_SDCARD_OPTIMIZE
CONFIG_EMMC_SDCARD_OPTIMIZE=y
#endif
CONFIG_TMPFS=y
CONFIG_TMPFS_POSIX_ACL=y
CONFIG_ECRYPT_FS=y
@@ -737,3 +854,283 @@ CONFIG_CORESIGHT_HWEVENT=y
CONFIG_CORESIGHT_DUMMY=y
CONFIG_CORESIGHT_REMOTE_ETM=y
CONFIG_CORESIGHT_TGU=y
#ifdef OPLUS_SYSTEM_KERNEL
CONFIG_OPLUS_COMMON_SOFT=y
CONFIG_OPLUS_DEVICE_IFNO=y
CONFIG_OPLUS_RF_CABLE_MONITOR=y
CONFIG_RECORD_MDMRST=y
#endif
# ifdef OPLUS_FEATURE_POWERINFO_STANDBY
CONFIG_OPLUS_WAKELOCK_PROFILER=y
#endif /* OPLUS_FEATURE_POWERINFO_STANDBY */
# ifdef VENDOR_EDIT
CONFIG_CS_F61_NDT=y
# endif /* VENDOR_EDIT */
#ifdef OPLUS_FEATURE_INPUT_BOOST_V4
# CONFIG_OPLUS_FEATURE_INPUT_BOOST_V4 is not set
#endif
#ifdef OPLUS_SYSTEM_KERNEL
#all system oplus feature writer here
CONFIG_OPLUS_FEATURE_UBOOT_LOG=y
CONFIG_DETECT_HUNG_TASK=y
CONFIG_OPLUS_FEATURE_HUNG_TASK_ENHANCE=y
CONFIG_DEFAULT_HUNG_TASK_TIMEOUT=60
CONFIG_OPLUS_FEATURE_FEEDBACK=y
CONFIG_OPLUS_FEATURE_OPROJECT=y
CONFIG_OPLUS_FEATURE_CMDLINE=y
CONFIG_OPLUS_FEATURE_PROJECTINFO=y
CONFIG_OPLUS_FEATURE_PHOENIX=y
CONFIG_OPLUS_FEATURE_PHOENIX_REBOOT_SPEED=y
CONFIG_OPLUS_FEATURE_KMSG_WB=y
CONFIG_OPLUS_FEATURE_SHUTDOWN_SPEED=y
CONFIG_OPLUS_FEATURE_OLC=y
CONFIG_KPROBES=y
CONFIG_KRETPROBES=y
CONFIG_OPLUS_FEATURE_FDLEAK_CHECK=y
CONFIG_OPLUS_FEATURE_DUMP_DEVICE_INFO=y
#add for qcom minidump customized
CONFIG_OPLUS_FEATURE_QCOM_MINIDUMP_ENHANCE=y
# Add for shutdown detect
CONFIG_OPLUS_FEATURE_SHUTDOWN_DETECT=y
#add slabtrace function
#CONFIG_OPLUS_FEATURE_SLABTRACE_DEBUG=y
#endif
#ifdef OPLUS_FEATURE_QCOM_PMICWD
#Add for qcom pmic watchdog
CONFIG_OPLUS_FEATURE_QCOM_PMICWD=y
#endif
#ifdef OPLUS_FEATURE_FINGERPRINT
CONFIG_OPLUS_FINGERPRINT=y
CONFIG_OPLUS_FINGERPRINT_QCOM=y
CONFIG_OPLUS_FINGERPRINT_GOODIX_OPTICAL=y
CONFIG_OPLUS_FINGERPRINT_JIIOV_OPTICAL=y
#endif
CONFIG_OPLUS_FINGERPRINT_GKI_DISABLE=y
#ifdef OPLUS_FEATURE_SECURITY_COMMON
CONFIG_OPLUS_SECURE=y
CONFIG_OPLUS_SECURE_QCOM=y
CONFIG_OPLUS_SECURE_COMMON=y
#endif /* OPLUS_FEATURE_SECURITY_COMMON */
#ifdef OPLUS_FEATURE_HANS_FREEZE
CONFIG_OPLUS_HANS=y
#endif /*OPLUS_FEATURE_HANS_FREEZE*/
#ifdef VENDOR_EDIT//Qingjun.Wang@BSP.Haptic,add 2020/03/17 for vib aw8697
CONFIG_AW8697_HAPTIC=y
CONFIG_HAPTIC_FEEDBACK=y
#endif
#ifdef OPLUS_FEATURE_HEALTHINFO
CONFIG_OPLUS_MEM_MONITOR=y
CONFIG_FG_TASK_UID=y
CONFIG_OPLUS_HEALTHINFO=y
CONFIG_SLUB_DEBUG=y
#endif /*OPLUS_FEATURE_HEALTHINFO*/
#ifdef OPLUS_FEATURE_HEALTHINFO
CONFIG_OPLUS_JANK_INFO=y
#endif /* OPLUS_FEATURE_HEALTHINFO */
#ifdef OPLUS_FEATURE_SCHED_ASSIST
CONFIG_OPLUS_FEATURE_AUDIO_OPT=y
#endif
#ifdef OPLUS_FEATURE_ZRAM_OPT
CONFIG_OPLUS_ZRAM_OPT=y
CONFIG_CRYPTO_LZ4=y
CONFIG_PGTABLE_MAPPING=y
CONFIG_CRYPTO_ZSTD=y
#endif
#ifdef OPLUS_FEATURE_MULTI_KSWAPD
CONFIG_OPLUS_MULTI_KSWAPD=y
CONFIG_KSWAPD_UNBIND_MAX_CPU=y
#endif
#ifdef VENDOR_EDIT
CONFIG_REGULATOR_PM8008=y
#ifdef OPLUS_FEATURE_DUMPDEVICE
CONFIG_PSTORE=y
CONFIG_PSTORE_CONSOLE=y
CONFIG_PSTORE_PMSG=y
CONFIG_PSTORE_RAM=y
#endif
CONFIG_OPLUS_FEATURE_EROFS=y
CONFIG_EROFS_FS=y
#ifdef OPLUS_FEATURE_IOMONITOR
CONFIG_IOMONITOR=y
#CONFIG_IOMONITOR_WITH_F2FS=n
#endif /*OPLUS_FEATURE_IOMONITOR*/
CONFIG_OPLUS_FEATURE_PMIC_MONITOR=y
CONFIG_OPLUS_FEATURE_OF2FS=y
CONFIG_OPLUS_FEATURE_PANIC_FLUSH=y
CONFIG_F2FS_BD_STAT=y
#CONFIG_F2FS_GRADING_SSR is not set
#ifdef OPLUS_FEATURE_LOWMEM_DBG
CONFIG_OPLUS_FEATURE_LOWMEM_DBG=y
#endif /* OPLUS_FEATURE_LOWMEM_DBG */
#ifdef OPLUS_FEATURE_SCHED_ASSIST
CONFIG_BLK_WBT_SQ=y
CONFIG_BLK_WBT=y
CONFIG_OPLUS_FEATURE_UXIO_FIRST=y
#CONFIG_OPLUS_FEATURE_SCHED_SPREAD=y
#endif /*OPLUS_FEATURE_SCHED_ASSIST*/
#ifdef OPLUS_FEATURE_MEMLEAK_DETECT
CONFIG_KMALLOC_DEBUG=y
CONFIG_VMALLOC_DEBUG=y
CONFIG_DUMP_TASKS_MEM=y
#endif /*OPLUS_FEATURE_MEMLEAK_DETECT*/
#ifdef OPLUS_FEATURE_MEMLEAK_DETECT
CONFIG_SVELTE=y
#endif
#ifdef OPLUS_FEATURE_SENSOR_DRIVER
CONFIG_OPLUS_SENSOR_FB_QC=y
#endif
#ifdef VEDOR_EDIT
CONFIG_OPLUS_FEATURE_RECORD_MDMRST=y
#endif
#ifdef OPLUS_ARCH_EXTENDS
CONFIG_OPLUS_FEATURE_MM_FEEDBACK=y
#endif
#ifdef VENDOR_EDIT
CONFIG_OPLUS_FEATURE_MIDAS=y
#endif
#ifdef CONFIG_OPLUS_FEATURE_BINDER_STATS_ENABLE
CONFIG_OPLUS_FEATURE_BINDER_STATS_ENABLE=y
#endif
#ifdef OPLUS_FEATURE_ACM
CONFIG_OPLUS_FEATURE_ACM=y
CONFIG_OPLUS_FEATURE_ACM_LOGGING=y
#endif /* OPLUS_FEATURE_ACM */
#ifdef OPLUS_BUG_STABILITY
CONFIG_HARDEN_BRANCH_PREDICTOR=y
#endif
#add by shaojun.zou for cobuck opt 2021/08/16
CONFIG_SCHED_WALT_COBUCK=y
#ifdef OPLUS_FEATURE_SIM_DETECT
CONFIG_SIM_DETECT=y
#endif
#ifdef OPLUS_FEATURE_SIM_DETECT
CONFIG_OEM_QMI=y
#endif
CONFIG_OPLUS_FEATURE_THEIA=y
##endif /* OPLUS_FEATURE_THEIA */
#ifdef OPLUS_FEATURE_ESIM
CONFIG_OPLUS_GPIO=y
#endif
#ifdef OPLUS_FEATURE_SAUPWK
CONFIG_OPLUS_FEATURE_SAUPWK=y
#endif /* OPLUS_FEATURE_SAUPWK */
#ifdef OPLUS_FEATURE_RT_INFO
CONFIG_OPLUS_FEATURE_RT_INFO=n
#endif
#ifdef OPLUS_FEATURE_TPP
#jacky.ho@optimization add for tpp module
CONFIG_OPLUS_FEATURE_TPP=y
#endif /* OPLUS_FEATURE_TPP */
#ifdef OPLUS_FEATURE_IM
#ted.lin@optimization add for im module
CONFIG_OPLUS_FEATURE_IM=y
#endif /* OPLUS_FEATURE_IM */
#ifdef OPLUS_FEATURE_ION_BOOSTPOOL
CONFIG_OPLUS_ION_BOOSTPOOL=y
#endif /* OPLUS_FEATURE_ION_BOOSTPOOL */
#ifdef OPLUS_FEATURE_TPD
CONFIG_OPLUS_FEATURE_TPD=y
#endif
#ifdef OPLUS_FEATURE_SENSOR
CONFIG_SSC_INTERACTIVE=y
#endif
#ifdef OPLUS_FEATURE_WIFI_SLA
#todo: need to change to m when GKI
CONFIG_OPLUS_FEATURE_WIFI_SLA=y
#endif /* OPLUS_FEATURE_WIFI_SLA */
#ifdef OPLUS_FEATURE_WIFI_ROUTERBOOST
CONFIG_OPLUS_FEATURE_WIFI_ROUTERBOOST=m
#endif /* OPLUS_FEATURE_WIFI_ROUTERBOOST */
CONFIG_DYNAMIC_TUNNING_SWAPPINESS=y
#ifdef OPLUS_FEATURE_DATA_EVAL
CONFIG_OPLUS_FEATURE_DATA_EVAL=y
#endif /* OPLUS_FEATURE_DATA_VAL */
#ifdef OPLUS_FEATURE_DATA_LIMIT
CONFIG_IFB=y
CONFIG_NET_SCH_MULTIQ=y
CONFIG_NET_SCH_NETEM=y
CONFIG_CLS_U32_MARK=y
CONFIG_NET_CLS_FLOW=y
CONFIG_NET_EMATCH_CMP=y
CONFIG_NET_EMATCH_NBYTE=y
CONFIG_NET_EMATCH_META=y
CONFIG_NET_EMATCH_TEXT=y
CONFIG_NET_ACT_GACT=y
CONFIG_NET_ACT_MIRRED=y
CONFIG_NET_ACT_SKBEDIT=y
#endif /* OPLUS_FEATURE_DATA_LIMIT */
#CONFIG_NANDSWAP=y
#CONFIG_NANDSWAP_DEBUG=y
CONFIG_FRAME_WARN=4096
#ifdef OPLUS_FEATURE_HYBRIDSWAP
CONFIG_HYBRIDSWAP=y
CONFIG_HYBRIDSWAP_SWAPD=y
CONFIG_HYBRIDSWAP_CORE=y
#endif
#ifdef OPLUS_FEATURE_DNS_HOOK
CONFIG_OPLUS_FEATURE_DNS_HOOK=y
#endif /* OPLUS_FEATURE_DNS_HOOK */
#ifdef OPLUS_FEATURE_STATS_CALC
CONFIG_OPLUS_FEATURE_STATS_CALC=y
#endif /* OPLUS_FEATURE_STATS_CALC */
CONFIG_OPLUS_BINDER_STRATEGY=n
#ifdef OPLUS_FEATURE_MDMFEATURE
CONFIG_OPLUS_FEATURE_MDMFEATURE=y
#endif
#ifdef OPLUS_FEATURE_GAME_OPT
CONFIG_OPLUS_FEATURE_GAME_OPT=y
#endif
#ifdef Multi-gen LRU
CONFIG_LRU_GEN=y
CONFIG_LRU_GEN_ENABLED=n
CONFIG_LRU_GEN_STATS=n
#endif
CONFIG_OPLUS_FEATURE_CPU_JANKINFO=y
CONFIG_CRYPTO_CHACHA20POLY1305=y

View File

@@ -4,6 +4,7 @@ CONFIG_HIGH_RES_TIMERS=y
CONFIG_PREEMPT=y
CONFIG_IRQ_TIME_ACCOUNTING=y
CONFIG_SCHED_WALT=y
CONFIG_MMAP_LOCK_OPT=y
CONFIG_TASKSTATS=y
CONFIG_TASK_XACCT=y
CONFIG_TASK_IO_ACCOUNTING=y
@@ -22,11 +23,18 @@ CONFIG_BLK_CGROUP=y
CONFIG_DEBUG_BLK_CGROUP=y
CONFIG_CGROUP_FREEZER=y
CONFIG_CPUSETS=y
#ifdef OPLUS_FEATURE_TASK_CPUSTATS
CONFIG_OPLUS_SCHED=y
CONFIG_OPLUS_CTP=y
#endif /* OPLUS_FEATURE_TASK_CPUSTATS */
CONFIG_CGROUP_CPUACCT=y
CONFIG_CGROUP_BPF=y
CONFIG_CGROUP_DEBUG=y
CONFIG_SCHED_CORE_CTL=y
CONFIG_NAMESPACES=y
#ifdef OPLUS_FEATURE_UID_PERF
CONFIG_OPLUS_FEATURE_UID_PERF=y
#endif
# CONFIG_PID_NS is not set
CONFIG_SCHED_AUTOGROUP=y
CONFIG_SCHED_TUNE=y
@@ -53,7 +61,7 @@ CONFIG_SCHED_MC=y
CONFIG_NR_CPUS=8
CONFIG_SECCOMP=y
CONFIG_OKL4_GUEST=y
# CONFIG_UNMAP_KERNEL_AT_EL0 is not set
CONFIG_UNMAP_KERNEL_AT_EL0=y
CONFIG_ARM64_SSBD=y
CONFIG_PRINT_VMEMLAYOUT=y
CONFIG_ARMV8_DEPRECATED=y
@@ -63,6 +71,7 @@ CONFIG_SETEND_EMULATION=y
CONFIG_ARM64_SW_TTBR0_PAN=y
# CONFIG_ARM64_VHE is not set
CONFIG_RANDOMIZE_BASE=y
CONFIG_BUILD_ARM64_KERNEL_COMPRESSION_GZIP=n
CONFIG_CMDLINE="cgroup_disable=pressure"
CONFIG_CMDLINE_EXTEND=y
CONFIG_BUILD_ARM64_UNCOMPRESSED_KERNEL=y
@@ -83,6 +92,9 @@ CONFIG_CPU_FREQ_GOV_ONDEMAND=y
CONFIG_CPU_FREQ_GOV_CONSERVATIVE=y
CONFIG_CPU_BOOST=y
CONFIG_CPU_FREQ_GOV_SCHEDUTIL=y
#ifdef OPLUS_BUG_STABILITY
CONFIG_CPU_FREQ_STAT=y
#endif
CONFIG_ARM_QCOM_CPUFREQ_HW=y
CONFIG_MSM_TZ_LOG=y
CONFIG_ARM64_CRYPTO=y
@@ -109,7 +121,10 @@ CONFIG_CFQ_GROUP_IOSCHED=y
CONFIG_IOSCHED_BFQ=y
CONFIG_BFQ_GROUP_IOSCHED=y
# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
CONFIG_MEMORY_HOTPLUG=y
#ifdef OPLUS_DEBUG_STABILITY
##CONFIG_MEMORY_HOTPLUG=y
# CONFIG_MEMORY_HOTPLUG is not set
# endif
CONFIG_MEMORY_HOTPLUG_DEFAULT_ONLINE=y
CONFIG_MEMORY_HOTPLUG_MOVABLE_NODE=y
CONFIG_MEMORY_HOTREMOVE=y
@@ -295,6 +310,10 @@ CONFIG_MHI_NETDEV=y
CONFIG_MHI_UCI=y
CONFIG_MHI_SATELLITE=y
CONFIG_ZRAM=y
#ifdef OPLUS_FEATURE_ZRAM_WRITEBACK
#CONFIG_ZRAM_WRITEBACK=y
#CONFIG_ZWB_HANDLE=y
#endif /* OPLUS_FEATURE_ZRAM_WRITEBACK */
CONFIG_BLK_DEV_LOOP=y
CONFIG_BLK_DEV_LOOP_MIN_COUNT=16
CONFIG_BLK_DEV_RAM=y
@@ -318,6 +337,15 @@ CONFIG_SCSI_UFSHCD=y
CONFIG_SCSI_UFSHCD_PLATFORM=y
CONFIG_SCSI_UFS_QCOM=y
CONFIG_SCSI_UFSHCD_CMD_LOGGING=y
#ifdef OPLUS_FEATURE_UFSPLUS
CONFIG_UFSFEATURE=y
# CONFIG_UFSHPB is not set
CONFIG_UFSTW=y
CONFIG_UFSTW_IGNORE_GUARANTEE_BIT=y
CONFIG_UFSTW_BOOT_ENABLED=y
CONFIG_UFSHID=y
CONFIG_UFSHID_POC=y
#endif /*OPLUS_FEATURE_UFSPLUS*/
CONFIG_SCSI_UFS_CRYPTO=y
CONFIG_SCSI_UFS_CRYPTO_QTI=y
CONFIG_MD=y
@@ -369,7 +397,47 @@ CONFIG_TABLET_USB_HANWANG=y
CONFIG_TABLET_USB_KBTAB=y
CONFIG_INPUT_TOUCHSCREEN=y
CONFIG_TOUCHSCREEN_FTS=y
CONFIG_TOUCHSCREEN_NT36XXX=y
#ifdef OPLUS_FEATURE_TP_BASIC
CONFIG_TOUCHPANEL_SAMSUNG_S6SY791=y
CONFIG_TOUCHPANEL_SAMSUNG_S6SY792=y
CONFIG_TOUCHPANEL_SAMSUNG_S6SY771=y
CONFIG_TOUCHPANEL_SYNAPTICS=y
CONFIG_TOUCHPANEL_SYNAPTICS_TCM_ONCELL=y
CONFIG_TOUCHPANEL_SYNAPTICS_S3706=y
CONFIG_TOUCHPANEL_SAMSUNG=y
CONFIG_TOUCHPANEL_OPLUS=y
CONFIG_TOUCHPANEL_GOODIX=y
CONFIG_TOUCHPANEL_GOODIX_GT9886=y
CONFIG_TOUCHPANEL_FOCAL=y
CONFIG_TOUCHPANEL_FOCAL_FT3658U=y
CONFIG_TOUCHIRQ_UPDATE_QOS=y
CONFIG_TOUCHPANEL_NEW_SET_IRQ_WAKE=y
CONFIG_OPLUS_TP_APK=y
CONFIG_TOUCHPANEL_ALGORITHM=y
CONFIG_TOUCHPANEL_NOVA=y
CONFIG_TOUCHPANEL_NOVA_NT36523_NOFLASH=y
CONFIG_TOUCHPANEL_NT_PEN_SUPPORT=y
CONFIG_TOUCHPANEL_NT_DIGITALNOISE_TEST=y
#endif /* OPLUS_FEATURE_TP_BASIC */
CONFIG_OPLUS_TRIKEY=y
CONFIG_OPLUS_TRIKEY_MAIN=y
CONFIG_OPLUS_TRIKEY_HALL=y
CONFIG_IST_UP=y
CONFIG_IST_DOWN=y
CONFIG_MXM_UP=y
CONFIG_MXM_DOWN=y
#ifdef OPLUS_FEATURE_TP_BASIC
CONFIG_OPLUS_FW_UPDATE=y
#endif /*OPLUS_FEATURE_TP_BASIC*/
#ifdef OPLUS_FEATURE_TP_BASIC
CONFIG_COLOR_CTRL=y
#endif /* OPLUS_FEATURE_TP_BASIC */
#ifdef OPLUS_FEATURE_TP_BASIC
CONFIG_COLOR_CTRL_V2=y
#endif /* OPLUS_FEATURE_TP_BASIC */
#CONFIG_TOUCHSCREEN_NT36XXX=y
CONFIG_INPUT_MISC=y
CONFIG_INPUT_QPNP_POWER_ON=y
CONFIG_INPUT_QTI_HAPTICS=y
@@ -407,11 +475,43 @@ CONFIG_POWER_RESET_QCOM=y
CONFIG_POWER_RESET_XGENE=y
CONFIG_POWER_RESET_SYSCON=y
CONFIG_QPNP_SMB5=y
CONFIG_SMB1390_CHARGE_PUMP_PSY=y
CONFIG_SMB1355_SLAVE_CHARGER=y
CONFIG_QPNP_QNOVO5=y
#ifdef OPLUS_FEATURE_CHG_BASIC//tongfeng.Huang@ProDrv.CHG,add 2019/06/22 for charger
CONFIG_OPLUS_SM8250_CHARGER=y
CONFIG_OPLUS_CHIP_SOC_NODE=y
CONFIG_QPNP_FG_GEN4=y
CONFIG_HL6111R=y
CONFIG_OPLUS_CHARGER_WIRELESS_PEN=y
CONFIG_OPLUS_CHARGER_WIRELESS_RA9530=y
#else
#CONFIG_SMB1390_CHARGE_PUMP_PSY=y
#CONFIG_SMB1355_SLAVE_CHARGER=y
#CONFIG_QPNP_QNOVO5=y
#CONFIG_QPNP_FG_GEN4=y
#endif
#ifdef OPLUS_FEATURE_CHG_BASIC
CONFIG_OPLUS_SHORT_HW_CHECK=y
CONFIG_OPLUS_SHORT_USERSPACE=y
CONFIG_OPLUS_SHIP_MODE_SUPPORT=y
CONFIG_OPLUS_SMART_CHARGER_SUPPORT=y
CONFIG_OPLUS_CALL_MODE_SUPPORT=y
CONFIG_OPLUS_SHORT_C_BATT_CHECK=y
CONFIG_OPLUS_CHECK_CHARGERID_VOLT=y
CONFIG_OPLUS_SHORT_IC_CHECK=y
CONFIG_SONY_FF=y
CONFIG_HID_BETOP_FF=y
CONFIG_HID_PLAYSTATION=y
CONFIG_PLAYSTATION_FF=y
#endif
#ifndef OPLUS_FEATURE_CHG_BASIC//tongfeng.Huang@ProDrv.CHG,add 2019/12/22 for charger
#CONFIG_HL6111R=y
#endif
#ifdef OPLUS_FEATURE_CHG_BASIC
CONFIG_INPUT_JOYSTICK=y
CONFIG_JOYSTICK_XPAD=y
CONFIG_JOYSTICK_XPAD_FF=y
CONFIG_JOYSTICK_XPAD_LEDS=y
#endif /* OPLUS_FEATURE_CHG_BASIC */
CONFIG_THERMAL=y
CONFIG_THERMAL_WRITABLE_TRIPS=y
CONFIG_THERMAL_GOV_USER_SPACE=y
@@ -459,6 +559,9 @@ CONFIG_DVB_MPQ_SW=y
CONFIG_VIDEO_V4L2_VIDEOBUF2_CORE=y
CONFIG_I2C_RTC6226_QCA=y
CONFIG_DRM=y
#ifdef VENDOR_EDIT
CONFIG_DRM_MSM=y
#endif /* VENDOR_EDIT */
CONFIG_DRM_LONTIUM_LT9611UXC=y
CONFIG_BACKLIGHT_LCD_SUPPORT=y
CONFIG_BACKLIGHT_CLASS_DEVICE=y
@@ -706,6 +809,7 @@ CONFIG_ESOC_MDM_4x=y
CONFIG_ESOC_MDM_DRV=y
CONFIG_ESOC_MDM_DBG_ENG=y
CONFIG_SENSORS_SSC=y
CONFIG_SENSORS_SIMULATED_HALL=y
CONFIG_QCOM_KGSL=y
CONFIG_EXT4_FS=y
CONFIG_EXT4_FS_POSIX_ACL=y
@@ -724,6 +828,13 @@ CONFIG_OVERLAY_FS=y
CONFIG_INCREMENTAL_FS=y
CONFIG_MSDOS_FS=y
CONFIG_VFAT_FS=y
#ifdef OPLUS_FEATURE_EXFAT_SUPPORT
CONFIG_NLS_UTF8=y
CONFIG_EXFAT_FS=y
# endif
#ifdef OPLUS_FEATURE_EMMC_SDCARD_OPTIMIZE
CONFIG_EMMC_SDCARD_OPTIMIZE=y
#endif
CONFIG_TMPFS=y
CONFIG_TMPFS_POSIX_ACL=y
CONFIG_EFIVAR_FS=y
@@ -754,7 +865,7 @@ CONFIG_CRYPTO_DEV_QCOM_ICE=y
CONFIG_XZ_DEC=y
CONFIG_PRINTK_TIME=y
CONFIG_DYNAMIC_DEBUG=y
CONFIG_DEBUG_CONSOLE_UNHASHED_POINTERS=y
# CONFIG_DEBUG_CONSOLE_UNHASHED_POINTERS is not set
CONFIG_DEBUG_MODULE_LOAD_INFO=y
CONFIG_DEBUG_INFO=y
CONFIG_PAGE_OWNER=y
@@ -822,3 +933,286 @@ CONFIG_CORESIGHT_HWEVENT=y
CONFIG_CORESIGHT_DUMMY=y
CONFIG_CORESIGHT_REMOTE_ETM=y
CONFIG_CORESIGHT_TGU=y
#ifdef OPLUS_SYSTEM_KERNEL
#/*Add for oplus project*/
CONFIG_OPLUS_COMMON_SOFT=y
CONFIG_OPLUS_DEVICE_IFNO=y
CONFIG_OPLUS_RF_CABLE_MONITOR=y
CONFIG_RECORD_MDMRST=y
#endif
#ifdef OPLUS_FEATURE_POWERINFO_STANDBY
CONFIG_OPLUS_WAKELOCK_PROFILER=y
#endif /* OPLUS_FEATURE_POWERINFO_STANDBY */
# ifdef VENDOR_EDIT
CONFIG_CS_F61_NDT=y
# endif /* VENDOR_EDIT */
#ifdef OPLUS_FEATURE_INPUT_BOOST_V4
# CONFIG_OPLUS_FEATURE_INPUT_BOOST_V4 is not set
#endif
#ifdef OPLUS_SYSTEM_KERNEL
#all system oplus feature writer here
CONFIG_OPLUS_FEATURE_UBOOT_LOG=y
CONFIG_DETECT_HUNG_TASK=y
CONFIG_OPLUS_FEATURE_HUNG_TASK_ENHANCE=y
CONFIG_DEFAULT_HUNG_TASK_TIMEOUT=60
CONFIG_OPLUS_FEATURE_FEEDBACK=y
CONFIG_OPLUS_FEATURE_OPROJECT=y
CONFIG_OPLUS_FEATURE_CMDLINE=y
CONFIG_OPLUS_FEATURE_PROJECTINFO=y
CONFIG_OPLUS_FEATURE_PHOENIX=y
CONFIG_OPLUS_FEATURE_PHOENIX_REBOOT_SPEED=y
CONFIG_OPLUS_FEATURE_KMSG_WB=y
CONFIG_OPLUS_FEATURE_SHUTDOWN_SPEED=y
CONFIG_OPLUS_FEATURE_OLC=y
CONFIG_OPLUS_FEATURE_FDLEAK_CHECK=y
CONFIG_OPLUS_FEATURE_DUMP_DEVICE_INFO=y
#add for qcom minidump customized
CONFIG_OPLUS_FEATURE_QCOM_MINIDUMP_ENHANCE=y
#Add for shutdown detect
CONFIG_OPLUS_FEATURE_SHUTDOWN_DETECT=y
#add slabtrace function
CONFIG_OPLUS_FEATURE_SLABTRACE_DEBUG=y
#endif
#ifdef OPLUS_FEATURE_QCOM_PMICWD
#Add for qcom pmic watchdog
CONFIG_OPLUS_FEATURE_QCOM_PMICWD=y
#endif
#ifdef OPLUS_FEATURE_FINGERPRINT
CONFIG_OPLUS_FINGERPRINT=y
CONFIG_OPLUS_FINGERPRINT_QCOM=y
CONFIG_OPLUS_FINGERPRINT_GOODIX_OPTICAL=y
CONFIG_OPLUS_FINGERPRINT_JIIOV_OPTICAL=y
#endif
CONFIG_OPLUS_FINGERPRINT_GKI_DISABLE=y
#ifdef OPLUS_FEATURE_SECURITY_COMMON
CONFIG_OPLUS_SECURE=y
CONFIG_OPLUS_SECURE_QCOM=y
CONFIG_OPLUS_SECURE_COMMON=y
#endif /* OPLUS_FEATURE_SECURITY_COMMON */
#ifdef OPLUS_FEATURE_HANS_FREEZE
CONFIG_OPLUS_HANS=y
#endif /*OPLUS_FEATURE_HANS_FREEZE*/
#ifdef VENDOR_EDIT//Qingjun.Wang@BSP.Haptic,add 2020/03/17 for vib aw8697
CONFIG_AW8697_HAPTIC=y
CONFIG_HAPTIC_FEEDBACK=y
#endif
#ifdef OPLUS_FEATURE_HEALTHINFO
CONFIG_OPLUS_MEM_MONITOR=y
CONFIG_FG_TASK_UID=y
CONFIG_OPLUS_HEALTHINFO=y
CONFIG_SLUB_DEBUG=y
#endif /* OPLUS_FEATURE_HEALTHINFO */
#ifdef OPLUS_FEATURE_HEALTHINFO
CONFIG_OPLUS_JANK_INFO=y
#endif /* OPLUS_FEATURE_HEALTHINFO */
#ifdef OPLUS_FEATURE_SCHED_ASSIST
CONFIG_OPLUS_FEATURE_AUDIO_OPT=y
#endif
#ifdef OPLUS_FEATURE_ZRAM_OPT
CONFIG_OPLUS_ZRAM_OPT=y
CONFIG_CRYPTO_LZ4=y
CONFIG_PGTABLE_MAPPING=y
CONFIG_CRYPTO_ZSTD=y
#endif
#ifdef OPLUS_FEATURE_MULTI_KSWAPD
CONFIG_OPLUS_MULTI_KSWAPD=y
CONFIG_KSWAPD_UNBIND_MAX_CPU=y
#endif
#ifdef VENDOR_EDIT
CONFIG_REGULATOR_PM8008=y
#ifdef OPLUS_FEATURE_DUMPDEVICE
#Add for dump device info
CONFIG_PSTORE=y
CONFIG_PSTORE_CONSOLE=y
CONFIG_PSTORE_PMSG=y
CONFIG_PSTORE_RAM=y
#endif
CONFIG_OPLUS_FEATURE_EROFS=y
CONFIG_EROFS_FS=y
#ifdef OPLUS_FEATURE_IOMONITOR
CONFIG_IOMONITOR=y
#CONFIG_IOMONITOR_WITH_F2FS=n
#endif /*OPLUS_FEATURE_IOMONITOR*/
CONFIG_OPLUS_FEATURE_PMIC_MONITOR=y
CONFIG_OPLUS_FEATURE_OF2FS=y
CONFIG_OPLUS_FEATURE_PANIC_FLUSH=y
CONFIG_OPLUS_FEATURE_OEXT4=y
CONFIG_F2FS_BD_STAT=y
# CONFIG_F2FS_GRADING_SSR is not set
#ifdef OPLUS_FEATURE_LOWMEM_DBG
CONFIG_OPLUS_FEATURE_LOWMEM_DBG=y
#endif /* OPLUS_FEATURE_LOWMEM_DBG */
#ifdef OPLUS_FEATURE_SCHED_ASSIST
CONFIG_BLK_WBT_SQ=y
CONFIG_BLK_WBT=y
CONFIG_OPLUS_FEATURE_UXIO_FIRST=y
#CONFIG_OPLUS_FEATURE_SCHED_SPREAD=y
#endif /*OPLUS_FEATURE_SCHED_ASSIST*/
#ifdef OPLUS_FEATURE_MEMLEAK_DETECT
CONFIG_KMALLOC_DEBUG=y
CONFIG_VMALLOC_DEBUG=y
CONFIG_DUMP_TASKS_MEM=y
#endif /*OPLUS_FEATURE_MEMLEAK_DETECT*/
#ifdef OPLUS_FEATURE_MEMLEAK_DETECT
CONFIG_SVELTE=y
#endif
#ifdef VENDOR_EDIT
CONFIG_OPLUS_FEATURE_RECORD_MDMRST=y
#endif
#ifdef OPLUS_ARCH_EXTENDS
CONFIG_OPLUS_FEATURE_MM_FEEDBACK=y
#endif
#ifdef VENDOR_EDIT
CONFIG_OPLUS_FEATURE_MIDAS=y
#endif
#ifdef CONFIG_OPLUS_FEATURE_BINDER_STATS_ENABLE
CONFIG_OPLUS_FEATURE_BINDER_STATS_ENABLE=y
#endif
#ifdef OPLUS_FEATURE_ACM
CONFIG_OPLUS_FEATURE_ACM=y
CONFIG_OPLUS_FEATURE_ACM_LOGGING=y
#endif /* OPLUS_FEATURE_ACM */
#ifdef OPLUS_FEATURE_SENSOR_DRIVER
CONFIG_OPLUS_SENSOR_FB_QC=y
#endif
#ifdef OPLUS_BUG_STABILITY
CONFIG_HARDEN_BRANCH_PREDICTOR=y
#endif
#add by shaojun.zou for cobuck opt 2021/08/16
CONFIG_SCHED_WALT_COBUCK=y
#ifdef OPLUS_FEATURE_SIM_DETECT
CONFIG_SIM_DETECT=y
#endif
#ifdef OPLUS_FEATURE_SIM_DETECT
CONFIG_OEM_QMI=y
#endif
#Add for theia
CONFIG_OPLUS_FEATURE_THEIA=y
##endif /* OPLUS_FEATURE_THEIA */
#ifdef OPLUS_FEATURE_ESIM
CONFIG_OPLUS_GPIO=y
#endif
#ifdef OPLUS_FEATURE_SAUPWK
#Add for theia saupwrkey
CONFIG_OPLUS_FEATURE_SAUPWK=y
#endif /* OPLUS_FEATURE_SAUPWK */
#ifdef OPLUS_FEATURE_RT_INFO
CONFIG_OPLUS_FEATURE_RT_INFO=n
#endif
#ifdef OPLUS_FEATURE_TPP
#jacky.ho@optimization add for tpp module
CONFIG_OPLUS_FEATURE_TPP=y
#endif /* OPLUS_FEATURE_TPP */
#ifdef OPLUS_FEATURE_IM
#ted.lin@optimization add for im module
CONFIG_OPLUS_FEATURE_IM=y
#endif /* OPLUS_FEATURE_IM */
#ifdef OPLUS_FEATURE_ION_BOOSTPOOL
CONFIG_OPLUS_ION_BOOSTPOOL=y
#endif /* OPLUS_FEATURE_ION_BOOSTPOOL */
#ifdef OPLUS_FEATURE_TPD
CONFIG_OPLUS_FEATURE_TPD=y
#endif
#ifdef OPLUS_FEATURE_SENSOR
CONFIG_SSC_INTERACTIVE=y
#endif
#ifdef OPLUS_FEATURE_WIFI_SLA
#todo: need to change to m when GKI
CONFIG_OPLUS_FEATURE_WIFI_SLA=y
#endif /* OPLUS_FEATURE_WIFI_SLA */
#ifdef OPLUS_FEATURE_WIFI_ROUTERBOOST
CONFIG_OPLUS_FEATURE_WIFI_ROUTERBOOST=m
#endif /* OPLUS_FEATURE_WIFI_ROUTERBOOST */
CONFIG_DYNAMIC_TUNNING_SWAPPINESS=y
#ifdef OPLUS_FEATURE_DATA_EVAL
CONFIG_OPLUS_FEATURE_DATA_EVAL=y
#endif /* OPLUS_FEATURE_DATA_EVAL */
#ifdef OPLUS_FEATURE_DATA_LIMIT
CONFIG_IFB=y
CONFIG_NET_SCH_MULTIQ=y
CONFIG_NET_SCH_NETEM=y
CONFIG_CLS_U32_MARK=y
CONFIG_NET_CLS_FLOW=y
CONFIG_NET_EMATCH_CMP=y
CONFIG_NET_EMATCH_NBYTE=y
CONFIG_NET_EMATCH_META=y
CONFIG_NET_EMATCH_TEXT=y
CONFIG_NET_ACT_GACT=y
CONFIG_NET_ACT_MIRRED=y
CONFIG_NET_ACT_SKBEDIT=y
#endif /* OPLUS_FEATURE_DATA_LIMIT */
#CONFIG_NANDSWAP=y
#CONFIG_NANDSWAP_DEBUG=y
CONFIG_FRAME_WARN=4096
#ifdef OPLUS_FEATURE_HYBRIDSWAP
CONFIG_HYBRIDSWAP=y
CONFIG_HYBRIDSWAP_SWAPD=y
CONFIG_HYBRIDSWAP_CORE=y
#endif
#ifdef OPLUS_FEATURE_DNS_HOOK
CONFIG_OPLUS_FEATURE_DNS_HOOK=y
#endif /* OPLUS_FEATURE_DNS_HOOK */
#ifdef OPLUS_FEATURE_STATS_CALC
CONFIG_OPLUS_FEATURE_STATS_CALC=y
#endif /* OPLUS_FEATURE_STATS_CALC */
#ifdef OPLUS_FEATURE_MDMFEATURE
CONFIG_OPLUS_FEATURE_MDMFEATURE=y
#endif
#ifdef OPLUS_FEATURE_GAME_OPT
CONFIG_OPLUS_FEATURE_GAME_OPT=y
#endif
CONFIG_OPLUS_BINDER_STRATEGY=n
#ifdef Multi-gen LRU
CONFIG_LRU_GEN=y
CONFIG_LRU_GEN_ENABLED=n
CONFIG_LRU_GEN_STATS=n
#endif
CONFIG_OPLUS_FEATURE_CPU_JANKINFO=y
CONFIG_CRYPTO_CHACHA20POLY1305=y

View File

@@ -6,6 +6,7 @@ CONFIG_HIGH_RES_TIMERS=y
CONFIG_PREEMPT=y
CONFIG_IRQ_TIME_ACCOUNTING=y
CONFIG_SCHED_WALT=y
CONFIG_MMAP_LOCK_OPT=y
CONFIG_TASKSTATS=y
CONFIG_TASK_XACCT=y
CONFIG_TASK_IO_ACCOUNTING=y
@@ -22,6 +23,10 @@ CONFIG_MEMCG_SWAP=y
CONFIG_BLK_CGROUP=y
CONFIG_CGROUP_FREEZER=y
CONFIG_CPUSETS=y
#ifdef OPLUS_FEATURE_TASK_CPUSTATS
CONFIG_OPLUS_SCHED=y
CONFIG_OPLUS_CTP=y
#endif /* OPLUS_FEATURE_TASK_CPUSTATS */
CONFIG_CGROUP_CPUACCT=y
CONFIG_CGROUP_BPF=y
CONFIG_SCHED_CORE_CTL=y
@@ -38,7 +43,6 @@ CONFIG_KALLSYMS_ALL=y
CONFIG_BPF_SYSCALL=y
CONFIG_BPF_JIT_ALWAYS_ON=y
CONFIG_EMBEDDED=y
# CONFIG_SLUB_DEBUG is not set
# CONFIG_COMPAT_BRK is not set
CONFIG_SLAB_FREELIST_RANDOM=y
CONFIG_SLAB_FREELIST_HARDENED=y
@@ -64,6 +68,8 @@ CONFIG_RANDOMIZE_BASE=y
CONFIG_CMDLINE="cgroup_disable=pressure"
CONFIG_CMDLINE_EXTEND=y
# CONFIG_EFI is not set
CONFIG_BUILD_ARM64_APPENDED_DTB_IMAGE=y
CONFIG_IMG_GZ_DTB=y
CONFIG_BUILD_ARM64_UNCOMPRESSED_KERNEL=y
CONFIG_COMPAT=y
CONFIG_PM_WAKELOCKS=y
@@ -80,6 +86,9 @@ CONFIG_CPU_FREQ_GOV_ONDEMAND=y
CONFIG_CPU_FREQ_GOV_CONSERVATIVE=y
CONFIG_CPU_BOOST=y
CONFIG_CPU_FREQ_GOV_SCHEDUTIL=y
#ifdef VENDOR_EDIT
CONFIG_CPU_FREQ_STAT=y
#endif
CONFIG_ARM_QCOM_CPUFREQ_HW=y
CONFIG_ARM_QCOM_CPUFREQ_HW_DEBUG=y
CONFIG_MSM_TZ_LOG=y
@@ -106,7 +115,10 @@ CONFIG_CFQ_GROUP_IOSCHED=y
CONFIG_IOSCHED_BFQ=y
CONFIG_BFQ_GROUP_IOSCHED=y
# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
CONFIG_MEMORY_HOTPLUG=y
#ifdef OPLUS_DEBUG_STABILITY
##CONFIG_MEMORY_HOTPLUG=y
# CONFIG_MEMORY_HOTPLUG is not set
# endif
CONFIG_MEMORY_HOTPLUG_DEFAULT_ONLINE=y
CONFIG_MEMORY_HOTPLUG_MOVABLE_NODE=y
CONFIG_MEMORY_HOTREMOVE=y
@@ -271,6 +283,11 @@ CONFIG_CFG80211=y
CONFIG_CFG80211_INTERNAL_REGDB=y
CONFIG_RFKILL=y
CONFIG_NFC_NQ=y
#ifdef OPLUS_NFC_BRINGUP
#Add for the kernel Macro for NXP PN557 NFC kernel
CONFIG_NFC_PN553_DEVICES=y
CONFIG_NXP_P73_DEVICES=y
#endif /*OPLUS_NFC_BRINGUP*/
CONFIG_FW_LOADER_USER_HELPER=y
CONFIG_FW_LOADER_USER_HELPER_FALLBACK=y
# CONFIG_FW_CACHE is not set
@@ -279,7 +296,7 @@ CONFIG_REGMAP_ALLOW_WRITE_DEBUGFS=y
CONFIG_DMA_CMA=y
CONFIG_MHI_BUS=y
CONFIG_ZRAM=y
CONFIG_ZRAM_DEDUP=y
#CONFIG_ZRAM_DEDUP=y
CONFIG_BLK_DEV_LOOP=y
CONFIG_BLK_DEV_LOOP_MIN_COUNT=16
CONFIG_BLK_DEV_RAM=y
@@ -293,11 +310,21 @@ CONFIG_BLK_DEV_SD=y
CONFIG_CHR_DEV_SG=y
CONFIG_CHR_DEV_SCH=y
CONFIG_SCSI_CONSTANTS=y
CONFIG_SCSI_LOGGING=y
#CONFIG_SCSI_LOGGING=y
CONFIG_SCSI_SCAN_ASYNC=y
CONFIG_SCSI_UFSHCD=y
CONFIG_SCSI_UFSHCD_PLATFORM=y
CONFIG_SCSI_UFS_QCOM=y
CONFIG_SCSI_UFS_QCOM_ICE=y
#ifdef OPLUS_FEATURE_UFSPLUS
CONFIG_UFSFEATURE=y
# CONFIG_UFSHPB is not set
CONFIG_UFSTW=y
# CONFIG_HPB_SUP_ONLY_4 is not set
# CONFIG_HPB_SUP_8_TO_32 is not set
# CONFIG_HPB_SUP_OVER_36 is not set
CONFIG_UFSTW_IGNORE_GUARANTEE_BIT=y
#endif /*OPLUS_FEATURE_UFSPLUS*/
CONFIG_SCSI_UFS_CRYPTO=y
CONFIG_SCSI_UFS_CRYPTO_QTI=y
CONFIG_MD=y
@@ -344,10 +371,29 @@ CONFIG_TABLET_USB_GTCO=y
CONFIG_TABLET_USB_HANWANG=y
CONFIG_TABLET_USB_KBTAB=y
CONFIG_INPUT_TOUCHSCREEN=y
CONFIG_SECURE_TOUCH_SYNAPTICS_DSX=y
CONFIG_OPLUS_TRI_STATE_KEY=y
CONFIG_TOUCHIRQ_UPDATE_QOS=y
CONFIG_TOUCHPANEL_NEW_SET_IRQ_WAKE=y
CONFIG_TOUCHPANEL_OPLUS=y
CONFIG_TOUCHPANEL_ALGORITHM=y
CONFIG_TOUCHPANEL_SYNAPTICS=y
CONFIG_TOUCHPANEL_GOODIX=y
CONFIG_TOUCHPANEL_SAMSUNG=y
CONFIG_TOUCHPANEL_SYNAPTICS_TCM_ONCELL=y
CONFIG_TOUCHPANEL_GOODIX_GT9886=y
CONFIG_TOUCHPANEL_SAMSUNG_S6SY771=y
CONFIG_TOUCHPANEL_FOCAL=y
CONFIG_TOUCHPANEL_FOCAL_FT3518=y
CONFIG_TOUCHPANEL_NOVA=y
CONFIG_TOUCHPANEL_NOVA_NT36672C_NOFLASH=y
CONFIG_TOUCHPANEL_NT_DIGITALNOISE_TEST=y
CONFIG_TOUCHPANEL_ILITEK=y
CONFIG_TOUCHPANEL_ILITEK_ILITEK7807S=y
# CONFIG_SECURE_TOUCH_SYNAPTICS_DSX is not set
CONFIG_TOUCHSCREEN_SYNAPTICS_TCM_REFLASH=y
CONFIG_TOUCHSCREEN_SYNAPTICS_TCM_RECOVERY=y
CONFIG_TOUCHSCREEN_FTS=y
CONFIG_TOUCHSCREEN_FTS=n
CONFIG_TOUCHSCREEN_ST=n
CONFIG_INPUT_MISC=y
CONFIG_INPUT_QPNP_POWER_ON=y
CONFIG_INPUT_UINPUT=y
@@ -356,6 +402,7 @@ CONFIG_INPUT_UINPUT=y
# CONFIG_LEGACY_PTYS is not set
# CONFIG_DEVMEM is not set
CONFIG_SERIAL_MSM_GENI=y
CONFIG_SERIAL_MSM_GENI_CONSOLE=y
CONFIG_SERIAL_MSM_GENI_HALF_SAMPLING=y
CONFIG_TTY_PRINTK=y
CONFIG_HW_RANDOM=y
@@ -379,12 +426,44 @@ CONFIG_POWER_RESET_QCOM=y
CONFIG_POWER_RESET_XGENE=y
CONFIG_POWER_RESET_SYSCON=y
CONFIG_QPNP_SMB5=y
CONFIG_SMB1390_CHARGE_PUMP_PSY=y
#ifdef OPLUS_FEATURE_CHG_BASIC//tongfeng.Huang@ProDrv.CHG,add 2019/06/22 for charger
CONFIG_OPLUS_SM7250R_CHARGER=y
CONFIG_SMB1355_SLAVE_CHARGER=y
CONFIG_QPNP_QG=y
CONFIG_SMB1398_CHARGER=y
CONFIG_OPLUS_CHIP_SOC_NODE=y
CONFIG_OPLUS_QPNP_QG=y
#else
#CONFIG_SMB1390_CHARGE_PUMP_PSY=y
#CONFIG_SMB1398_CHARGER=y
#CONFIG_QPNP_QG=y
#CONFIG_QPNP_FG_GEN4=y
#endif
#ifdef OPLUS_FEATURE_CHG_BASIC
CONFIG_OPLUS_SHORT_HW_CHECK=y
CONFIG_OPLUS_SHORT_USERSPACE=y
CONFIG_OPLUS_SHIP_MODE_SUPPORT=y
CONFIG_OPLUS_SMART_CHARGER_SUPPORT=y
CONFIG_OPLUS_CHARGER_OPTIGA=y
CONFIG_OPLUS_SHORT_C_BATT_CHECK=y
CONFIG_OPLUS_CHECK_CHARGERID_VOLT=y
CONFIG_OPLUS_SHORT_IC_CHECK=y
CONFIG_SONY_FF=y
CONFIG_HID_BETOP_FF=y
CONFIG_HID_PLAYSTATION=y
CONFIG_PLAYSTATION_FF=y
#endif
#ifndef OPLUS_FEATURE_CHG_BASIC//tongfeng.Huang@ProDrv.CHG,add 2019/12/22 for charger
#CONFIG_HL6111R=y
#endif
#ifdef OPLUS_FEATURE_CHG_BASIC
CONFIG_INPUT_JOYSTICK=y
CONFIG_JOYSTICK_XPAD=y
CONFIG_JOYSTICK_XPAD_FF=y
CONFIG_JOYSTICK_XPAD_LEDS=y
#endif /* OPLUS_FEATURE_CHG_BASIC */
CONFIG_THERMAL=y
CONFIG_THERMAL_STATISTICS=y
#ifndef OPLUS_FEATURE_THERMAL_STATISTICS
#CONFIG_THERMAL_STATISTICS=y
#endif
CONFIG_THERMAL_WRITABLE_TRIPS=y
CONFIG_THERMAL_GOV_USER_SPACE=y
CONFIG_THERMAL_GOV_LOW_LIMITS=y
@@ -395,6 +474,9 @@ CONFIG_THERMAL_TSENS=y
CONFIG_QTI_ADC_TM=y
CONFIG_QTI_VIRTUAL_SENSOR=y
CONFIG_QTI_QMI_SENSOR=y
CONFIG_SSC_INTERACTIVE=y
CONFIG_OPLUS_SENSOR_FB_QC=y
CONFIG_SENSORS_SSC=y
CONFIG_QTI_BCL_PMIC5=y
CONFIG_QTI_BCL_SOC_DRIVER=y
CONFIG_QTI_QMI_COOLING_DEVICE=y
@@ -417,12 +499,16 @@ CONFIG_REGULATOR_REFGEN=y
CONFIG_REGULATOR_RPMH=y
CONFIG_REGULATOR_STUB=y
CONFIG_REGULATOR_PM8008=y
#ifdef OPLUS_BUG_STABILITY
CONFIG_REGULATOR_TPS65132=y
CONFIG_REGULATOR_TPS65132_FOR_20267=y
#endif /* OPLUS_BUG_STABILITY */
CONFIG_MEDIA_SUPPORT=y
CONFIG_MEDIA_CAMERA_SUPPORT=y
CONFIG_MEDIA_DIGITAL_TV_SUPPORT=y
CONFIG_MEDIA_CONTROLLER=y
CONFIG_VIDEO_V4L2_SUBDEV_API=y
CONFIG_VIDEO_ADV_DEBUG=y
#CONFIG_VIDEO_ADV_DEBUG=y
CONFIG_VIDEO_FIXED_MINOR_RANGES=y
CONFIG_V4L_PLATFORM_DRIVERS=y
CONFIG_MSM_NPU=y
@@ -431,6 +517,10 @@ CONFIG_DVB_MPQ_DEMUX=m
CONFIG_DVB_MPQ_SW=y
CONFIG_VIDEO_V4L2_VIDEOBUF2_CORE=y
CONFIG_DRM=y
#ifdef VENDOR_EDIT
CONFIG_DRM_MSM=y
#endif /* VENDOR_EDIT */
CONFIG_DRM_LONTIUM_LT9611UXC=y
CONFIG_BACKLIGHT_LCD_SUPPORT=y
CONFIG_BACKLIGHT_CLASS_DEVICE=y
CONFIG_BACKLIGHT_QCOM_SPMI_WLED=y
@@ -661,6 +751,15 @@ CONFIG_OVERLAY_FS=y
CONFIG_INCREMENTAL_FS=y
CONFIG_MSDOS_FS=y
CONFIG_VFAT_FS=y
CONFIG_OPLUS_FEATURE_EROFS=y
CONFIG_EROFS_FS=y
#ifdef OPLUS_FEATURE_EXFAT_SUPPORT
CONFIG_NLS_UTF8=y
CONFIG_EXFAT_FS=y
# endif
#ifdef OPLUS_FEATURE_EMMC_SDCARD_OPTIMIZE
CONFIG_EMMC_SDCARD_OPTIMIZE=y
#endif
CONFIG_TMPFS=y
CONFIG_TMPFS_POSIX_ACL=y
CONFIG_ECRYPT_FS=y
@@ -694,7 +793,7 @@ CONFIG_SCHEDSTATS=y
# CONFIG_DEBUG_PREEMPT is not set
CONFIG_DEBUG_LIST=y
CONFIG_IPC_LOGGING=y
CONFIG_DEBUG_ALIGN_RODATA=y
#CONFIG_DEBUG_ALIGN_RODATA=y
CONFIG_ARM64_STRICT_BREAK_BEFORE_MAKE=y
CONFIG_CORESIGHT=y
CONFIG_CORESIGHT_LINK_AND_SINK_TMC=y
@@ -708,3 +807,262 @@ CONFIG_CORESIGHT_HWEVENT=y
CONFIG_CORESIGHT_DUMMY=y
CONFIG_CORESIGHT_REMOTE_ETM=y
CONFIG_CORESIGHT_TGU=y
#ifdef OPLUS_SYSTEM_KERNEL
#/*Add for oplus project*/
CONFIG_OPLUS_COMMON_SOFT=y
CONFIG_OPLUS_DEVICE_IFNO=y
CONFIG_OPLUS_RF_CABLE_MONITOR=y
CONFIG_RECORD_MDMRST=y
#endif
#ifdef OPLUS_FEATURE_POWERINFO_STANDBY
CONFIG_OPLUS_WAKELOCK_PROFILER=y
#endif /* OPLUS_FEATURE_POWERINFO_STANDBY */
# ifdef VENDOR_EDIT
CONFIG_CS_F61_NDT=y
# endif /* VENDOR_EDIT */
#ifdef OPLUS_SYSTEM_KERNEL
#all system oplus feature writer here
CONFIG_OPLUS_FEATURE_UBOOT_LOG=y
CONFIG_DETECT_HUNG_TASK=y
CONFIG_OPLUS_FEATURE_HUNG_TASK_ENHANCE=y
CONFIG_DEFAULT_HUNG_TASK_TIMEOUT=60
CONFIG_OPLUS_FEATURE_FEEDBACK=y
CONFIG_OPLUS_FEATURE_OPROJECT=y
CONFIG_OPLUS_FEATURE_CMDLINE=y
CONFIG_OPLUS_FEATURE_PROJECTINFO=y
CONFIG_OPLUS_FEATURE_PHOENIX=y
CONFIG_OPLUS_FEATURE_PHOENIX_REBOOT_SPEED=y
CONFIG_OPLUS_FEATURE_KMSG_WB=y
CONFIG_OPLUS_FEATURE_SHUTDOWN_SPEED=y
CONFIG_OPLUS_FEATURE_OLC=y
CONFIG_KPROBES=y
CONFIG_KRETPROBES=y
CONFIG_OPLUS_FEATURE_FDLEAK_CHECK=y
#add for qcom minidump customized
CONFIG_OPLUS_FEATURE_QCOM_MINIDUMP_ENHANCE=y
#Add for shutdown detect
CONFIG_OPLUS_FEATURE_SHUTDOWN_DETECT=y
#add slabtrace function
# CONFIG_OPLUS_FEATURE_SLABTRACE_DEBUG is not set
#enable oplus misc feature
# CONFIG_OPLUS_FEATURE_MISC is not set
#endif
#ifdef OPLUS_FEATURE_QCOM_PMICWD
# Add for qcom pmic watchdog
CONFIG_OPLUS_FEATURE_QCOM_PMICWD=y
#endif
#ifdef OPLUS_FEATURE_FINGERPRINT
CONFIG_OPLUS_FINGERPRINT=y
CONFIG_OPLUS_FINGERPRINT_QCOM=y
CONFIG_OPLUS_FINGERPRINT_GOODIX_OPTICAL=y
CONFIG_OPLUS_FINGERPRINT_JIIOV_OPTICAL=y
CONFIG_OPLUS_FINGERPRINT_SILEAD=y
#endif
CONFIG_OPLUS_FINGERPRINT_GKI_DISABLE=y
#ifdef OPLUS_FEATURE_SECURITY_COMMON
CONFIG_OPLUS_SECURE=y
CONFIG_OPLUS_SECURE_QCOM=y
CONFIG_OPLUS_SECURE_COMMON=y
#endif /* OPLUS_FEATURE_SECURITY_COMMON */
#ifdef OPLUS_FEATURE_HANS_FREEZE
CONFIG_OPLUS_HANS=y
#endif /*OPLUS_FEATURE_HANS_FREEZE*/
#ifdef VENDOR_EDIT//Qingjun.Wang@BSP.Haptic,add 2020/03/17 for vib aw8697
CONFIG_AW8697_HAPTIC=y
#endif
#ifdef OPLUS_FEATURE_HEALTHINFO
CONFIG_OPLUS_MEM_MONITOR=y
CONFIG_FG_TASK_UID=y
CONFIG_OPLUS_HEALTHINFO=y
CONFIG_SLUB_DEBUG=y
#endif /* OPLUS_FEATURE_HEALTHINFO */
#ifdef OPLUS_FEATURE_HEALTHINFO
CONFIG_OPLUS_JANK_INFO=y
#endif /* OPLUS_FEATURE_HEALTHINFO */
#ifdef OPLUS_FEATURE_ZRAM_OPT
CONFIG_OPLUS_ZRAM_OPT=y
CONFIG_CRYPTO_LZ4=y
CONFIG_PGTABLE_MAPPING=y
CONFIG_CRYPTO_ZSTD=y
#endif
#ifdef OPLUS_FEATURE_MULTI_KSWAPD
CONFIG_OPLUS_MULTI_KSWAPD=y
CONFIG_KSWAPD_UNBIND_MAX_CPU=y
#endif
#ifdef VENDOR_EDIT
CONFIG_REGULATOR_PM8008=y
#ifdef OPLUS_FEATURE_DUMPDEVICE
#Add for dump device info
CONFIG_PSTORE=y
CONFIG_PSTORE_CONSOLE=y
CONFIG_PSTORE_PMSG=y
CONFIG_PSTORE_RAM=y
#endif
#ifdef OPLUS_FEATURE_IOMONITOR
#CONFIG_IOMONITOR=y
#CONFIG_IOMONITOR_WITH_F2FS=n
#endif /*OPLUS_FEATURE_IOMONITOR*/
CONFIG_OPLUS_FEATURE_PMIC_MONITOR=y
CONFIG_OPLUS_FEATURE_OF2FS=y
CONFIG_OPLUS_FEATURE_PANIC_FLUSH=y
CONFIG_OPLUS_FEATURE_OEXT4=y
CONFIG_F2FS_BD_STAT=y
# CONFIG_F2FS_GRADING_SSR is not set
#ifdef OPLUS_FEATURE_LOWMEM_DBG
CONFIG_OPLUS_FEATURE_LOWMEM_DBG=y
#endif /* OPLUS_FEATURE_LOWMEM_DBG */
#ifdef OPLUS_FEATURE_SCHED_ASSIST
CONFIG_BLK_WBT_SQ=y
CONFIG_BLK_WBT=y
CONFIG_OPLUS_FEATURE_UXIO_FIRST=y
#endif /*OPLUS_FEATURE_SCHED_ASSIST*/
#ifdef OPLUS_FEATURE_MEMLEAK_DETECT
CONFIG_KMALLOC_DEBUG=y
CONFIG_VMALLOC_DEBUG=y
CONFIG_DUMP_TASKS_MEM=y
#endif /*OPLUS_FEATURE_MEMLEAK_DETECT*/
#ifdef OPLUS_FEATURE_MEMLEAK_DETECT
CONFIG_SVELTE=y
#endif
#ifdef VENDOR_EDIT
CONFIG_OPLUS_FEATURE_RECORD_MDMRST=y
#endif
#ifdef OPLUS_ARCH_EXTENDS
CONFIG_OPLUS_FEATURE_MM_FEEDBACK=y
#endif
#ifdef OPLUS_FEATURE_MIDAS
CONFIG_OPLUS_FEATURE_MIDAS=y
#endif
#ifdef CONFIG_OPLUS_FEATURE_BINDER_STATS_ENABLE
CONFIG_OPLUS_FEATURE_BINDER_STATS_ENABLE=y
#endif
#ifdef OPLUS_FEATURE_ACM
CONFIG_OPLUS_FEATURE_ACM=y
CONFIG_OPLUS_FEATURE_ACM_LOGGING=y
#endif /* OPLUS_FEATURE_ACM */
#ifdef OPLUS_BUG_STABILITY
CONFIG_HARDEN_BRANCH_PREDICTOR=y
#endif
CONFIG_OPLUS_FEATURE_THEIA=y
##endif /* OPLUS_FEATURE_THEIA */
#ifdef OPLUS_FEATURE_SAUPWK
CONFIG_OPLUS_FEATURE_SAUPWK=y
#endif /* OPLUS_FEATURE_SAUPWK */
#ifdef OPLUS_FEATURE_DUMP_DEVICE_INFO
CONFIG_OPLUS_FEATURE_DUMP_DEVICE_INFO=y
#endif /* OPLUS_FEATURE_DUMP_DEVICE_INFO */
#ifdef OPLUS_BUG_STABILITY
CONFIG_UNMAP_KERNEL_AT_EL0=y
#endif
#ifdef OPLUS_FEATURE_GPIO_NC
CONFIG_OPLUS_GPIO_NC=y
#endif
#ifdef OPLUS_FEATURE_BOOTLOADER_LOG
CONFIG_BOOTLOADER_LOG=y
#endif
#ifdef OPLUS_FEATURE_PARAM
CONFIG_PARAM_READ_WRITE=y
#endif
#ifdef OPLUS_FEATURE_TP_BASIC
CONFIG_SIM_DETECT=y
#endif
#ifdef OPLUS_FEATURE_TP_BASIC
CONFIG_OPLUS_FW_UPDATE=y
#endif /*OPLUS_FEATURE_TP_BASIC*/
#ifdef OPLUS_FEATURE_ION_BOOSTPOOL
#CONFIG_OPLUS_ION_BOOSTPOOL=y ,Add for disable boost pool ,fill generic ION POOL
#endif /* OPLUS_FEATURE_ION_BOOSTPOOL */
#ifdef OPLUS_FEATURE_WIFI_SLA
#todo: need to change to m when GKI
CONFIG_OPLUS_FEATURE_WIFI_SLA=y
#endif /* OPLUS_FEATURE_WIFI_SLA */
#ifdef OPLUS_FEATURE_WIFI_ROUTERBOOST
CONFIG_OPLUS_FEATURE_WIFI_ROUTERBOOST=m
#endif /* OPLUS_FEATURE_WIFI_ROUTERBOOST */
#ifdef OPLUS_FEATURE_DATA_EVAL
CONFIG_OPLUS_FEATURE_DATA_EVAL=y
#endif /* OPLUS_FEATURE_DATA_EVAL */
#ifdef OPLUS_FEATURE_DATA_LIMIT
CONFIG_IFB=y
CONFIG_NET_SCH_MULTIQ=y
CONFIG_NET_SCH_NETEM=y
CONFIG_CLS_U32_MARK=y
CONFIG_NET_CLS_FLOW=y
CONFIG_NET_EMATCH_CMP=y
CONFIG_NET_EMATCH_NBYTE=y
CONFIG_NET_EMATCH_META=y
CONFIG_NET_EMATCH_TEXT=y
CONFIG_NET_ACT_GACT=y
CONFIG_NET_ACT_MIRRED=y
CONFIG_NET_ACT_SKBEDIT=y
#endif /* OPLUS_FEATURE_DATA_LIMIT */
#CONFIG_DEBUG_SPINLOCK=y
#CONFIG_NANDSWAP=y
#CONFIG_NANDSWAP_DEBUG=y
#ifdef OPLUS_FEATURE_IM
#ted.lin@optimization add for im module
CONFIG_OPLUS_FEATURE_IM=y
#endif /* OPLUS_FEATURE_IM */
CONFIG_FRAME_WARN=4096
CONFIG_DYNAMIC_TUNNING_SWAPPINESS=y
#ifdef OPLUS_FEATURE_HYBRIDSWAP
CONFIG_HYBRIDSWAP=y
CONFIG_HYBRIDSWAP_SWAPD=y
CONFIG_HYBRIDSWAP_CORE=y
#endif
#ifdef OPLUS_FEATURE_UID_PERF
CONFIG_OPLUS_FEATURE_UID_PERF=y
#endif
#ifdef OPLUS_FEATURE_ESIM
CONFIG_OPLUS_GPIO=y
#endif /* OPLUS_FEATURE_ESIM */
#ifdef OPLUS_FEATURE_TPD
CONFIG_OPLUS_FEATURE_TPD=y
#endif
#ifdef OPLUS_FEATURE_DNS_HOOK
CONFIG_OPLUS_FEATURE_DNS_HOOK=y
#endif /* OPLUS_FEATURE_DNS_HOOK */
#ifdef OPLUS_FEATURE_STATS_CALC
CONFIG_OPLUS_FEATURE_STATS_CALC=y
#endif /* OPLUS_FEATURE_STATS_CALC */
CONFIG_OPLUS_BINDER_STRATEGY=n
#ifdef OPLUS_FEATURE_MDMFEATURE
CONFIG_OPLUS_FEATURE_MDMFEATURE=y
#endif
#ifdef OPLUS_FEATURE_GAME_OPT
CONFIG_OPLUS_FEATURE_GAME_OPT=y
#endif
CONFIG_OPLUS_FEATURE_CPU_JANKINFO=y

View File

@@ -5,6 +5,7 @@ CONFIG_HIGH_RES_TIMERS=y
CONFIG_PREEMPT=y
CONFIG_IRQ_TIME_ACCOUNTING=y
CONFIG_SCHED_WALT=y
CONFIG_MMAP_LOCK_OPT=y
CONFIG_TASKSTATS=y
CONFIG_TASK_XACCT=y
CONFIG_TASK_IO_ACCOUNTING=y
@@ -23,6 +24,10 @@ CONFIG_BLK_CGROUP=y
CONFIG_DEBUG_BLK_CGROUP=y
CONFIG_CGROUP_FREEZER=y
CONFIG_CPUSETS=y
#ifdef OPLUS_FEATURE_TASK_CPUSTATS
CONFIG_OPLUS_SCHED=y
CONFIG_OPLUS_CTP=y
#endif /* OPLUS_FEATURE_TASK_CPUSTATS */
CONFIG_CGROUP_CPUACCT=y
CONFIG_CGROUP_BPF=y
CONFIG_CGROUP_DEBUG=y
@@ -64,7 +69,7 @@ CONFIG_ARM64_SW_TTBR0_PAN=y
CONFIG_RANDOMIZE_BASE=y
CONFIG_CMDLINE="cgroup_disable=pressure"
CONFIG_CMDLINE_EXTEND=y
CONFIG_BUILD_ARM64_UNCOMPRESSED_KERNEL=y
CONFIG_BUILD_ARM64_KERNEL_COMPRESSION_GZIP=y
CONFIG_COMPAT=y
CONFIG_PM_WAKELOCKS=y
CONFIG_PM_WAKELOCKS_LIMIT=0
@@ -81,6 +86,9 @@ CONFIG_CPU_FREQ_GOV_ONDEMAND=y
CONFIG_CPU_FREQ_GOV_CONSERVATIVE=y
CONFIG_CPU_BOOST=y
CONFIG_CPU_FREQ_GOV_SCHEDUTIL=y
#ifdef VENDOR_EDIT
CONFIG_CPU_FREQ_STAT=y
#endif
CONFIG_ARM_QCOM_CPUFREQ_HW=y
CONFIG_ARM_QCOM_CPUFREQ_HW_DEBUG=y
CONFIG_MSM_TZ_LOG=y
@@ -108,7 +116,10 @@ CONFIG_CFQ_GROUP_IOSCHED=y
CONFIG_IOSCHED_BFQ=y
CONFIG_BFQ_GROUP_IOSCHED=y
# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
CONFIG_MEMORY_HOTPLUG=y
#ifdef OPLUS_DEBUG_STABILITY
##CONFIG_MEMORY_HOTPLUG=y
# CONFIG_MEMORY_HOTPLUG is not set
# endif
CONFIG_MEMORY_HOTPLUG_DEFAULT_ONLINE=y
CONFIG_MEMORY_HOTPLUG_MOVABLE_NODE=y
CONFIG_MEMORY_HOTREMOVE=y
@@ -277,6 +288,11 @@ CONFIG_CFG80211=y
CONFIG_CFG80211_INTERNAL_REGDB=y
CONFIG_RFKILL=y
CONFIG_NFC_NQ=y
#ifdef OPLUS_NFC_BRINGUP
#Add for the kernel Macro for NXP PN557 NFC kernel
CONFIG_NFC_PN553_DEVICES=y
CONFIG_NXP_P73_DEVICES=y
#endif /*OPLUS_NFC_BRINGUP*/
CONFIG_FW_LOADER_USER_HELPER=y
CONFIG_FW_LOADER_USER_HELPER_FALLBACK=y
# CONFIG_FW_CACHE is not set
@@ -285,7 +301,7 @@ CONFIG_REGMAP_ALLOW_WRITE_DEBUGFS=y
CONFIG_DMA_CMA=y
CONFIG_MHI_BUS=y
CONFIG_ZRAM=y
CONFIG_ZRAM_DEDUP=y
#CONFIG_ZRAM_DEDUP=y
CONFIG_BLK_DEV_LOOP=y
CONFIG_BLK_DEV_LOOP_MIN_COUNT=16
CONFIG_BLK_DEV_RAM=y
@@ -305,6 +321,15 @@ CONFIG_SCSI_UFSHCD=y
CONFIG_SCSI_UFSHCD_PLATFORM=y
CONFIG_SCSI_UFS_QCOM=y
CONFIG_SCSI_UFSHCD_CMD_LOGGING=y
#ifdef OPLUS_FEATURE_UFSPLUS
CONFIG_UFSFEATURE=y
# CONFIG_UFSHPB is not set
CONFIG_UFSTW=y
# CONFIG_HPB_SUP_ONLY_4 is not set
# CONFIG_HPB_SUP_8_TO_32 is not set
# CONFIG_HPB_SUP_OVER_36 is not set
CONFIG_UFSTW_IGNORE_GUARANTEE_BIT=y
#endif /*OPLUS_FEATURE_UFSPLUS*/
CONFIG_SCSI_UFS_CRYPTO=y
CONFIG_SCSI_UFS_CRYPTO_QTI=y
CONFIG_MD=y
@@ -351,10 +376,29 @@ CONFIG_TABLET_USB_GTCO=y
CONFIG_TABLET_USB_HANWANG=y
CONFIG_TABLET_USB_KBTAB=y
CONFIG_INPUT_TOUCHSCREEN=y
CONFIG_SECURE_TOUCH_SYNAPTICS_DSX=y
CONFIG_OPLUS_TRI_STATE_KEY=y
CONFIG_TOUCHIRQ_UPDATE_QOS=y
CONFIG_TOUCHPANEL_NEW_SET_IRQ_WAKE=y
CONFIG_TOUCHPANEL_OPLUS=y
CONFIG_TOUCHPANEL_ALGORITHM=y
CONFIG_TOUCHPANEL_SYNAPTICS=y
CONFIG_TOUCHPANEL_GOODIX=y
CONFIG_TOUCHPANEL_SAMSUNG=y
CONFIG_TOUCHPANEL_SYNAPTICS_TCM_ONCELL=y
CONFIG_TOUCHPANEL_GOODIX_GT9886=y
CONFIG_TOUCHPANEL_SAMSUNG_S6SY771=y
CONFIG_TOUCHPANEL_FOCAL=y
CONFIG_TOUCHPANEL_FOCAL_FT3518=y
CONFIG_TOUCHPANEL_NOVA=y
CONFIG_TOUCHPANEL_NOVA_NT36672C_NOFLASH=y
CONFIG_TOUCHPANEL_NT_DIGITALNOISE_TEST=y
CONFIG_TOUCHPANEL_ILITEK=y
CONFIG_TOUCHPANEL_ILITEK_ILITEK7807S=y
# CONFIG_SECURE_TOUCH_SYNAPTICS_DSX is not set
CONFIG_TOUCHSCREEN_SYNAPTICS_TCM_REFLASH=y
CONFIG_TOUCHSCREEN_SYNAPTICS_TCM_RECOVERY=y
CONFIG_TOUCHSCREEN_FTS=y
CONFIG_TOUCHSCREEN_FTS=n
CONFIG_TOUCHSCREEN_ST=n
CONFIG_INPUT_MISC=y
CONFIG_INPUT_QPNP_POWER_ON=y
CONFIG_INPUT_UINPUT=y
@@ -387,12 +431,46 @@ CONFIG_POWER_RESET_QCOM=y
CONFIG_POWER_RESET_XGENE=y
CONFIG_POWER_RESET_SYSCON=y
CONFIG_QPNP_SMB5=y
CONFIG_SMB1390_CHARGE_PUMP_PSY=y
#ifdef OPLUS_FEATURE_CHG_BASIC//tongfeng.Huang@ProDrv.CHG,add 2019/06/22 for charger
CONFIG_OPLUS_SM7250R_CHARGER=y
CONFIG_SMB1355_SLAVE_CHARGER=y
CONFIG_QPNP_QG=y
CONFIG_SMB1398_CHARGER=y
CONFIG_OPLUS_CHIP_SOC_NODE=y
CONFIG_OPLUS_QPNP_QG=y
#else
#CONFIG_SMB1390_CHARGE_PUMP_PSY=y
#CONFIG_SMB1398_CHARGER=y
#CONFIG_QPNP_QG=y
#CONFIG_QPNP_FG_GEN4=y
#endif
#ifdef OPLUS_FEATURE_CHG_BASIC
CONFIG_OPLUS_SHORT_HW_CHECK=y
CONFIG_OPLUS_SHORT_USERSPACE=y
CONFIG_OPLUS_SHIP_MODE_SUPPORT=y
CONFIG_OPLUS_SMART_CHARGER_SUPPORT=y
CONFIG_OPLUS_CHARGER_OPTIGA=y
CONFIG_OPLUS_SHORT_C_BATT_CHECK=y
CONFIG_OPLUS_CHECK_CHARGERID_VOLT=y
CONFIG_OPLUS_SHORT_IC_CHECK=y
CONFIG_SONY_FF=y
CONFIG_HID_BETOP_FF=y
CONFIG_HID_PLAYSTATION=y
CONFIG_PLAYSTATION_FF=y
#endif
#ifndef OPLUS_FEATURE_CHG_BASIC//tongfeng.Huang@ProDrv.CHG,add 2019/12/22 for charger
#CONFIG_HL6111R=y
#endif
#ifdef OPLUS_FEATURE_CHG_BASIC
CONFIG_INPUT_JOYSTICK=y
CONFIG_JOYSTICK_XPAD=y
CONFIG_JOYSTICK_XPAD_FF=y
CONFIG_JOYSTICK_XPAD_LEDS=y
#endif /* OPLUS_FEATURE_CHG_BASIC */
CONFIG_THERMAL=y
CONFIG_THERMAL_STATISTICS=y
#ifndef OPLUS_FEATURE_THERMAL_STATISTICS
#CONFIG_THERMAL_STATISTICS=y
#endif
CONFIG_THERMAL_WRITABLE_TRIPS=y
CONFIG_THERMAL_GOV_USER_SPACE=y
CONFIG_THERMAL_GOV_LOW_LIMITS=y
@@ -403,6 +481,9 @@ CONFIG_THERMAL_TSENS=y
CONFIG_QTI_ADC_TM=y
CONFIG_QTI_VIRTUAL_SENSOR=y
CONFIG_QTI_QMI_SENSOR=y
CONFIG_SSC_INTERACTIVE=y
CONFIG_OPLUS_SENSOR_FB_QC=y
CONFIG_SENSORS_SSC=y
CONFIG_QTI_BCL_PMIC5=y
CONFIG_QTI_BCL_SOC_DRIVER=y
CONFIG_QTI_QMI_COOLING_DEVICE=y
@@ -425,6 +506,10 @@ CONFIG_REGULATOR_REFGEN=y
CONFIG_REGULATOR_RPMH=y
CONFIG_REGULATOR_STUB=y
CONFIG_REGULATOR_PM8008=y
#ifdef OPLUS_BUG_STABILITY
CONFIG_REGULATOR_TPS65132=y
CONFIG_REGULATOR_TPS65132_FOR_20267=y
#endif /* OPLUS_BUG_STABILITY */
CONFIG_MEDIA_SUPPORT=y
CONFIG_MEDIA_CAMERA_SUPPORT=y
CONFIG_MEDIA_DIGITAL_TV_SUPPORT=y
@@ -439,6 +524,10 @@ CONFIG_DVB_MPQ_DEMUX=m
CONFIG_DVB_MPQ_SW=y
CONFIG_VIDEO_V4L2_VIDEOBUF2_CORE=y
CONFIG_DRM=y
#ifdef VENDOR_EDIT
CONFIG_DRM_MSM=y
#endif /* VENDOR_EDIT */
CONFIG_DRM_LONTIUM_LT9611UXC=y
CONFIG_BACKLIGHT_LCD_SUPPORT=y
CONFIG_BACKLIGHT_CLASS_DEVICE=y
CONFIG_BACKLIGHT_QCOM_SPMI_WLED=y
@@ -544,6 +633,7 @@ CONFIG_IPA_UT=y
CONFIG_USB_BAM=y
CONFIG_QCOM_GENI_SE=y
# CONFIG_QCOM_A53PLL is not set
# CONFIG_IPA3_REGDUMP is not set
CONFIG_QCOM_CLK_RPMH=y
CONFIG_SPMI_PMIC_CLKDIV=y
CONFIG_MSM_CLK_AOP_QMP=y
@@ -560,6 +650,7 @@ CONFIG_SDM_DISPCC_LAGOON=y
CONFIG_SDM_GPUCC_LAGOON=y
CONFIG_SDM_NPUCC_LAGOON=y
CONFIG_SDM_VIDEOCC_LAGOON=y
CONFIG_HWSPINLOCK=y
CONFIG_HWSPINLOCK_QCOM=y
CONFIG_MAILBOX=y
@@ -682,6 +773,15 @@ CONFIG_OVERLAY_FS=y
CONFIG_INCREMENTAL_FS=y
CONFIG_MSDOS_FS=y
CONFIG_VFAT_FS=y
CONFIG_OPLUS_FEATURE_EROFS=y
CONFIG_EROFS_FS=y
#ifdef OPLUS_FEATURE_EXFAT_SUPPORT
CONFIG_NLS_UTF8=y
CONFIG_EXFAT_FS=y
# endif
#ifdef OPLUS_FEATURE_EMMC_SDCARD_OPTIMIZE
CONFIG_EMMC_SDCARD_OPTIMIZE=y
#endif
CONFIG_TMPFS=y
CONFIG_TMPFS_POSIX_ACL=y
CONFIG_EFIVAR_FS=y
@@ -708,7 +808,7 @@ CONFIG_CRYPTO_DEV_QCEDEV=y
CONFIG_XZ_DEC=y
CONFIG_PRINTK_TIME=y
CONFIG_DYNAMIC_DEBUG=y
CONFIG_DEBUG_CONSOLE_UNHASHED_POINTERS=y
# CONFIG_DEBUG_CONSOLE_UNHASHED_POINTERS is not set
CONFIG_DEBUG_MODULE_LOAD_INFO=y
CONFIG_DEBUG_INFO=y
CONFIG_PAGE_OWNER=y
@@ -722,15 +822,20 @@ CONFIG_DEBUG_PANIC_ON_OOM=y
CONFIG_DEBUG_PAGEALLOC_ENABLE_DEFAULT=y
CONFIG_PAGE_POISONING=y
CONFIG_PAGE_POISONING_ENABLE_DEFAULT=y
CONFIG_SLUB_DEBUG_ON=y
CONFIG_DEBUG_KMEMLEAK=y
#ifndef VENDOR_EDIT
#CONFIG_SLUB_DEBUG_ON=y
#CONFIG_DEBUG_KMEMLEAK=y
#else
CONFIG_DEBUG_KMEMLEAK=n
CONFIG_HAVE_DEBUG_KMEMLEAK=n
#endif//VENDOR_EDIT
CONFIG_DEBUG_KMEMLEAK_EARLY_LOG_SIZE=4000
CONFIG_DEBUG_KMEMLEAK_DEFAULT_OFF=y
CONFIG_DEBUG_STACK_USAGE=y
CONFIG_DEBUG_MEMORY_INIT=y
CONFIG_SOFTLOCKUP_DETECTOR=y
CONFIG_WQ_WATCHDOG=y
CONFIG_PANIC_TIMEOUT=5
CONFIG_PANIC_TIMEOUT=-1
CONFIG_PANIC_ON_SCHED_BUG=y
CONFIG_PANIC_ON_RT_THROTTLING=y
CONFIG_SCHEDSTATS=y
@@ -752,8 +857,9 @@ CONFIG_IPC_LOGGING=y
CONFIG_QCOM_RTB=y
CONFIG_QCOM_RTB_SEPARATE_CPUS=y
CONFIG_FUNCTION_TRACER=y
CONFIG_PREEMPTIRQ_EVENTS=y
CONFIG_IRQSOFF_TRACER=y
#ifndef VENDOR_EDIT
#CONFIG_PREEMPTIRQ_EVENTS=y
#endif//VENDOR_EDIT
CONFIG_IRQSOFF_TRACER=y
CONFIG_PREEMPT_TRACER=y
CONFIG_BLK_DEV_IO_TRACE=y
CONFIG_LKDTM=m
@@ -775,3 +881,269 @@ CONFIG_CORESIGHT_HWEVENT=y
CONFIG_CORESIGHT_DUMMY=y
CONFIG_CORESIGHT_REMOTE_ETM=y
CONFIG_CORESIGHT_TGU=y
#ifdef OPLUS_SYSTEM_KERNEL
#/*Add for oplus project*/
CONFIG_OPLUS_COMMON_SOFT=y
CONFIG_OPLUS_DEVICE_IFNO=y
CONFIG_OPLUS_RF_CABLE_MONITOR=y
CONFIG_RECORD_MDMRST=y
#endif
#ifdef OPLUS_FEATURE_POWERINFO_STANDBY
CONFIG_OPLUS_WAKELOCK_PROFILER=y
#endif /* OPLUS_FEATURE_POWERINFO_STANDBY */
# ifdef VENDOR_EDIT
CONFIG_CS_F61_NDT=y
# endif /* VENDOR_EDIT */
#ifdef OPLUS_SYSTEM_KERNEL
#all system oplus feature writer here
CONFIG_OPLUS_FEATURE_UBOOT_LOG=y
CONFIG_DETECT_HUNG_TASK=y
CONFIG_OPLUS_FEATURE_HUNG_TASK_ENHANCE=y
CONFIG_DEFAULT_HUNG_TASK_TIMEOUT=60
CONFIG_OPLUS_FEATURE_FEEDBACK=y
CONFIG_OPLUS_FEATURE_OPROJECT=y
CONFIG_OPLUS_FEATURE_CMDLINE=y
CONFIG_OPLUS_FEATURE_PROJECTINFO=y
CONFIG_OPLUS_FEATURE_PHOENIX=y
CONFIG_OPLUS_FEATURE_PHOENIX_REBOOT_SPEED=y
CONFIG_OPLUS_FEATURE_KMSG_WB=y
CONFIG_OPLUS_FEATURE_SHUTDOWN_SPEED=y
CONFIG_OPLUS_FEATURE_OLC=y
CONFIG_OPLUS_FEATURE_FDLEAK_CHECK=y
#add for qcom minidump customized
CONFIG_OPLUS_FEATURE_QCOM_MINIDUMP_ENHANCE=y
#Add for shutdown detect
CONFIG_OPLUS_FEATURE_SHUTDOWN_DETECT=y
#add slabtrace function
CONFIG_OPLUS_FEATURE_SLABTRACE_DEBUG=y
#enable oplus misc feature
# CONFIG_OPLUS_FEATURE_MISC is not set
#endif
#ifdef OPLUS_FEATURE_QCOM_PMICWD
#Add for qcom pmic watchdog
CONFIG_OPLUS_FEATURE_QCOM_PMICWD=y
#endif
#ifdef OPLUS_FEATURE_FINGERPRINT
CONFIG_OPLUS_FINGERPRINT=y
CONFIG_OPLUS_FINGERPRINT_QCOM=y
CONFIG_OPLUS_FINGERPRINT_GOODIX_OPTICAL=y
CONFIG_OPLUS_FINGERPRINT_JIIOV_OPTICAL=y
CONFIG_OPLUS_FINGERPRINT_SILEAD=y
CONFIG_OPLUS_FINGERPRINT_GOODIX=y
#endif
CONFIG_OPLUS_FINGERPRINT_GKI_DISABLE=y
#ifdef OPLUS_FEATURE_SECURITY_COMMON
CONFIG_OPLUS_SECURE=y
CONFIG_OPLUS_SECURE_QCOM=y
CONFIG_OPLUS_SECURE_COMMON=y
#endif /* OPLUS_FEATURE_SECURITY_COMMON */
#ifdef OPLUS_FEATURE_HANS_FREEZE
CONFIG_OPLUS_HANS=y
#endif /*OPLUS_FEATURE_HANS_FREEZE*/
#ifdef VENDOR_EDIT//Qingjun.Wang@BSP.Haptic,add 2020/03/17 for vib aw8697
CONFIG_AW8697_HAPTIC=y
#endif
#ifdef OPLUS_FEATURE_HEALTHINFO
CONFIG_OPLUS_MEM_MONITOR=y
CONFIG_FG_TASK_UID=y
CONFIG_OPLUS_HEALTHINFO=y
CONFIG_SLUB_DEBUG=y
#endif /* OPLUS_FEATURE_HEALTHINFO */
#ifdef OPLUS_FEATURE_HEALTHINFO
CONFIG_OPLUS_JANK_INFO=y
#endif /* OPLUS_FEATURE_HEALTHINFO */
#ifdef OPLUS_FEATURE_ZRAM_OPT
CONFIG_OPLUS_ZRAM_OPT=y
CONFIG_CRYPTO_LZ4=y
CONFIG_PGTABLE_MAPPING=y
CONFIG_CRYPTO_ZSTD=y
#endif
#ifdef OPLUS_FEATURE_MULTI_KSWAPD
CONFIG_OPLUS_MULTI_KSWAPD=y
CONFIG_KSWAPD_UNBIND_MAX_CPU=y
#endif
#ifdef VENDOR_EDIT
CONFIG_REGULATOR_PM8008=y
#ifdef OPLUS_FEATURE_DUMPDEVICE
CONFIG_PSTORE=y
CONFIG_PSTORE_CONSOLE=y
CONFIG_PSTORE_PMSG=y
CONFIG_PSTORE_RAM=y
#endif
#ifdef OPLUS_FEATURE_IOMONITOR
CONFIG_IOMONITOR=y
#CONFIG_IOMONITOR_WITH_F2FS=n
#endif /*OPLUS_FEATURE_IOMONITOR*/
CONFIG_OPLUS_FEATURE_PMIC_MONITOR=y
CONFIG_OPLUS_FEATURE_OF2FS=y
CONFIG_OPLUS_FEATURE_PANIC_FLUSH=y
CONFIG_OPLUS_FEATURE_OEXT4=y
CONFIG_F2FS_BD_STAT=y
# CONFIG_F2FS_GRADING_SSR is not set
#ifdef OPLUS_FEATURE_LOWMEM_DBG
CONFIG_OPLUS_FEATURE_LOWMEM_DBG=y
#endif /* OPLUS_FEATURE_LOWMEM_DBG */
#ifdef OPLUS_FEATURE_SCHED_ASSIST
CONFIG_BLK_WBT_SQ=y
CONFIG_BLK_WBT=y
CONFIG_OPLUS_FEATURE_UXIO_FIRST=y
#endif /*OPLUS_FEATURE_SCHED_ASSIST*/
#ifdef OPLUS_FEATURE_MEMLEAK_DETECT
CONFIG_KMALLOC_DEBUG=y
CONFIG_VMALLOC_DEBUG=y
CONFIG_DUMP_TASKS_MEM=y
#endif /*OPLUS_FEATURE_MEMLEAK_DETECT*/
#ifdef OPLUS_FEATURE_MEMLEAK_DETECT
CONFIG_SVELTE=y
#endif
#ifdef VENDOR_EDIT
CONFIG_OPLUS_FEATURE_RECORD_MDMRST=y
#endif
#ifdef OPLUS_ARCH_EXTENDS
CONFIG_OPLUS_FEATURE_MM_FEEDBACK=y
#endif
#ifdef OPLUS_FEATURE_MIDAS
CONFIG_OPLUS_FEATURE_MIDAS=y
#endif
#ifdef CONFIG_OPLUS_FEATURE_BINDER_STATS_ENABLE
CONFIG_OPLUS_FEATURE_BINDER_STATS_ENABLE=y
#endif
#ifdef OPLUS_FEATURE_ACM
CONFIG_OPLUS_FEATURE_ACM=y
CONFIG_OPLUS_FEATURE_ACM_LOGGING=y
#endif /* OPLUS_FEATURE_ACM */
#ifdef OPLUS_BUG_STABILITY
CONFIG_HARDEN_BRANCH_PREDICTOR=y
#endif
CONFIG_OPLUS_FEATURE_THEIA=y
##endif /* OPLUS_FEATURE_THEIA */
#ifdef OPLUS_FEATURE_SAUPWK
CONFIG_OPLUS_FEATURE_SAUPWK=y
#endif /* OPLUS_FEATURE_SAUPWK */
#ifdef OPLUS_FEATURE_DUMP_DEVICE_INFO
CONFIG_OPLUS_FEATURE_DUMP_DEVICE_INFO=y
#endif /* OPLUS_FEATURE_DUMP_DEVICE_INFO */
#ifdef OPLUS_BUG_STABILITY
CONFIG_UNMAP_KERNEL_AT_EL0=y
#endif
#ifdef OPLUS_FEATURE_GPIO_NC
CONFIG_OPLUS_GPIO_NC=y
#endif
#ifdef OPLUS_FEATURE_BOOTLOADER_LOG
CONFIG_BOOTLOADER_LOG=y
#endif
#ifdef OPLUS_FEATURE_PARAM
CONFIG_PARAM_READ_WRITE=y
#endif
#ifdef OPLUS_FEATURE_TP_BASIC
CONFIG_SIM_DETECT=y
#endif
#ifdef OPLUS_FEATURE_TP_BASIC
CONFIG_OPLUS_FW_UPDATE=y
#endif /*OPLUS_FEATURE_TP_BASIC*/
#ifdef OPLUS_FEATURE_ION_BOOSTPOOL
#CONFIG_OPLUS_ION_BOOSTPOOL=y ,Add for disable boost pool ,fill generic ION POOL
#endif /* OPLUS_FEATURE_ION_BOOSTPOOL */
#ifdef OPLUS_FEATURE_WIFI_SLA
#todo: need to change to m when GKI
CONFIG_OPLUS_FEATURE_WIFI_SLA=y
#endif /* OPLUS_FEATURE_WIFI_SLA */
#ifdef OPLUS_FEATURE_WIFI_ROUTERBOOST
CONFIG_OPLUS_FEATURE_WIFI_ROUTERBOOST=m
#endif /* OPLUS_FEATURE_WIFI_ROUTERBOOST */
#ifdef OPLUS_FEATURE_DATA_EVAL
CONFIG_OPLUS_FEATURE_DATA_EVAL=y
#endif /* OPLUS_FEATURE_DATA_EVAL */
#ifdef OPLUS_FEATURE_DATA_LIMIT
CONFIG_IFB=y
CONFIG_NET_SCH_MULTIQ=y
CONFIG_NET_SCH_NETEM=y
CONFIG_CLS_U32_MARK=y
CONFIG_NET_CLS_FLOW=y
CONFIG_NET_EMATCH_CMP=y
CONFIG_NET_EMATCH_NBYTE=y
CONFIG_NET_EMATCH_META=y
CONFIG_NET_EMATCH_TEXT=y
CONFIG_NET_ACT_GACT=y
CONFIG_NET_ACT_MIRRED=y
CONFIG_NET_ACT_SKBEDIT=y
#endif /* OPLUS_FEATURE_DATA_LIMIT */
CONFIG_DEBUG_SPINLOCK=y
#CONFIG_NANDSWAP=y
#CONFIG_NANDSWAP_DEBUG=y
#ifdef OPLUS_FEATURE_IM
#ted.lin@optimization add for im module
CONFIG_OPLUS_FEATURE_IM=y
#endif /* OPLUS_FEATURE_IM */
CONFIG_FRAME_WARN=4096
CONFIG_DYNAMIC_TUNNING_SWAPPINESS=y
#ifdef OPLUS_FEATURE_HYBRIDSWAP
CONFIG_HYBRIDSWAP=y
CONFIG_HYBRIDSWAP_SWAPD=y
CONFIG_HYBRIDSWAP_CORE=y
#endif
#ifdef OPLUS_FEATURE_UID_PERF
CONFIG_OPLUS_FEATURE_UID_PERF=y
#endif
#ifdef OPLUS_FEATURE_ESIM
CONFIG_OPLUS_GPIO=y
#endif /* OPLUS_FEATURE_ESIM */
#ifdef OPLUS_FEATURE_DNS_HOOK
CONFIG_OPLUS_FEATURE_DNS_HOOK=y
#endif /* OPLUS_FEATURE_DNS_HOOK */
#ifdef OPLUS_FEATURE_STATS_CALC
CONFIG_OPLUS_FEATURE_STATS_CALC=y
#endif /* OPLUS_FEATURE_STATS_CALC */
#ifdef OPLUS_FEATURE_TPD
CONFIG_OPLUS_FEATURE_TPD=y
#endif
#ifdef OPLUS_FEATURE_MDMFEATURE
CONFIG_OPLUS_FEATURE_MDMFEATURE=y
#endif
#ifdef OPLUS_FEATURE_GAME_OPT
CONFIG_OPLUS_FEATURE_GAME_OPT=y
#endif
CONFIG_OPLUS_BINDER_STRATEGY=n
CONFIG_OPLUS_FEATURE_CPU_JANKINFO=y

View File

@@ -200,5 +200,4 @@ static inline void flush_cache_vunmap(unsigned long start, unsigned long end)
}
int set_memory_valid(unsigned long addr, int numpages, int enable);
#endif

View File

@@ -571,6 +571,20 @@ enum mitigation_state arm64_get_spectre_bhb_state(void);
bool is_spectre_bhb_affected(const struct arm64_cpu_capabilities *entry, int scope);
u8 spectre_bhb_loop_affected(int scope);
void spectre_bhb_enable_mitigation(const struct arm64_cpu_capabilities *__unused);
/* Check whether hardware update of the Access flag is supported */
static inline bool cpu_has_hw_af(void)
{
u64 mmfr1;
if (!IS_ENABLED(CONFIG_ARM64_HW_AFDBM))
return false;
mmfr1 = read_cpuid(ID_AA64MMFR1_EL1);
return cpuid_feature_extract_unsigned_field(mmfr1,
ID_AA64MMFR1_HADBS_SHIFT);
}
#endif /* __ASSEMBLY__ */
#endif

View File

@@ -115,9 +115,20 @@ static inline u64 __raw_readq_no_log(const volatile void __iomem *addr)
LOG_BARRIER; \
})
#ifdef OPLUS_FEATURE_CHG_BASIC
#define __raw_write_logged_oplus_vooc(v, a, _t) ({ \
volatile void __iomem *_a = (a); \
__raw_write##_t##_no_log((v), _a); \
})
#endif /* OPLUS_FEATURE_CHG_BASIC */
#define __raw_writeb(v, a) __raw_write_logged((v), a, b)
#define __raw_writew(v, a) __raw_write_logged((v), a, w)
#define __raw_writel(v, a) __raw_write_logged((v), a, l)
#ifdef OPLUS_FEATURE_CHG_BASIC
#define __raw_writel_oplus_vooc(v, a) __raw_write_logged_oplus_vooc((v), a, l)
#endif /* OPLUS_FEATURE_CHG_BASIC */
#define __raw_writeq(v, a) __raw_write_logged((v), a, q)
#define __raw_read_logged(a, _l, _t) ({ \
@@ -132,9 +143,20 @@ static inline u64 __raw_readq_no_log(const volatile void __iomem *addr)
__a; \
})
#ifdef OPLUS_FEATURE_CHG_BASIC
#define __raw_read_logged_oplus_vooc(a, _l, _t) ({ \
_t __a; \
const volatile void __iomem *_a = (const volatile void __iomem *)(a); \
__a = __raw_read##_l##_no_log(_a); \
})
#endif /* OPLUS_FEATURE_CHG_BASIC */
#define __raw_readb(a) __raw_read_logged((a), b, u8)
#define __raw_readw(a) __raw_read_logged((a), w, u16)
#define __raw_readl(a) __raw_read_logged((a), l, u32)
#ifdef OPLUS_FEATURE_CHG_BASIC
#define __raw_readl_oplus_vooc(a) __raw_read_logged_oplus_vooc((a), l, u32)
#endif /* OPLUS_FEATURE_CHG_BASIC */
#define __raw_readq(a) __raw_read_logged((a), q, u64)
/* IO barriers */
@@ -167,11 +189,18 @@ static inline u64 __raw_readq_no_log(const volatile void __iomem *addr)
#define readb_relaxed(c) ({ u8 __r = __raw_readb(c); __r; })
#define readw_relaxed(c) ({ u16 __r = le16_to_cpu((__force __le16)__raw_readw(c)); __r; })
#define readl_relaxed(c) ({ u32 __r = le32_to_cpu((__force __le32)__raw_readl(c)); __r; })
#ifdef OPLUS_FEATURE_CHG_BASIC
#define readl_relaxed_oplus_vooc(c) ({ u32 __r = le32_to_cpu((__force __le32)__raw_readl_oplus_vooc(c)); __r; })
#endif /* OPLUS_FEATURE_CHG_BASIC */
#define readq_relaxed(c) ({ u64 __r = le64_to_cpu((__force __le64)__raw_readq(c)); __r; })
#define writeb_relaxed(v,c) ((void)__raw_writeb((v),(c)))
#define writew_relaxed(v,c) ((void)__raw_writew((__force u16)cpu_to_le16(v),(c)))
#define writel_relaxed(v,c) ((void)__raw_writel((__force u32)cpu_to_le32(v),(c)))
#ifdef OPLUS_FEATURE_CHG_BASIC
#define writel_relaxed_oplus_vooc(v,c) ((void)__raw_writel_oplus_vooc((__force u32)cpu_to_le32(v),(c)))
#endif /* OPLUS_FEATURE_CHG_BASIC */
#define writeq_relaxed(v,c) ((void)__raw_writeq((__force u64)cpu_to_le64(v),(c)))
#define readb_relaxed_no_log(c) ({ u8 __v = __raw_readb_no_log(c); __v; })
@@ -198,11 +227,17 @@ static inline u64 __raw_readq_no_log(const volatile void __iomem *addr)
#define readb(c) ({ u8 __v = readb_relaxed(c); __iormb(__v); __v; })
#define readw(c) ({ u16 __v = readw_relaxed(c); __iormb(__v); __v; })
#define readl(c) ({ u32 __v = readl_relaxed(c); __iormb(__v); __v; })
#ifdef OPLUS_FEATURE_CHG_BASIC
#define readl_oplus_vooc(c) ({ u32 __v = readl_relaxed_oplus_vooc(c); __v; })
#endif /* OPLUS_FEATURE_CHG_BASIC */
#define readq(c) ({ u64 __v = readq_relaxed(c); __iormb(__v); __v; })
#define writeb(v,c) ({ __iowmb(); writeb_relaxed((v),(c)); })
#define writew(v,c) ({ __iowmb(); writew_relaxed((v),(c)); })
#define writel(v,c) ({ __iowmb(); writel_relaxed((v),(c)); })
#ifdef OPLUS_FEATURE_CHG_BASIC
#define writel_oplus_vooc(v,c) ({ writel_relaxed_oplus_vooc((v),(c)); })
#endif /* OPLUS_FEATURE_CHG_BASIC */
#define writeq(v,c) ({ __iowmb(); writeq_relaxed((v),(c)); })
#define readb_no_log(c) \

View File

@@ -32,6 +32,7 @@
#ifndef __ASSEMBLY__
#include <asm/cpufeature.h>
#include <asm/pgtable-types.h>
#define _PROT_DEFAULT (PTE_TYPE_PAGE | PTE_AF | PTE_SHARED)

View File

@@ -782,6 +782,8 @@ extern pgd_t tramp_pg_dir[PTRS_PER_PGD];
extern int kern_addr_valid(unsigned long addr);
#define arch_has_hw_pte_young cpu_has_hw_af
#include <asm-generic/pgtable.h>
void pgd_cache_init(void);

View File

@@ -73,3 +73,7 @@ extra-y += $(head-y) vmlinux.lds
ifeq ($(CONFIG_DEBUG_EFI),y)
AFLAGS_head.o += -DVMLINUX_PATH="\"$(realpath $(objtree)/vmlinux)\""
endif
#ifdef CONFIG_OPLUS_SECURE_GUARD
obj-$(CONFIG_OPLUS_SECURE_GUARD) += rootguard/
#endif /* CONFIG_OPLUS_SECURE_GUARD */

View File

@@ -180,6 +180,21 @@ int main(void)
#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
DEFINE(TRAMP_VALIAS, TRAMP_VALIAS);
#endif
#ifdef CONFIG_OPLUS_SECURE_GUARD
#ifdef CONFIG_OPLUS_ROOT_CHECK
DEFINE(PROOT_TSK_CRED, offsetof(struct task_struct, cred));
DEFINE(PROOT_CRED_UID, offsetof(struct cred, uid));
DEFINE(PROOT_CRED_EUID, offsetof(struct cred, euid));
DEFINE(PROOT_CRED_FSUID, offsetof(struct cred, fsuid));
#ifdef CONFIG_THREAD_INFO_IN_TASK
DEFINE(PROOT_THREAD_ADDR_LIMIT, offsetof(struct task_struct, thread_info.addr_limit));
#else
DEFINE(PROOT_THREAD_TSK, offsetof(struct thread_info,task));
DEFINE(PROOT_THREAD_ADDR_LIMIT, offsetof(struct thread_info, addr_limit));
#endif
#endif /* CONFIG_OPLUS_ROOT_CHECK */
#endif /* CONFIG_OPLUS_SECURE_GUARD */
#ifdef CONFIG_ARM_SDE_INTERFACE
DEFINE(SDEI_EVENT_INTREGS, offsetof(struct sdei_registered_event, interrupted_regs));
DEFINE(SDEI_EVENT_PRIORITY, offsetof(struct sdei_registered_event, priority));

View File

@@ -35,6 +35,13 @@ static __init u64 get_kaslr_seed(void *fdt)
if (node < 0)
return 0;
#ifdef OPLUS_BUG_STABILITY
/* Aging/kasan version disable kaslr, easier to analysis problem */
#ifdef CONFIG_KASAN
return 0;
#endif
#endif
prop = fdt_getprop_w(fdt, node, "kaslr-seed", &len);
if (!prop || len != sizeof(u64))
return 0;

View File

@@ -68,6 +68,8 @@ unsigned long __stack_chk_guard __ro_after_init;
EXPORT_SYMBOL(__stack_chk_guard);
#endif
#ifndef CONFIG_OPLUS_FEATURE_QCOM_MINIDUMP_ENHANCE
/*
* Function pointers to optional machine specific functions
*/
@@ -77,6 +79,19 @@ EXPORT_SYMBOL_GPL(pm_power_off);
void (*arm_pm_restart)(enum reboot_mode reboot_mode, const char *cmd);
EXPORT_SYMBOL_GPL(arm_pm_restart);
#else
#include <soc/oplus/system/qcom_minidump_enhance.h>
/*
* Function pointers to optional machine specific functions
*/
void (*pm_power_off)(void) = do_poweroff_early;
EXPORT_SYMBOL_GPL(pm_power_off);
void (*arm_pm_restart)(enum reboot_mode reboot_mode, const char *cmd) = do_restart_early;
#endif /* CONFIG_OPLUS_FEATURE_QCOM_MINIDUMP_ENHANCE */
/*
* This is our default idle handler.
*/
@@ -289,13 +304,18 @@ void __show_regs(struct pt_regs *regs)
sp = regs->sp;
top_reg = 29;
}
show_regs_print_info(KERN_DEFAULT);
print_pstate(regs);
if (!user_mode(regs)) {
printk("pc : %pS\n", (void *)regs->pc);
printk("lr : %pS\n", (void *)lr);
#ifdef CONFIG_OPLUS_FEATURE_QCOM_MINIDUMP_ENHANCE
printk("pc : %016llx\n", regs->pc);
printk("lr : %016llx\n", lr);
#endif
} else {
printk("pc : %016llx\n", regs->pc);
printk("lr : %016llx\n", lr);

1
arch/arm64/kernel/rootguard Symbolic link
View File

@@ -0,0 +1 @@
../../../../../vendor/oplus/kernel/secureguard/rootguard

View File

@@ -17,6 +17,11 @@
long compat_arm_syscall(struct pt_regs *regs, int scno);
long sys_ni_syscall(void);
#ifdef CONFIG_OPLUS_SECURE_GUARD
extern void oplus_invoke_syscall(struct pt_regs *regs, unsigned int scno,
unsigned int sc_nr,
const syscall_fn_t syscall_table[]);
#else
static long do_ni_syscall(struct pt_regs *regs, int scno)
{
#ifdef CONFIG_COMPAT
@@ -41,7 +46,6 @@ static void invoke_syscall(struct pt_regs *regs, unsigned int scno,
const syscall_fn_t syscall_table[])
{
long ret;
if (scno < sc_nr) {
syscall_fn_t syscall_fn;
syscall_fn = syscall_table[array_index_nospec(scno, sc_nr)];
@@ -55,6 +59,7 @@ static void invoke_syscall(struct pt_regs *regs, unsigned int scno,
regs->regs[0] = ret;
}
#endif /* CONFIG_OPLUS_SECURE_GUARD */
static inline bool has_syscall_work(unsigned long flags)
{
@@ -113,9 +118,11 @@ static void el0_svc_common(struct pt_regs *regs, int scno, int sc_nr,
if (scno == NO_SYSCALL)
goto trace_exit;
}
#ifdef CONFIG_OPLUS_SECURE_GUARD
oplus_invoke_syscall(regs, scno, sc_nr, syscall_table);
#else
invoke_syscall(regs, scno, sc_nr, syscall_table);
#endif /* CONFIG_OPLUS_SECURE_GUARD */
/*
* The tracing status may have changed under our feet, so we have to
* check again. However, if we were tracing entry, then we always trace

1
arch/arm64/mm/arch_mmap.h Symbolic link
View File

@@ -0,0 +1 @@
../../../../../vendor/oplus/kernel/oplus_performance/gloom_new/arch_mmap.h

View File

@@ -48,6 +48,9 @@
#include <soc/qcom/scm.h>
#include <acpi/ghes.h>
#if defined(OPLUS_FEATURE_IOMONITOR) && defined(CONFIG_IOMONITOR)
#include <linux/iomonitor/iomonitor.h>
#endif /*OPLUS_FEATURE_IOMONITOR*/
struct fault_info {
int (*fn)(unsigned long addr, unsigned int esr,
@@ -567,6 +570,9 @@ done:
*/
if (major) {
tsk->maj_flt++;
#if defined(OPLUS_FEATURE_IOMONITOR) && defined(CONFIG_IOMONITOR)
iomonitor_update_fs_stats(FS_MAJOR_FAULT, 1);
#endif /*OPLUS_FEATURE_IOMONITOR*/
perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, regs,
addr);
} else {

View File

@@ -30,6 +30,9 @@
#include <linux/random.h>
#include <asm/cputype.h>
#if defined(OPLUS_FEATURE_VIRTUAL_RESERVE_MEMORY) && defined(CONFIG_VIRTUAL_RESERVE_MEMORY)
#include "arch_mmap.h"
#endif
/*
* Leave enough space between the mmap area and the stack to honour ulimit in

View File

@@ -62,6 +62,7 @@ config X86
select ARCH_HAS_PMEM_API if X86_64
select ARCH_HAS_PTE_SPECIAL
select ARCH_HAS_REFCOUNT
select ARCH_HAS_NONLEAF_PMD_YOUNG
select ARCH_HAS_UACCESS_FLUSHCACHE if X86_64
select ARCH_HAS_UACCESS_MCSAFE if X86_64 && X86_MCE
select ARCH_HAS_SET_MEMORY

View File

@@ -826,7 +826,8 @@ static inline pte_t *pte_offset_kernel(pmd_t *pmd, unsigned long address)
static inline int pmd_bad(pmd_t pmd)
{
return (pmd_flags(pmd) & ~_PAGE_USER) != _KERNPG_TABLE;
return (pmd_flags(pmd) & ~(_PAGE_USER | _PAGE_ACCESSED)) !=
(_KERNPG_TABLE & ~_PAGE_ACCESSED);
}
static inline unsigned long pages_to_mb(unsigned long npg)
@@ -1442,6 +1443,12 @@ static inline bool arch_has_pfn_modify_check(void)
return boot_cpu_has_bug(X86_BUG_L1TF);
}
#define arch_has_hw_pte_young arch_has_hw_pte_young
static inline bool arch_has_hw_pte_young(void)
{
return true;
}
#include <asm-generic/pgtable.h>
#endif /* __ASSEMBLY__ */

View File

@@ -560,7 +560,7 @@ int ptep_test_and_clear_young(struct vm_area_struct *vma,
return ret;
}
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
#if defined(CONFIG_TRANSPARENT_HUGEPAGE) || defined(CONFIG_ARCH_HAS_NONLEAF_PMD_YOUNG)
int pmdp_test_and_clear_young(struct vm_area_struct *vma,
unsigned long addr, pmd_t *pmdp)
{
@@ -572,6 +572,9 @@ int pmdp_test_and_clear_young(struct vm_area_struct *vma,
return ret;
}
#endif
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
int pudp_test_and_clear_young(struct vm_area_struct *vma,
unsigned long addr, pud_t *pudp)
{

View File

@@ -36,7 +36,7 @@ config LBDAF
This option is required to support the full capacity of large
(2TB+) block devices, including RAID, disk, Network Block Device,
Logical Volume Manager (LVM) and loopback.
This option also enables support for single files larger than
2TB.
@@ -246,3 +246,7 @@ config BLK_MQ_RDMA
default y
source block/Kconfig.iosched
#ifdef OPLUS_FEATURE_SCHED_ASSIST
source block/uxio_first/Kconfig
#endif

View File

@@ -39,4 +39,7 @@ obj-$(CONFIG_BLK_DEBUG_FS_ZONED)+= blk-mq-debugfs-zoned.o
obj-$(CONFIG_BLK_SED_OPAL) += sed-opal.o
obj-$(CONFIG_BLK_INLINE_ENCRYPTION) += keyslot-manager.o bio-crypt-ctx.o \
blk-crypto.o
obj-$(CONFIG_BLK_INLINE_ENCRYPTION_FALLBACK) += blk-crypto-fallback.o
obj-$(CONFIG_BLK_INLINE_ENCRYPTION_FALLBACK) += blk-crypto-fallback.o
#ifdef OPLUS_FEATURE_SCHED_ASSIST
obj-$(CONFIG_OPLUS_FEATURE_UXIO_FIRST) += uxio_first/
#endif /*OPLUS_FEATURE_SCHED_ASSIST*/

View File

@@ -46,6 +46,14 @@
#include "blk-mq-sched.h"
#include "blk-rq-qos.h"
#if defined(OPLUS_FEATURE_IOMONITOR) && defined(CONFIG_IOMONITOR)
#include <linux/iomonitor/iomonitor.h>
#endif /*OPLUS_FEATURE_IOMONITOR*/
#if defined(OPLUS_FEATURE_SCHED_ASSIST) && defined(CONFIG_OPLUS_FEATURE_UXIO_FIRST)
#include "uxio_first/uxio_first_opt.h"
#endif
#ifdef CONFIG_DEBUG_FS
struct dentry *blk_debugfs_root;
#endif
@@ -190,6 +198,9 @@ void blk_rq_init(struct request_queue *q, struct request *rq)
memset(rq, 0, sizeof(*rq));
INIT_LIST_HEAD(&rq->queuelist);
#if defined(OPLUS_FEATURE_SCHED_ASSIST) && defined(CONFIG_OPLUS_FEATURE_UXIO_FIRST)
INIT_LIST_HEAD(&rq->ux_fg_bg_list);
#endif
INIT_LIST_HEAD(&rq->timeout_list);
rq->cpu = -1;
rq->q = q;
@@ -1016,6 +1027,11 @@ struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id,
return NULL;
INIT_LIST_HEAD(&q->queue_head);
#if defined(OPLUS_FEATURE_SCHED_ASSIST) && defined(CONFIG_OPLUS_FEATURE_UXIO_FIRST)
INIT_LIST_HEAD(&q->ux_head);
INIT_LIST_HEAD(&q->fg_head);
INIT_LIST_HEAD(&q->bg_head);
#endif
q->last_merge = NULL;
q->end_sector = 0;
q->boundary_rq = NULL;
@@ -1476,7 +1492,9 @@ out:
*/
if (ioc_batching(q, ioc))
ioc->nr_batch_requests--;
#if defined(OPLUS_FEATURE_IOMONITOR) && defined(CONFIG_IOMONITOR)
iomonitor_init_reqstats(rq);
#endif /*OPLUS_FEATURE_IOMONITOR*/
trace_block_getrq(q, bio, op);
return rq;
@@ -1774,8 +1792,11 @@ void __blk_put_request(struct request_queue *q, struct request *req)
/* this is a bio leak */
WARN_ON(req->bio != NULL);
#if defined(OPLUS_FEATURE_SCHED_ASSIST) && defined(CONFIG_OPLUS_FEATURE_UXIO_FIRST)
rq_qos_done(q, req, (bool)((req->cmd_flags & REQ_FG)||(req->cmd_flags & REQ_UX)));
#else
rq_qos_done(q, req);
#endif
/*
* Request may not have originated from ll_rw_blk. if not,
* it didn't come out of our reserved rq pools
@@ -1986,9 +2007,14 @@ out:
void blk_init_request_from_bio(struct request *req, struct bio *bio)
{
struct io_context *ioc = rq_ioc(bio);
if (bio->bi_opf & REQ_RAHEAD)
req->cmd_flags |= REQ_FAILFAST_MASK;
#if defined(OPLUS_FEATURE_SCHED_ASSIST) && defined(CONFIG_OPLUS_FEATURE_UXIO_FIRST)
if (bio->bi_opf & REQ_UX)
req->cmd_flags |= REQ_UX;
else if (bio->bi_opf & REQ_FG)
req->cmd_flags |= REQ_FG;
#endif
req->__sector = bio->bi_iter.bi_sector;
if (ioprio_valid(bio_prio(bio)))
@@ -2570,11 +2596,17 @@ blk_qc_t submit_bio(struct bio *bio)
if (op_is_write(bio_op(bio))) {
count_vm_events(PGPGOUT, count);
#if defined(OPLUS_FEATURE_IOMONITOR) && defined(CONFIG_IOMONITOR)
iomonitor_update_vm_stats(PGPGOUT, count);
#endif /*OPLUS_FEATURE_IOMONITOR*/
} else {
if (bio_flagged(bio, BIO_WORKINGSET))
workingset_read = true;
task_io_account_read(bio->bi_iter.bi_size);
count_vm_events(PGPGIN, count);
#if defined(OPLUS_FEATURE_IOMONITOR) && defined(CONFIG_IOMONITOR)
iomonitor_update_vm_stats(PGPGIN, count);
#endif /*OPLUS_FEATURE_IOMONITOR*/
}
if (unlikely(block_dump)) {
@@ -2586,7 +2618,12 @@ blk_qc_t submit_bio(struct bio *bio)
bio_devname(bio, b), count);
}
}
#if defined(OPLUS_FEATURE_SCHED_ASSIST) && defined(CONFIG_OPLUS_FEATURE_UXIO_FIRST)
if (test_task_ux(current))
bio->bi_opf |= REQ_UX;
else if (high_prio_for_task(current))
bio->bi_opf |= REQ_FG;
#endif
/*
* If we're reading data that is part of the userspace
* workingset, count submission time as memory stall. When the
@@ -2793,7 +2830,11 @@ void blk_account_io_done(struct request *req, u64 now)
* Don't process normal requests when queue is suspended
* or in the process of suspending/resuming
*/
#if defined(OPLUS_FEATURE_SCHED_ASSIST) && defined(CONFIG_OPLUS_FEATURE_UXIO_FIRST)
bool blk_pm_allow_request(struct request *rq)
#else
static bool blk_pm_allow_request(struct request *rq)
#endif
{
switch (rq->q->rpm_status) {
case RPM_RESUMING:
@@ -2806,7 +2847,11 @@ static bool blk_pm_allow_request(struct request *rq)
}
}
#else
#if defined(OPLUS_FEATURE_SCHED_ASSIST) && defined(CONFIG_OPLUS_FEATURE_UXIO_FIRST)
bool blk_pm_allow_request(struct request *rq)
#else
static bool blk_pm_allow_request(struct request *rq)
#endif
{
return true;
}
@@ -2856,14 +2901,24 @@ static struct request *elv_next_request(struct request_queue *q)
WARN_ON_ONCE(q->mq_ops);
while (1) {
list_for_each_entry(rq, &q->queue_head, queuelist) {
#if defined(OPLUS_FEATURE_SCHED_ASSIST) && defined(CONFIG_OPLUS_FEATURE_UXIO_FIRST)
if (likely(sysctl_uxio_io_opt)){
rq = smart_peek_request(q);
if (rq)
return rq;
} else
{
#endif
list_for_each_entry(rq, &q->queue_head, queuelist) {
if (blk_pm_allow_request(rq))
return rq;
if (rq->rq_flags & RQF_SOFTBARRIER)
break;
}
#if defined(OPLUS_FEATURE_SCHED_ASSIST) && defined(CONFIG_OPLUS_FEATURE_UXIO_FIRST)
}
#endif
/*
* Flush request is running and flush request isn't queueable
* in the drive, we can hold the queue till flush request is
@@ -2927,6 +2982,10 @@ struct request *blk_peek_request(struct request_queue *q)
* not be passed by new incoming requests
*/
rq->rq_flags |= RQF_STARTED;
#if defined(OPLUS_FEATURE_IOMONITOR) && defined(CONFIG_IOMONITOR)
rq->req_td = ktime_get();
#endif /*OPLUS_FEATURE_IOMONITOR*/
trace_block_rq_issue(q, rq);
}
@@ -2986,7 +3045,9 @@ struct request *blk_peek_request(struct request_queue *q)
break;
}
}
#if defined(OPLUS_FEATURE_IOMONITOR) && defined(CONFIG_IOMONITOR)
iomonitor_record_io_history(rq);
#endif /*OPLUS_FEATURE_IOMONITOR*/
return rq;
}
EXPORT_SYMBOL(blk_peek_request);
@@ -2999,14 +3060,27 @@ static void blk_dequeue_request(struct request *rq)
BUG_ON(ELV_ON_HASH(rq));
list_del_init(&rq->queuelist);
#if defined(OPLUS_FEATURE_SCHED_ASSIST) && defined(CONFIG_OPLUS_FEATURE_UXIO_FIRST)
list_del_init(&rq->ux_fg_bg_list);
#endif
/*
* the time frame between a request being removed from the lists
* and to it is freed is accounted as io that is in progress at
* the driver side.
*/
if (blk_account_rq(rq))
q->in_flight[rq_is_sync(rq)]++;
#ifdef OPLUS_FEATURE_HEALTHINFO
// Add for ioqueue
#ifdef CONFIG_OPLUS_HEALTHINFO
if (blk_account_rq(rq)) {
q->in_flight[rq_is_sync(rq)]++;
ohm_ioqueue_add_inflight(q, rq);
}
#else
if (blk_account_rq(rq))
q->in_flight[rq_is_sync(rq)]++;
#endif
#endif
}
/**
@@ -3117,6 +3191,9 @@ bool blk_update_request(struct request *req, blk_status_t error,
int total_bytes;
trace_block_rq_complete(req, blk_status_to_errno(error), nr_bytes);
#if defined(OPLUS_FEATURE_IOMONITOR) && defined(CONFIG_IOMONITOR)
iomonitor_record_reqstats(req, nr_bytes);
#endif /*OPLUS_FEATURE_IOMONITOR*/
if (!req->bio)
return false;
@@ -3254,7 +3331,11 @@ void blk_finish_request(struct request *req, blk_status_t error)
blk_account_io_done(req, now);
if (req->end_io) {
#if defined(OPLUS_FEATURE_SCHED_ASSIST) && defined(CONFIG_OPLUS_FEATURE_UXIO_FIRST)
rq_qos_done(q, req, (bool)((req->cmd_flags & REQ_FG)||(req->cmd_flags & REQ_UX)));
#else
rq_qos_done(q, req);
#endif
req->end_io(req, error);
} else {
if (blk_bidi_rq(req))

View File

@@ -76,6 +76,10 @@
#include "blk-mq-tag.h"
#include "blk-mq-sched.h"
#if defined(OPLUS_FEATURE_SCHED_ASSIST) && defined(CONFIG_OPLUS_FEATURE_UXIO_FIRST)
#include "uxio_first/uxio_first_opt.h"
#endif
/* PREFLUSH/FUA sequences */
enum {
REQ_FSEQ_PREFLUSH = (1 << 0), /* pre-flushing in progress */
@@ -142,6 +146,9 @@ static bool blk_flush_queue_rq(struct request *rq, bool add_front)
list_add(&rq->queuelist, &rq->q->queue_head);
else
list_add_tail(&rq->queuelist, &rq->q->queue_head);
#if defined(OPLUS_FEATURE_SCHED_ASSIST) && defined(CONFIG_OPLUS_FEATURE_UXIO_FIRST)
queue_throtl_add_request(rq->q, rq, add_front);
#endif /*OPLUS_FEATURE_SCHED_ASSIST*/
return true;
}
}
@@ -499,7 +506,14 @@ void blk_insert_flush(struct request *rq)
if (q->mq_ops)
blk_mq_request_bypass_insert(rq, false);
else
#if defined(OPLUS_FEATURE_SCHED_ASSIST) && defined(CONFIG_OPLUS_FEATURE_UXIO_FIRST)
{
list_add_tail(&rq->queuelist, &q->queue_head);
queue_throtl_add_request(q, rq, false);
}
#else
list_add_tail(&rq->queuelist, &q->queue_head);
#endif
return;
}

View File

@@ -505,8 +505,11 @@ void blk_mq_free_request(struct request *rq)
if (unlikely(laptop_mode && !blk_rq_is_passthrough(rq)))
laptop_io_completion(q->backing_dev_info);
#if defined(OPLUS_FEATURE_SCHED_ASSIST) && defined(CONFIG_OPLUS_FEATURE_UXIO_FIRST)
rq_qos_done(q, rq,(bool)((rq->cmd_flags & REQ_FG)||(rq->cmd_flags & REQ_UX)));
#else
rq_qos_done(q, rq);
#endif
if (blk_rq_rl(rq))
blk_put_rl(blk_rq_rl(rq));
@@ -529,7 +532,11 @@ inline void __blk_mq_end_request(struct request *rq, blk_status_t error)
blk_account_io_done(rq, now);
if (rq->end_io) {
#if defined(OPLUS_FEATURE_SCHED_ASSIST) && defined(CONFIG_OPLUS_FEATURE_UXIO_FIRST)
rq_qos_done(rq->q, rq,(bool)((rq->cmd_flags & REQ_FG)||(rq->cmd_flags & REQ_UX)));
#else
rq_qos_done(rq->q, rq);
#endif
rq->end_io(rq, error);
} else {
if (unlikely(blk_bidi_rq(rq)))

View File

@@ -36,7 +36,17 @@ void rq_qos_cleanup(struct request_queue *q, struct bio *bio)
rqos->ops->cleanup(rqos, bio);
}
}
#if defined(OPLUS_FEATURE_SCHED_ASSIST) && defined(CONFIG_OPLUS_FEATURE_UXIO_FIRST)
void rq_qos_done(struct request_queue *q, struct request *rq, bool fgux)
{
struct rq_qos *rqos;
for (rqos = q->rq_qos; rqos; rqos = rqos->next) {
if (rqos->ops->done)
rqos->ops->done(rqos, rq, fgux);
}
}
#else
void rq_qos_done(struct request_queue *q, struct request *rq)
{
struct rq_qos *rqos;
@@ -46,7 +56,7 @@ void rq_qos_done(struct request_queue *q, struct request *rq)
rqos->ops->done(rqos, rq);
}
}
#endif
void rq_qos_issue(struct request_queue *q, struct request *rq)
{
struct rq_qos *rqos;

View File

@@ -29,7 +29,11 @@ struct rq_qos_ops {
void (*track)(struct rq_qos *, struct request *, struct bio *);
void (*issue)(struct rq_qos *, struct request *);
void (*requeue)(struct rq_qos *, struct request *);
#if defined(OPLUS_FEATURE_SCHED_ASSIST) && defined(CONFIG_OPLUS_FEATURE_UXIO_FIRST)
void (*done)(struct rq_qos *, struct request *, bool);
#else
void (*done)(struct rq_qos *, struct request *);
#endif
void (*done_bio)(struct rq_qos *, struct bio *);
void (*cleanup)(struct rq_qos *, struct bio *);
void (*exit)(struct rq_qos *);
@@ -96,7 +100,11 @@ bool rq_depth_scale_down(struct rq_depth *rqd, bool hard_throttle);
bool rq_depth_calc_max_depth(struct rq_depth *rqd);
void rq_qos_cleanup(struct request_queue *, struct bio *);
#if defined(OPLUS_FEATURE_SCHED_ASSIST) && defined(CONFIG_OPLUS_FEATURE_UXIO_FIRST)
void rq_qos_done(struct request_queue *, struct request *, bool);
#else
void rq_qos_done(struct request_queue *, struct request *);
#endif
void rq_qos_issue(struct request_queue *, struct request *);
void rq_qos_requeue(struct request_queue *, struct request *);
void rq_qos_done_bio(struct request_queue *q, struct bio *bio);

View File

@@ -393,6 +393,61 @@ static ssize_t queue_poll_delay_store(struct request_queue *q, const char *page,
return count;
}
#ifdef OPLUS_FEATURE_HEALTHINFO
// Add for ioqueue
#ifdef CONFIG_OPLUS_HEALTHINFO
static ssize_t queue_show_ohm_inflight(struct request_queue *q, char *page)
{
ssize_t ret;
ret = sprintf(page, "async:%d\n", q->in_flight[0]);
ret += sprintf(page + ret, "sync:%d\n", q->in_flight[1]);
ret += sprintf(page + ret, "ux:%d\n", q->in_flight[2]);
ret += sprintf(page + ret, "fg:%d\n", q->in_flight[3]);
ret += sprintf(page + ret, "bg:%d\n", q->in_flight[4]);
return ret;
}
#endif
#endif /* OPLUS_FEATURE_HEALTHINFO */
#if defined(OPLUS_FEATURE_SCHED_ASSIST) && defined(CONFIG_OPLUS_FEATURE_UXIO_FIRST)
static ssize_t queue_bg_max_depth_show(struct request_queue *q, char *page)
{
ssize_t ret;
if (!q->queue_tags)
return -EINVAL;
ret = sprintf(page, "%d\n", q->queue_tags->bg_max_depth);
return ret;
}
static ssize_t queue_bg_max_depth_store(struct request_queue *q,
const char *page, size_t count)
{
unsigned long val;
int ret;
if (!q->queue_tags)
return -EINVAL;
ret = queue_var_store(&val, page, count);
if (ret < 0)
return ret;
if (val > q->queue_tags->max_depth)
return -EINVAL;
q->queue_tags->bg_max_depth = val;
return (ssize_t)count;
}
static struct queue_sysfs_entry queue_bg_max_depth_entry = {
.attr = {.name = "bg_max_depth", .mode = S_IRUGO | S_IWUSR },
.show = queue_bg_max_depth_show,
.store = queue_bg_max_depth_store,
};
#endif
static ssize_t queue_poll_show(struct request_queue *q, char *page)
{
@@ -527,7 +582,6 @@ static struct queue_sysfs_entry queue_ra_entry = {
.show = queue_ra_show,
.store = queue_ra_store,
};
static struct queue_sysfs_entry queue_max_sectors_entry = {
.attr = {.name = "max_sectors_kb", .mode = 0644 },
.show = queue_max_sectors_show,
@@ -654,6 +708,15 @@ static struct queue_sysfs_entry queue_iostats_entry = {
.show = queue_show_iostats,
.store = queue_store_iostats,
};
#ifdef OPLUS_FEATURE_HEALTHINFO
// Add for ioqueue
#ifdef CONFIG_OPLUS_HEALTHINFO
static struct queue_sysfs_entry queue_ohm_inflight_entry = {
.attr = {.name = "ohm_inflight", .mode = S_IRUGO },
.show = queue_show_ohm_inflight,
};
#endif
#endif /* OPLUS_FEATURE_HEALTHINFO */
static struct queue_sysfs_entry queue_random_entry = {
.attr = {.name = "add_random", .mode = 0644 },
@@ -730,6 +793,14 @@ static struct attribute *default_attrs[] = {
&queue_nomerges_entry.attr,
&queue_rq_affinity_entry.attr,
&queue_iostats_entry.attr,
#if defined OPLUS_FEATURE_HEALTHINFO && defined CONFIG_OPLUS_HEALTHINFO
// Add for ioqueue
&queue_ohm_inflight_entry.attr,
#endif /* OPLUS_FEATURE_HEALTHINFO */
#if defined(OPLUS_FEATURE_SCHED_ASSIST) && defined(CONFIG_OPLUS_FEATURE_UXIO_FIRST)
&queue_bg_max_depth_entry.attr,
#endif
&queue_random_entry.attr,
&queue_poll_entry.attr,
&queue_wc_entry.attr,

View File

@@ -10,6 +10,10 @@
#include "blk.h"
#if defined(OPLUS_FEATURE_SCHED_ASSIST) && defined(CONFIG_OPLUS_FEATURE_UXIO_FIRST)
#include "uxio_first/uxio_first_opt.h"
#endif
/**
* blk_queue_find_tag - find a request by its tag and queue
* @q: The request queue for the device
@@ -110,6 +114,9 @@ init_tag_map(struct request_queue *q, struct blk_queue_tag *tags, int depth)
tags->real_max_depth = depth;
tags->max_depth = depth;
#if defined(OPLUS_FEATURE_SCHED_ASSIST) && defined(CONFIG_OPLUS_FEATURE_UXIO_FIRST)
tags->bg_max_depth = BLK_MAX_BG_DEPTH;
#endif
tags->tag_index = tag_index;
tags->tag_map = tag_map;

View File

@@ -30,6 +30,9 @@
#define CREATE_TRACE_POINTS
#include <trace/events/wbt.h>
#if defined(OPLUS_FEATURE_SCHED_ASSIST) && defined(CONFIG_OPLUS_FEATURE_UXIO_FIRST)
extern bool sysctl_wbt_enable;
#endif
static inline void wbt_clear_state(struct request *rq)
{
rq->wbt_flags = 0;
@@ -76,8 +79,14 @@ enum {
static inline bool rwb_enabled(struct rq_wb *rwb)
{
#if defined(OPLUS_FEATURE_SCHED_ASSIST) && defined(CONFIG_OPLUS_FEATURE_UXIO_FIRST)
return sysctl_wbt_enable && rwb &&
rwb->enable_state != WBT_STATE_OFF_DEFAULT &&
rwb->wb_normal != 0;
#else
return rwb && rwb->enable_state != WBT_STATE_OFF_DEFAULT &&
rwb->wb_normal != 0;
#endif
}
static void wb_timestamp(struct rq_wb *rwb, unsigned long *var)
@@ -182,7 +191,11 @@ static void __wbt_done(struct rq_qos *rqos, enum wbt_flags wb_acct)
* Called on completion of a request. Note that it's also called when
* a request is merged, when the request gets freed.
*/
#if defined(OPLUS_FEATURE_SCHED_ASSIST) && defined(CONFIG_OPLUS_FEATURE_UXIO_FIRST)
static void wbt_done(struct rq_qos *rqos, struct request *rq, bool fgux)
#else
static void wbt_done(struct rq_qos *rqos, struct request *rq)
#endif
{
struct rq_wb *rwb = RQWB(rqos);
@@ -192,7 +205,11 @@ static void wbt_done(struct rq_qos *rqos, struct request *rq)
rwb->sync_cookie = NULL;
}
#if defined(OPLUS_FEATURE_SCHED_ASSIST) && defined(CONFIG_OPLUS_FEATURE_UXIO_FIRST)
if (wbt_is_read(rq) || fgux)
#else
if (wbt_is_read(rq))
#endif
wb_timestamp(rwb, &rwb->last_comp);
} else {
WARN_ON_ONCE(rq == rwb->sync_cookie);

View File

@@ -42,6 +42,9 @@
#include "blk.h"
#include "blk-mq-sched.h"
#include "blk-wbt.h"
#if defined(OPLUS_FEATURE_SCHED_ASSIST) && defined(CONFIG_OPLUS_FEATURE_UXIO_FIRST)
#include "uxio_first/uxio_first_opt.h"
#endif
static DEFINE_SPINLOCK(elv_list_lock);
static LIST_HEAD(elv_list);
@@ -394,6 +397,9 @@ void elv_dispatch_sort(struct request_queue *q, struct request *rq)
}
list_add(&rq->queuelist, entry);
#if defined(OPLUS_FEATURE_SCHED_ASSIST) && defined(CONFIG_OPLUS_FEATURE_UXIO_FIRST)
queue_throtl_add_request(q, rq, false);
#endif
}
EXPORT_SYMBOL(elv_dispatch_sort);
@@ -414,6 +420,9 @@ void elv_dispatch_add_tail(struct request_queue *q, struct request *rq)
q->end_sector = rq_end_sector(rq);
q->boundary_rq = rq;
list_add_tail(&rq->queuelist, &q->queue_head);
#if defined(OPLUS_FEATURE_SCHED_ASSIST) && defined(CONFIG_OPLUS_FEATURE_UXIO_FIRST)
queue_throtl_add_request(q, rq, false);
#endif
}
EXPORT_SYMBOL(elv_dispatch_add_tail);
@@ -593,6 +602,12 @@ void elv_requeue_request(struct request_queue *q, struct request *rq)
*/
if (blk_account_rq(rq)) {
q->in_flight[rq_is_sync(rq)]--;
#ifdef OPLUS_FEATURE_HEALTHINFO
// Add for ioqueue
#ifdef CONFIG_OPLUS_HEALTHINFO
ohm_ioqueue_dec_inflight(q, rq);
#endif
#endif /* OPLUS_FEATURE_HEALTHINFO */
if (rq->rq_flags & RQF_SORTED)
elv_deactivate_rq(q, rq);
}
@@ -625,6 +640,10 @@ void elv_drain_elevator(struct request_queue *q)
void __elv_add_request(struct request_queue *q, struct request *rq, int where)
{
#if defined(OPLUS_FEATURE_IOMONITOR) && defined(CONFIG_IOMONITOR)
rq->req_ti = ktime_get();
#endif /*OPLUS_FEATURE_IOMONITOR*/
trace_block_rq_insert(q, rq);
blk_pm_add_request(q, rq);
@@ -647,12 +666,18 @@ void __elv_add_request(struct request_queue *q, struct request *rq, int where)
case ELEVATOR_INSERT_FRONT:
rq->rq_flags |= RQF_SOFTBARRIER;
list_add(&rq->queuelist, &q->queue_head);
#if defined(OPLUS_FEATURE_SCHED_ASSIST) && defined(CONFIG_OPLUS_FEATURE_UXIO_FIRST)
queue_throtl_add_request(q, rq, true);
#endif
break;
case ELEVATOR_INSERT_BACK:
rq->rq_flags |= RQF_SOFTBARRIER;
elv_drain_elevator(q);
list_add_tail(&rq->queuelist, &q->queue_head);
#if defined(OPLUS_FEATURE_SCHED_ASSIST) && defined(CONFIG_OPLUS_FEATURE_UXIO_FIRST)
queue_throtl_add_request(q, rq, false);
#endif
/*
* We kick the queue here for the following reasons.
* - The elevator might have returned NULL previously
@@ -787,6 +812,12 @@ void elv_completed_request(struct request_queue *q, struct request *rq)
*/
if (blk_account_rq(rq)) {
q->in_flight[rq_is_sync(rq)]--;
#ifdef OPLUS_FEATURE_HEALTHINFO
// Add for ioqueue
#ifdef CONFIG_OPLUS_HEALTHINFO
ohm_ioqueue_dec_inflight(q, rq);
#endif
#endif /* OPLUS_FEATURE_HEALTHINFO */
if ((rq->rq_flags & RQF_SORTED) &&
e->type->ops.sq.elevator_completed_req_fn)
e->type->ops.sq.elevator_completed_req_fn(q, rq);

View File

@@ -689,6 +689,36 @@ static int find_valid_gpt(struct parsed_partitions *state, gpt_header **gpt,
* 1 if successful
*
*/
/*
 * Mapping of legacy "oppo*" GPT partition names to their renamed "oplus*"
 * equivalents. Applied to the volume name parsed out of each GPT entry so
 * userspace sees only the new names.
 */
struct replace_partition_tbl {
char *old_name;	/* legacy prefix to match */
char *new_name;	/* replacement name (same length as old_name in every entry) */
};
static struct replace_partition_tbl tbl[] = {
/* NOTE(review): self-mapping entry — matching "oplus_sec" rewrites it to
 * itself (a no-op). Presumably intentional as an early-out guard; confirm. */
{"oplus_sec", "oplus_sec"},
{"oppodycnvbk","oplusdycnvbk"},
{"oppostanvbk", "oplusstanvbk"},
{"opporeserve1", "oplusreserve1"},
{"opporeserve2", "oplusreserve2"},
{"opporeserve3", "oplusreserve3"},
{"opporeserve4", "oplusreserve4"},
{"opporeserve5", "oplusreserve5"},
};
/*
 * Rewrite a legacy partition name in place using the table above.
 * Matching is by PREFIX (strncmp with strlen(old_name)), so any name that
 * merely starts with an old_name is rewritten; first match wins.
 * Assumes the caller's buffer is at least strlen(new_name)+1 bytes — here it
 * is the GPT volname field, and every new_name equals its old_name in length,
 * so the strcpy cannot overflow for the current table. TODO confirm if the
 * table ever gains a longer new_name.
 */
static void oplus_replace_partition_name(char *name)
{
int part_idx = 0;
for (part_idx = 0; part_idx < ARRAY_SIZE(tbl); part_idx++) {
if (!strncmp(name, tbl[part_idx].old_name, strlen(tbl[part_idx].old_name))) {
pr_warn("rename partition name: %s->%s\n", name, tbl[part_idx].new_name);
/* Clear only the matched prefix length; any tail beyond it is kept. */
memset(name, 0, strlen(tbl[part_idx].old_name));
strcpy(name, tbl[part_idx].new_name);
return;
}
}
}
int efi_partition(struct parsed_partitions *state)
{
gpt_header *gpt = NULL;
@@ -736,6 +766,7 @@ int efi_partition(struct parsed_partitions *state)
label_count++;
}
state->parts[i + 1].has_info = true;
oplus_replace_partition_name(&info->volname[0]);
}
kfree(ptes);
kfree(gpt);

View File

@@ -205,6 +205,12 @@ static void blk_set_cmd_filter_defaults(struct blk_cmd_filter *filter)
__set_bit(GPCMD_LOAD_UNLOAD, filter->write_ok);
__set_bit(GPCMD_SET_STREAMING, filter->write_ok);
__set_bit(GPCMD_SET_READ_AHEAD, filter->write_ok);
#ifdef OPLUS_FEATURE_STORAGE_TOOL
// add write buffer command for common user
// add vendor command for common user
__set_bit(WRITE_BUFFER, filter->write_ok);
__set_bit(VENDOR_SPECIFIC_CDB, filter->write_ok);
#endif
}
int blk_verify_command(unsigned char *cmd, fmode_t mode)
@@ -424,6 +430,11 @@ int sg_scsi_ioctl(struct request_queue *q, struct gendisk *disk, fmode_t mode,
int err;
unsigned int in_len, out_len, bytes, opcode, cmdlen;
char *buffer = NULL;
#ifdef OPLUS_FEATURE_STORAGE_TOOL
// vendor cmd len is 16 and not 10 in spec.
// in current application ,only samsung health will use this cmd.
struct scsi_device *sdev = NULL;
#endif
if (!sic)
return -EINVAL;
@@ -456,6 +467,16 @@ int sg_scsi_ioctl(struct request_queue *q, struct gendisk *disk, fmode_t mode,
req = scsi_req(rq);
cmdlen = COMMAND_SIZE(opcode);
#ifdef OPLUS_FEATURE_STORAGE_TOOL
// vendor cmd len is 16 and not 10 in spec.
// in current application ,only samsung health will use this cmd.
sdev = (struct scsi_device*)(q->queuedata);
if ((VENDOR_SPECIFIC_CDB == opcode)
&&(0 == strncmp(sdev->vendor, "SAMSUNG ", 8))
){
cmdlen = 16;
}
#endif
/*
* get command and data to send to device, if any

1
block/uxio_first Symbolic link
View File

@@ -0,0 +1 @@
../../../vendor/oplus/kernel/oplus_performance/uxio_first

View File

@@ -23,7 +23,7 @@
#include <crypto/internal/scompress.h>
#define ZSTD_DEF_LEVEL 3
#define ZSTD_DEF_LEVEL 1
struct zstd_ctx {
ZSTD_CCtx *cctx;
@@ -248,6 +248,7 @@ static int __init zstd_mod_init(void)
if (ret)
crypto_unregister_alg(&alg);
pr_warn("ZSTD_DEF_LEVEL val %d\n", ZSTD_DEF_LEVEL);
return ret;
}

View File

@@ -232,4 +232,10 @@ source "drivers/sensors/Kconfig"
source "drivers/gpu/msm/Kconfig"
source "drivers/energy_model/Kconfig"
#ifdef OPLUS_NFC_BRINGUP
#Add for the kernel Macro for NXP PN557 NFC kernel
source "drivers/nfc/pn553-i2c/Kconfig"
source "drivers/nfc/p73-spi/Kconfig"
#endif /*OPLUS_NFC_BRINGUP*/
endmenu

View File

@@ -192,3 +192,4 @@ obj-$(CONFIG_UNISYS_VISORBUS) += visorbus/
obj-$(CONFIG_SIOX) += siox/
obj-$(CONFIG_GNSS) += gnss/
obj-$(CONFIG_SENSORS_SSC) += sensors/
obj-$(CONFIG_SENSORS_SIMULATED_HALL) += sensors/

View File

@@ -54,6 +54,11 @@ config ANDROID_BINDER_IPC_SELFTEST
exhaustively with combinations of various buffer sizes and
alignments.
config OPLUS_BINDER_STRATEGY
default n
bool "config binder control"
help
it helps to reduce anr by restricting background app binder behavior.
endif # if ANDROID
endmenu

View File

@@ -81,6 +81,21 @@
#include "binder_internal.h"
#include "binder_trace.h"
#ifdef OPLUS_FEATURE_HANS_FREEZE
#include <linux/hans.h>
#endif /*OPLUS_FEATURE_HANS_FREEZE*/
#ifdef OPLUS_FEATURE_SCHED_ASSIST
#include <linux/sched_assist/sched_assist_binder.h>
#endif /* OPLUS_FEATURE_SCHED_ASSIST */
#ifdef CONFIG_OPLUS_FEATURE_INPUT_BOOST_V4
#include <linux/tuning/frame_boost_group.h>
#endif /* CONFIG_OPLUS_FEATURE_INPUT_BOOST_V4 */
#if IS_ENABLED(CONFIG_OPLUS_FEATURE_CPU_JANKINFO)
#include <linux/cpu_jankinfo/jank_tasktrack.h>
#endif
static HLIST_HEAD(binder_deferred_list);
static DEFINE_MUTEX(binder_deferred_lock);
@@ -136,6 +151,40 @@ module_param_named(devices, binder_devices_param, charp, 0444);
static DECLARE_WAIT_QUEUE_HEAD(binder_user_error_wait);
static int binder_stop_on_user_error;
#if defined(CONFIG_OPLUS_FEATURE_BINDER_STATS_ENABLE)
#include <linux/notifier.h>
#ifndef CONFIG_OPLUS_FEATURE_CPU_JANKINFO
#define OPLUS_MAX_SERVICE_NAME_LEN 32
#endif
#define OPLUS_MAGIC_SERVICE_NAME_OFFSET 76
struct binder_notify {
struct task_struct *caller_task;
struct task_struct *binder_task;
char service_name[OPLUS_MAX_SERVICE_NAME_LEN];
bool pending_async;
};
static ATOMIC_NOTIFIER_HEAD(binderevent_notif_chain);
int register_binderevent_notifier(struct notifier_block *nb) {
return atomic_notifier_chain_register(&binderevent_notif_chain, nb);
}
EXPORT_SYMBOL_GPL(register_binderevent_notifier);
int unregister_binderevent_notifier(struct notifier_block *nb) {
return atomic_notifier_chain_unregister(&binderevent_notif_chain, nb);
}
EXPORT_SYMBOL_GPL(unregister_binderevent_notifier);
int call_binderevent_notifiers(unsigned long val, void *v) {
return atomic_notifier_call_chain(&binderevent_notif_chain, val, v);
}
EXPORT_SYMBOL_GPL(call_binderevent_notifiers);
#endif // #if defined(CONFIG_OPLUS_FEATURE_BINDER_STATS_ENABLE)
static int binder_set_stop_on_user_error(const char *val,
const struct kernel_param *kp)
{
@@ -174,6 +223,7 @@ module_param_call(stop_on_user_error, binder_set_stop_on_user_error,
#define to_binder_fd_array_object(hdr) \
container_of(hdr, struct binder_fd_array_object, hdr)
#ifndef CONFIG_OPLUS_FEATURE_CPU_JANKINFO
enum binder_stat_types {
BINDER_STAT_PROC,
BINDER_STAT_THREAD,
@@ -191,6 +241,7 @@ struct binder_stats {
atomic_t obj_created[BINDER_STAT_COUNT];
atomic_t obj_deleted[BINDER_STAT_COUNT];
};
#endif
static struct binder_stats binder_stats;
@@ -227,6 +278,7 @@ static struct binder_transaction_log_entry *binder_transaction_log_add(
return e;
}
#ifndef CONFIG_OPLUS_FEATURE_CPU_JANKINFO
/**
* struct binder_work - work enqueued on a worklist
* @entry: node enqueued on list
@@ -246,6 +298,9 @@ struct binder_work {
BINDER_WORK_DEAD_BINDER_AND_CLEAR,
BINDER_WORK_CLEAR_DEATH_NOTIFICATION,
} type;
#ifdef CONFIG_OPLUS_BINDER_STRATEGY
u64 ob_begin;
#endif
};
struct binder_error {
@@ -354,6 +409,9 @@ struct binder_node {
};
bool has_async_transaction;
struct list_head async_todo;
#if defined(CONFIG_OPLUS_FEATURE_BINDER_STATS_ENABLE)
char service_name[OPLUS_MAX_SERVICE_NAME_LEN];
#endif
};
struct binder_ref_death {
@@ -513,6 +571,9 @@ struct binder_proc {
struct hlist_node deferred_work_node;
int deferred_work;
bool is_dead;
#ifdef OPLUS_FEATURE_SCHED_ASSIST
int proc_type;
#endif /* OPLUS_FEATURE_SCHED_ASSIST */
struct list_head todo;
struct binder_stats stats;
@@ -537,6 +598,9 @@ enum {
BINDER_LOOPER_STATE_INVALID = 0x08,
BINDER_LOOPER_STATE_WAITING = 0x10,
BINDER_LOOPER_STATE_POLL = 0x20,
#ifdef CONFIG_OPLUS_BINDER_STRATEGY
BINDER_LOOPER_STATE_BACKGROUND = 0x40,
#endif
};
/**
@@ -594,6 +658,7 @@ struct binder_thread {
bool is_dead;
struct task_struct *task;
};
#endif
struct binder_transaction {
int debug_id;
@@ -643,6 +708,58 @@ struct binder_object {
};
};
#if defined(CONFIG_OPLUS_FEATURE_BINDER_STATS_ENABLE)
static void oplus_parse_service_name(struct binder_transaction_data *tr,
struct binder_proc *proc,
char *name) {
unsigned int i, len = 0;
char *tmp;
char c;
char sname[OPLUS_MAX_SERVICE_NAME_LEN];
if (NULL != tr && tr->target.handle == 0 && NULL != proc && NULL != proc->context) {
if (!strcmp(proc->context->name, "hwbinder")) {
strcpy(sname, "hwbinderService");
} else {
for (i = 0; (2 * i) < tr->data_size; i++) {
if ((2 * i) < OPLUS_MAGIC_SERVICE_NAME_OFFSET) {
continue;
}
if (len >= (OPLUS_MAX_SERVICE_NAME_LEN - 1))
break;
tmp = (char *)(uintptr_t)(tr->data.ptr.buffer + (2*i));
get_user(c, tmp);
if (c >= 32 && c <= 126) { // visible character range [32, 126]
if (len < OPLUS_MAX_SERVICE_NAME_LEN - 1)
len += sprintf(sname + len, "%c", c);
else
break;
}
if ('\0' == c) {
break;
}
}
sname[len] = '\0';
}
pr_info("context.name[%s] tr.size:%lu service:%s\n",
proc->context->name, (unsigned long)tr->data_size, sname);
} else {
if (NULL != tr && 0 != tr->target.handle) {
sprintf(sname, "AnonymousCallback");
} else {
sprintf(sname, "unknown");
}
}
if (NULL != name){
strncpy(name, sname, OPLUS_MAX_SERVICE_NAME_LEN);
name[OPLUS_MAX_SERVICE_NAME_LEN-1] = '\0';
}
}
#endif // #if defined(CONFIG_OPLUS_FEATURE_BINDER_STATS_ENABLE)
/**
* binder_proc_lock() - Acquire outer lock for given binder_proc
* @proc: struct binder_proc to acquire
@@ -967,13 +1084,25 @@ err:
return retval;
}
#ifdef CONFIG_OPLUS_BINDER_STRATEGY
bool obthread_has_work(struct binder_thread *thread);
static struct binder_thread *
binder_select_thread_ilocked(struct binder_proc *proc);
#endif
/*
 * Return true if this thread has binder work to process (caller holds the
 * inner proc lock — the _ilocked suffix).
 *
 * The OPLUS variant adds one extra source of work: when the thread may take
 * proc-wide work (do_proc_work), the restricted background list is also
 * consulted via obthread_has_work(), so a designated background thread wakes
 * for throttled transactions that never enter proc->todo.
 */
static bool binder_has_work_ilocked(struct binder_thread *thread,
bool do_proc_work)
{
#ifdef CONFIG_OPLUS_BINDER_STRATEGY
return thread->process_todo ||
thread->looper_need_return || (do_proc_work && obthread_has_work(thread)) ||
(do_proc_work &&
!binder_worklist_empty_ilocked(&thread->proc->todo));
#else
return thread->process_todo ||
thread->looper_need_return ||
(do_proc_work &&
!binder_worklist_empty_ilocked(&thread->proc->todo));
#endif
}
static bool binder_has_work(struct binder_thread *thread, bool do_proc_work)
@@ -987,6 +1116,290 @@ static bool binder_has_work(struct binder_thread *thread, bool do_proc_work)
return has_work;
}
#ifdef CONFIG_OPLUS_BINDER_STRATEGY
#include <soc/oplus/healthinfo.h>
/* NOTE(review): this macro duplicates the BINDER_LOOPER_STATE_BACKGROUND
 * enumerator (0x40) added to the looper-state enum earlier in this patch;
 * the values agree, but having both is redundant and fragile. */
#define BINDER_LOOPER_STATE_BACKGROUND 0x40
#define OBPROC_CHECK_CYCLE_NS 100000000		/* 100 ms between timeout sweeps */
#define OBWORK_TIMEOUT_NS 800000000		/* 800 ms max hold on ob_list */
#define BG_THREAD (2)		/* pool index of the thread tagged as background */
#define SA_CGROUP_BACKGROUND (3)	/* schedtune cgroup id treated as background */
/* Singleton describing the one throttled process (system_server "binder"). */
struct ob_struct ob_target;
pid_t ob_pid;				/* pid of the designated background binder thread */
int ob_err;				/* debug counter, bumped by obprint_oblist() */
int sysctl_ob_control_enable = 1;	/* /proc knob; 1 = throttling active */
/* Emit a raw string into the ftrace buffer (systrace counter events).
 * NOTE(review): trace_printk(buf) passes a non-literal as the format string;
 * any '%' in buf would be interpreted — callers must ensure buf is safe. */
noinline void ob_tracing_mark_write(const char *buf)
{
trace_printk(buf);
}
/* Write a systrace counter marker ("C|pid|name|1") for the given thread. */
void ob_sysctrace_c(struct binder_proc *proc, struct binder_thread *thread)
{
char buf[256];
snprintf(buf, sizeof(buf), "C|%d|oplus_bt%s|%d", proc->pid, thread->task->comm, 1);
ob_tracing_mark_write(buf);
}
/* Return the task's schedtune cgroup css id, or -1 if it has none. */
int get_task_cgroup_id(struct task_struct *task)
{
struct cgroup_subsys_state *css = task_css(task, schedtune_cgrp_id);
return css ? css->id : -1;
}
/* True if the task sits in the background schedtune group. */
bool test_task_bg(struct task_struct *task)
{
return (SA_CGROUP_BACKGROUND == get_task_cgroup_id(task)) ? 1 : 0;
}
/* True if the transaction's sender thread is in the background group.
 * NOTE(review): dereferences t->from without a NULL check; callers are
 * expected to have validated t->from (obwork_is_restrict does). */
bool obtrans_is_from_background(struct binder_transaction *t)
{
return test_task_bg(t->from->task);
}
/* True if the sender uid (mod the per-user offset 100000) is an app uid
 * (>= 10000), i.e. a third-party/app caller rather than a system uid. */
bool obtrans_is_from_third_party(struct binder_transaction *t)
{
return (from_kuid(current_user_ns(), t->sender_euid) % 100000) >= 10000;
}
/* True if the sender is its process's main thread (pid == tgid). */
bool obtrans_is_from_main(struct binder_transaction *t)
{
return t->from->proc->pid == t->from->pid;
}
/* Process-name substrings exempt from background binder throttling.
 * NOTE(review): "rocess.contacts" is deliberately truncated — task comm is
 * limited to 16 chars, so a full package name would never match; presumably
 * targets "...process.contacts". Confirm against the intended app. */
static char *obs_blacklist[] = {
"rocess.contacts"
};
/* Return true if the sender process is on the exemption blacklist
 * (substring match against its comm), i.e. must NOT be throttled. */
bool obs_black_list(struct binder_transaction *t)
{
int i = 0;
if (!t->from->proc->tsk)
return false;
for (i = 0; i < ARRAY_SIZE(obs_blacklist); i++) {
if (strstr(t->from->proc->tsk->comm, obs_blacklist[i])) {
pr_debug("%s is obs blacklist, don't limit it!!!",obs_blacklist[i]);
return true;
}
}
return false;
}
/*
 * Decide whether a transaction should be deferred onto the restricted list:
 * sender must exist, not be blacklisted, be a background third-party app
 * thread that is neither the process main thread nor a UX-boosted task.
 */
bool obwork_is_restrict(struct binder_transaction *t)
{
if ((!t->from) || (!t->from->proc))
return false;
if (obs_black_list(t))
return false;
return (obtrans_is_from_background(t)) && obtrans_is_from_third_party(t) &&
(!obtrans_is_from_main(t)) && (!test_task_ux(t->from->task));
}
/*
 * Timeout sweep: move entries that have sat on the restricted list longer
 * than OBWORK_TIMEOUT_NS back onto proc->todo so they are never starved.
 * Rate-limited to one sweep per OBPROC_CHECK_CYCLE_NS. The list is kept in
 * enqueue order, so the walk stops at the first not-yet-expired entry.
 * Caller must hold the inner proc lock (uses *_ilocked helpers).
 */
void obwork_check_restrict_off(struct binder_proc *proc)
{
struct binder_work *w = NULL;
struct binder_work *tmp = NULL;
/* NOTE(review): 't' is only used by the commented-out pr_info below;
 * currently an unused assignment. */
struct binder_transaction *t = NULL;
u64 now = sched_clock();
if (proc != ob_target.ob_proc || binder_worklist_empty_ilocked(&ob_target.ob_list) || (now - ob_target.ob_check_ts) < OBPROC_CHECK_CYCLE_NS)
return;
list_for_each_entry_safe(w, tmp, &ob_target.ob_list, entry) {
if (!w)
continue;
t = container_of(w, struct binder_transaction, work);
if (now - w->ob_begin < OBWORK_TIMEOUT_NS)
break;
list_del_init(&w->entry);
//pr_info("%s timeoutinfo:t->from:%d,t->start:%llu now:%llu",__func__,t->from->pid,w->ob_begin,now);
binder_enqueue_work_ilocked(w, &proc->todo);
}
ob_target.ob_check_ts = sched_clock();
}
/*
 * Called at transaction-send time: stamp restricted work with its enqueue
 * time (consumed by the timeout sweep above), then opportunistically run the
 * sweep. No-op when the sysctl knob is off.
 */
void obtrans_restrict_start(struct binder_proc *proc, struct binder_transaction *t)
{
if (!sysctl_ob_control_enable)
return;
if (obwork_is_restrict(t)) {
t->work.ob_begin = sched_clock();
}
obwork_check_restrict_off(proc);
}
/*
 * Flush the entire restricted list back onto the target proc's todo list.
 * Used when throttling is disabled via sysctl. Takes the inner proc lock
 * itself, so callers must NOT already hold it.
 */
void oblist_dequeue_all(void)
{
struct binder_work *w;
struct binder_work *w_tmp;
if (ob_target.ob_proc == NULL)
return;
binder_inner_proc_lock(ob_target.ob_proc);
if (binder_worklist_empty_ilocked(&ob_target.ob_list)) {
binder_inner_proc_unlock(ob_target.ob_proc);
return;
}
list_for_each_entry_safe(w, w_tmp, &ob_target.ob_list, entry) {
if (!w)
continue;
list_del_init(&w->entry);
binder_enqueue_work_ilocked(w, &ob_target.ob_proc->todo);
}
binder_inner_proc_unlock(ob_target.ob_proc);
return;
}
/*
 * On foreground-app change: release only the restricted entries whose sender
 * euid matches the new top-app uid, so the now-foreground app's pending
 * binder calls are no longer throttled. Locking as in oblist_dequeue_all().
 */
void oblist_dequeue_topapp_change(uid_t topuid)
{
struct binder_work *w;
struct binder_work *w_tmp;
struct binder_transaction *t;
if (ob_target.ob_proc == NULL)
return;
binder_inner_proc_lock(ob_target.ob_proc);
if (!binder_worklist_empty_ilocked(&ob_target.ob_list)) {
list_for_each_entry_safe(w, w_tmp, &ob_target.ob_list, entry) {
if (!w)
continue;
t = container_of(w, struct binder_transaction, work);
if (from_kuid(current_user_ns(), t->sender_euid) != topuid)
continue;
list_del_init(&w->entry);
binder_enqueue_work_ilocked(w, &ob_target.ob_proc->todo);
}
}
binder_inner_proc_unlock(ob_target.ob_proc);
return;
}
/*
 * Enqueue a transaction either on the normal proc->todo list or, when
 * throttling applies (knob on, target proc is the throttled one, and the
 * transaction qualifies), on the restricted ob_list instead.
 * Caller holds the inner proc lock (uses *_ilocked enqueue).
 */
void obwork_restrict(struct binder_proc *proc, struct binder_transaction *t)
{
if (!sysctl_ob_control_enable || (proc != ob_target.ob_proc) || !obwork_is_restrict(t))
binder_enqueue_work_ilocked(&t->work, &proc->todo);
else
binder_enqueue_work_ilocked(&t->work, &ob_target.ob_list);
}
/*
 * One-shot capture of the throttled target process: the first proc whose
 * task comm is "system_server" on the "binder" context becomes ob_target.
 * NOTE(review): ob_target.init is never cleared (obproc_free() below leaves
 * it set), so if system_server restarts the target is not re-captured —
 * confirm this is intentional.
 */
void obtarget_init(struct binder_proc *proc)
{
if (!proc->tsk || !proc->context || !proc->context->name)
return;
if (ob_target.init)
return;
if ((!strncmp(proc->tsk->comm, "system_server", TASK_COMM_LEN)) && !strcmp(proc->context->name, "binder")) {
ob_target.ob_proc = proc;
ob_target.ob_check_ts = sched_clock();
INIT_LIST_HEAD(&ob_target.ob_list);
ob_target.init = true;
//pr_info("%s: ob_target->pid:%d ob_target->name:%s\n", __func__, proc->tsk->pid,proc->tsk->comm);
}
}
/*
 * Tag the pool thread whose start index equals BG_THREAD as the dedicated
 * "background" looper for the throttled proc, and remember its pid.
 * Called from BC_REGISTER_LOOPER handling under the inner proc lock.
 */
void obthread_init(struct binder_proc *proc, struct binder_thread *thread)
{
if (proc != ob_target.ob_proc)
return;
if (proc->requested_threads_started == BG_THREAD) {
thread->looper |= BINDER_LOOPER_STATE_BACKGROUND;
ob_pid = thread->task->pid;
//pr_info("%s :bg thread->name:%s thread->pid:%d", __func__,thread->task->comm,thread->task->pid);
}
}
/* True if this thread is the tagged background looper of the throttled proc
 * and the restricted list has pending work. */
bool obthread_has_work(struct binder_thread *thread)
{
if (!sysctl_ob_control_enable || !(thread->looper & BINDER_LOOPER_STATE_BACKGROUND) || (thread->proc != ob_target.ob_proc))
return false;
return !binder_worklist_empty_ilocked(&ob_target.ob_list);
}
/* True if this proc is the throttled target and restricted work is pending. */
bool obproc_has_work(struct binder_proc *proc)
{
if (!sysctl_ob_control_enable || ob_target.ob_proc != proc)
return false;
if (!binder_worklist_empty_ilocked(&ob_target.ob_list))
return true;
else
return false;
}
/*
 * Wake every waiting thread tagged as a background looper, removing it from
 * the waiting list first. Caller is expected to hold the inner proc lock
 * (manipulates proc->waiting_threads directly).
 */
void obthread_wakeup(struct binder_proc *proc)
{
struct binder_thread *thread = NULL;
list_for_each_entry(thread, &proc->waiting_threads, waiting_thread_node) {
if (thread && (thread->looper & BINDER_LOOPER_STATE_BACKGROUND)) {
list_del_init(&thread->waiting_thread_node);
wake_up_interruptible(&thread->wait);
}
}
return;
}
/*
 * Teardown hook from binder_free_proc(): drop our reference to the throttled
 * proc. The BUG_ON enforces that the restricted list was fully drained before
 * the proc died. NOTE(review): ob_target.init is not reset here — see note
 * on obtarget_init().
 */
void obproc_free(struct binder_proc *proc)
{
if (proc == ob_target.ob_proc) {
BUG_ON(!list_empty(&ob_target.ob_list));
ob_target.ob_check_ts = 0;
ob_target.ob_proc = NULL;
}
}
/*
 * Debug/accounting walk: count (in the global ob_err) entries on the
 * restricted list that no longer satisfy the restriction predicate.
 * Uses *_ilocked list checks — caller must hold the inner proc lock.
 */
void obprint_oblist(void)
{
struct binder_work *w;
struct binder_work *w_tmp;
struct binder_transaction *t;
if (!sysctl_ob_control_enable)
return;
if (ob_target.ob_proc == NULL)
return;
if (binder_worklist_empty_ilocked(&ob_target.ob_list))
return;
list_for_each_entry_safe(w, w_tmp, &ob_target.ob_list, entry) {
if (!w)
continue;
t = container_of(w, struct binder_transaction, work);
if (!obwork_is_restrict(t))
ob_err++;
}
}
/*
 * Thread selection with throttling: for a restricted, synchronous
 * transaction to the throttled proc, hand it only to the tagged background
 * looper (or to no one — return NULL — if that looper is not waiting).
 * Everything else falls through to the stock binder_select_thread_ilocked().
 * Caller holds the inner proc lock.
 */
struct binder_thread *obthread_get(struct binder_proc *proc, struct binder_transaction *t, bool oneway)
{
struct binder_thread *thread = NULL;
if (sysctl_ob_control_enable && (proc == ob_target.ob_proc) && obwork_is_restrict(t) && !oneway) {
list_for_each_entry(thread, &proc->waiting_threads, waiting_thread_node)
if (thread && (thread->looper & BINDER_LOOPER_STATE_BACKGROUND)) {
list_del_init(&thread->waiting_thread_node);
pr_info("%s :bg thread->name:%s thread->pid:%d", __func__, thread->task->comm, thread->task->pid);
return thread;
}
return NULL;
}
return binder_select_thread_ilocked(proc);
}
/*
 * sysctl handler for sysctl_ob_control_enable. On a write that disables the
 * feature, flush all restricted work back to the proc so nothing is stranded.
 * The *ppos reset mirrors other oplus sysctl handlers (allows rewrites on the
 * same fd).
 */
int sysctl_ob_control_handler(struct ctl_table *table, int write, void __user *buffer, size_t *lenp, loff_t *ppos)
{
int ret;
if (write && *ppos)
*ppos = 0;
ret = proc_dointvec(table, write, buffer, lenp, ppos);
if (!write)
goto out;
if (!sysctl_ob_control_enable) {
pr_info("_%s dequeue all bg work", __func__);
oblist_dequeue_all();
}
out:
return ret;
}
#endif
static bool binder_available_for_proc_work_ilocked(struct binder_thread *thread)
{
return !thread->transaction_stack &&
@@ -1212,10 +1625,17 @@ static void binder_restore_priority(struct task_struct *task,
binder_do_set_priority(task, desired, /* verify = */ false);
}
#ifdef OPLUS_FEATURE_SCHED_ASSIST
static void binder_transaction_priority(struct binder_thread *thread, struct task_struct *task,
struct binder_transaction *t,
struct binder_priority node_prio,
bool inherit_rt)
#else
static void binder_transaction_priority(struct task_struct *task,
struct binder_transaction *t,
struct binder_priority node_prio,
bool inherit_rt)
#endif /* OPLUS_FEATURE_SCHED_ASSIST */
{
struct binder_priority desired_prio = t->priority;
@@ -1226,6 +1646,13 @@ static void binder_transaction_priority(struct task_struct *task,
t->saved_priority.sched_policy = task->policy;
t->saved_priority.prio = task->normal_prio;
#ifdef OPLUS_FEATURE_SCHED_ASSIST
//NOTE: if task is main thread, and doesn't join pool as a binder thread,
//DON'T actually change priority in binder transaction.
if ((task->tgid == task->pid) && !(thread->looper & BINDER_LOOPER_STATE_ENTERED)) {
return;
}
#endif /* OPLUS_FEATURE_SCHED_ASSIST */
if (!inherit_rt && is_rt_policy(desired_prio.sched_policy)) {
desired_prio.prio = NICE_TO_PRIO(0);
desired_prio.sched_policy = SCHED_NORMAL;
@@ -2528,9 +2955,16 @@ static void binder_transaction_buffer_release(struct binder_proc *proc,
}
}
#if defined(CONFIG_OPLUS_FEATURE_BINDER_STATS_ENABLE)
static int binder_translate_binder(struct binder_transaction_data *tr,
struct flat_binder_object *fp,
struct binder_transaction *t,
struct binder_thread *thread)
#else
static int binder_translate_binder(struct flat_binder_object *fp,
struct binder_transaction *t,
struct binder_thread *thread)
#endif
{
struct binder_node *node;
struct binder_proc *proc = thread->proc;
@@ -2543,6 +2977,9 @@ static int binder_translate_binder(struct flat_binder_object *fp,
node = binder_new_node(proc, fp);
if (!node)
return -ENOMEM;
#if defined(CONFIG_OPLUS_FEATURE_BINDER_STATS_ENABLE)
oplus_parse_service_name(tr, proc, node->service_name);
#endif
}
if (fp->cookie != node->cookie) {
binder_user_error("%d:%d sending u%016llx node %d, cookie mismatch %016llx != %016llx\n",
@@ -2833,6 +3270,13 @@ static int binder_fixup_parent(struct binder_transaction *t,
return 0;
}
#ifdef OPLUS_FEATURE_SCHED_ASSIST
/* True if this binder proc belongs to surfaceflinger: comm contains
 * "surfaceflinger" AND the owning uid is 1000 (AID_SYSTEM). Used to mark
 * proc->proc_type at open time for the sched-assist inherit-ux path. */
static inline bool is_binder_proc_sf(struct binder_proc *proc)
{
return proc && proc->tsk && strstr(proc->tsk->comm, "surfaceflinger")
&& (task_uid(proc->tsk).val == 1000);
}
#endif /* OPLUS_FEATURE_SCHED_ASSIST */
/**
* binder_proc_transaction() - sends a transaction to a process and wakes it up
* @t: transaction to send
@@ -2850,6 +3294,9 @@ static int binder_fixup_parent(struct binder_transaction *t,
* Return: true if the transactions was successfully queued
* false if the target process or thread is dead
*/
#if defined(OPLUS_FEATURE_SCHED_ASSIST)
extern bool is_sf(struct task_struct *p);
#endif
static bool binder_proc_transaction(struct binder_transaction *t,
struct binder_proc *proc,
struct binder_thread *thread)
@@ -2858,7 +3305,13 @@ static bool binder_proc_transaction(struct binder_transaction *t,
struct binder_priority node_prio;
bool oneway = !!(t->flags & TF_ONE_WAY);
bool pending_async = false;
#if defined(CONFIG_OPLUS_FEATURE_BINDER_STATS_ENABLE)
struct binder_notify binder_notify_obj;
#endif
#if defined(OPLUS_FEATURE_SCHED_ASSIST)
struct task_struct *grp_leader = NULL;
struct task_struct *curr = current;
#endif
BUG_ON(!node);
binder_node_lock(node);
node_prio.prio = node->min_priority;
@@ -2880,22 +3333,84 @@ static bool binder_proc_transaction(struct binder_transaction *t,
binder_node_unlock(node);
return false;
}
#ifdef CONFIG_OPLUS_BINDER_STRATEGY
obtrans_restrict_start(proc, t);
if (!thread && !pending_async) {
thread = obthread_get(proc, t, oneway);
}
#else
if (!thread && !pending_async)
thread = binder_select_thread_ilocked(proc);
#endif
#if defined(CONFIG_OPLUS_FEATURE_BINDER_STATS_ENABLE)
if (NULL != node && NULL != proc->tsk) {
binder_notify_obj.caller_task = current;
strncpy(binder_notify_obj.service_name, node->service_name, OPLUS_MAX_SERVICE_NAME_LEN);
binder_notify_obj.service_name[OPLUS_MAX_SERVICE_NAME_LEN-1] = '\0';
binder_notify_obj.pending_async = pending_async;
}
#endif
if (thread) {
#if defined(CONFIG_OPLUS_FEATURE_BINDER_STATS_ENABLE)
if (NULL != thread && NULL != thread->task) {
binder_notify_obj.binder_task = thread->task;
call_binderevent_notifiers(0, (void *)&binder_notify_obj);
}
#endif
#ifdef OPLUS_FEATURE_SCHED_ASSIST
binder_transaction_priority(thread, thread->task, t, node_prio,
node->inherit_rt);
#else
binder_transaction_priority(thread->task, t, node_prio,
node->inherit_rt);
#endif /* OPLUS_FEATURE_SCHED_ASSIST */
binder_enqueue_thread_work_ilocked(thread, &t->work);
#ifdef OPLUS_FEATURE_SCHED_ASSIST
if (sysctl_sched_assist_enabled) {
if (!oneway || proc->proc_type)
binder_set_inherit_ux(thread->task, current);
}
#endif /* OPLUS_FEATURE_SCHED_ASSIST */
#ifdef CONFIG_OPLUS_FEATURE_INPUT_BOOST_V4
if (t->from) {
binder_thread_set_fbg(thread->task, t->from->task, oneway);
}
#endif /* CONFIG_OPLUS_FEATURE_INPUT_BOOST_V4 */
} else if (!pending_async) {
#if defined(CONFIG_OPLUS_FEATURE_BINDER_STATS_ENABLE)
if (NULL != proc && NULL != proc->tsk) {
binder_notify_obj.binder_task = proc->tsk;
call_binderevent_notifiers(0, (void *)&binder_notify_obj);
}
#endif
#ifdef CONFIG_OPLUS_BINDER_STRATEGY
obwork_restrict(proc, t);
#else
binder_enqueue_work_ilocked(&t->work, &proc->todo);
#endif
} else {
#if defined(CONFIG_OPLUS_FEATURE_BINDER_STATS_ENABLE)
if (NULL != proc && NULL != proc->tsk) {
binder_notify_obj.binder_task = proc->tsk;
call_binderevent_notifiers(0, (void *)&binder_notify_obj);
}
#endif
binder_enqueue_work_ilocked(&t->work, &node->async_todo);
}
if (!pending_async)
if (!pending_async) {
#if defined(OPLUS_FEATURE_SCHED_ASSIST)
if (thread && thread->task) {
grp_leader = thread->task->group_leader;
if (grp_leader && is_sf(curr) && test_task_ux(thread->task->group_leader) && oneway) {
set_once_ux(thread->task);
}
}
#endif
binder_wakeup_thread_ilocked(proc, thread, !oneway /* sync */);
}
binder_inner_proc_unlock(proc);
binder_node_unlock(node);
@@ -2972,6 +3487,13 @@ static void binder_transaction(struct binder_proc *proc,
int t_debug_id = atomic_inc_return(&binder_last_id);
char *secctx = NULL;
u32 secctx_sz = 0;
#ifdef OPLUS_FEATURE_HANS_FREEZE
char buf_data[INTERFACETOKEN_BUFF_SIZE];
size_t buf_data_size;
char buf[INTERFACETOKEN_BUFF_SIZE] = {0};
int i = 0;
int j = 0;
#endif /*OPLUS_FEATURE_HANS_FREEZE*/
e = binder_transaction_log_add(&binder_transaction_log);
e->debug_id = t_debug_id;
@@ -3087,6 +3609,26 @@ static void binder_transaction(struct binder_proc *proc,
return_error_line = __LINE__;
goto err_dead_binder;
}
#ifdef OPLUS_FEATURE_HANS_FREEZE
if (!(tr->flags & TF_ONE_WAY) //report sync binder call
&& target_proc
&& (task_uid(target_proc->tsk).val > MIN_USERAPP_UID)
&& (proc->pid != target_proc->pid)
&& is_frozen_tg(target_proc->tsk)) {
hans_report(SYNC_BINDER, task_tgid_nr(proc->tsk), task_uid(proc->tsk).val, task_tgid_nr(target_proc->tsk), task_uid(target_proc->tsk).val, "SYNC_BINDER", -1);
}
#endif /*OPLUS_FEATURE_HANS_FREEZE*/
#if defined(CONFIG_CFS_BANDWIDTH)
if (!(tr->flags & TF_ONE_WAY) //report sync binder call
&& target_proc
&& (task_uid(target_proc->tsk).val > MIN_USERAPP_UID || task_uid(target_proc->tsk).val == HANS_SYSTEM_UID) //uid >10000
&& is_belong_cpugrp(target_proc->tsk)) {
hans_report(SYNC_BINDER_CPUCTL, task_tgid_nr(proc->tsk), task_uid(proc->tsk).val, task_tgid_nr(target_proc->tsk), task_uid(target_proc->tsk).val, "SYNC_BINDER_CPUCTL", -1);
}
#endif
e->to_node = target_node->debug_id;
if (security_binder_transaction(proc->cred,
target_proc->cred) < 0) {
@@ -3319,6 +3861,31 @@ static void binder_transaction(struct binder_proc *proc,
return_error_line = __LINE__;
goto err_bad_offset;
}
#ifdef OPLUS_FEATURE_HANS_FREEZE
if ((tr->flags & TF_ONE_WAY) //report async binder call
&& target_proc
&& (task_uid(target_proc->tsk).val > MIN_USERAPP_UID)
&& (proc->pid != target_proc->pid)
&& is_frozen_tg(target_proc->tsk)) {
buf_data_size = tr->data_size>INTERFACETOKEN_BUFF_SIZE ?INTERFACETOKEN_BUFF_SIZE:tr->data_size;
if (!copy_from_user(buf_data, (char*)tr->data.ptr.buffer, buf_data_size)) {
//1.skip first PARCEL_OFFSET bytes (useless data)
//2.make sure the invalid address issue is not occuring(j =PARCEL_OFFSET+1, j+=2)
//3.java layer uses 2 bytes char. And only the first bytes has the data.(p+=2)
if (buf_data_size > PARCEL_OFFSET) {
char *p = (char *)(buf_data) + PARCEL_OFFSET;
j = PARCEL_OFFSET + 1;
while (i < INTERFACETOKEN_BUFF_SIZE && j < buf_data_size && *p != '\0') {
buf[i++] = *p;
j += 2;
p += 2;
}
if (i == INTERFACETOKEN_BUFF_SIZE) buf[i-1] = '\0';
}
hans_report(ASYNC_BINDER, task_tgid_nr(proc->tsk), task_uid(proc->tsk).val, task_tgid_nr(target_proc->tsk), task_uid(target_proc->tsk).val, buf, tr->code);
}
}
#endif /*OPLUS_FEATURE_HANS_FREEZE*/
off_start_offset = ALIGN(tr->data_size, sizeof(void *));
buffer_offset = off_start_offset;
off_end_offset = off_start_offset + tr->offsets_size;
@@ -3360,7 +3927,11 @@ static void binder_transaction(struct binder_proc *proc,
struct flat_binder_object *fp;
fp = to_flat_binder_object(hdr);
#if defined(CONFIG_OPLUS_FEATURE_BINDER_STATS_ENABLE)
ret = binder_translate_binder(tr, fp, t, thread);
#else
ret = binder_translate_binder(fp, t, thread);
#endif
if (ret < 0) {
return_error = BR_FAILED_REPLY;
return_error_param = ret;
@@ -3516,6 +4087,9 @@ static void binder_transaction(struct binder_proc *proc,
t->work.type = BINDER_WORK_TRANSACTION;
if (reply) {
#ifdef CONFIG_OPLUS_FEATURE_INPUT_BOOST_V4
bool oneway = !!(t->flags & TF_ONE_WAY);
#endif /* CONFIG_OPLUS_FEATURE_INPUT_BOOST_V4 */
binder_enqueue_thread_work(thread, tcomplete);
binder_inner_proc_lock(target_proc);
if (target_thread->is_dead) {
@@ -3528,6 +4102,19 @@ static void binder_transaction(struct binder_proc *proc,
binder_inner_proc_unlock(target_proc);
wake_up_interruptible_sync(&target_thread->wait);
#ifdef OPLUS_FEATURE_SCHED_ASSIST
if (sysctl_sched_assist_enabled && !proc->proc_type) {
binder_unset_inherit_ux(thread->task);
}
#endif /* OPLUS_FEATURE_SCHED_ASSIST */
#ifdef CONFIG_OPLUS_FEATURE_INPUT_BOOST_V4
binder_thread_remove_fbg(thread->task, oneway);
#endif /* CONFIG_OPLUS_FEATURE_INPUT_BOOST_V4 */
#ifdef CONFIG_OPLUS_BINDER_STRATEGY
binder_inner_proc_lock(proc);
obwork_check_restrict_off(proc);
binder_inner_proc_unlock(proc);
#endif
binder_restore_priority(current, in_reply_to->saved_priority);
binder_free_transaction(in_reply_to);
} else if (!(t->flags & TF_ONE_WAY)) {
@@ -3911,6 +4498,9 @@ static int binder_thread_write(struct binder_proc *proc,
proc->requested_threads_started++;
}
thread->looper |= BINDER_LOOPER_STATE_REGISTERED;
#ifdef CONFIG_OPLUS_BINDER_STRATEGY
obthread_init(proc, thread);
#endif
binder_inner_proc_unlock(proc);
break;
case BC_ENTER_LOOPER:
@@ -4170,11 +4760,36 @@ static int binder_wait_for_work(struct binder_thread *thread,
prepare_to_wait(&thread->wait, &wait, TASK_INTERRUPTIBLE);
if (binder_has_work_ilocked(thread, do_proc_work))
break;
#ifdef OPLUS_FEATURE_SCHED_ASSIST
if (do_proc_work) {
list_add(&thread->waiting_thread_node,
&proc->waiting_threads);
if (sysctl_sched_assist_enabled) {
binder_unset_inherit_ux(thread->task);
}
}
#else /* OPLUS_FEATURE_SCHED_ASSIST */
if (do_proc_work)
list_add(&thread->waiting_thread_node,
&proc->waiting_threads);
#endif /* OPLUS_FEATURE_SCHED_ASSIST */
#if IS_ENABLED(CONFIG_OPLUS_FEATURE_CPU_JANKINFO)
android_vh_binder_wait_for_work_hanlder(NULL,
do_proc_work, thread, proc);
#endif
binder_inner_proc_unlock(proc);
#ifdef OPLUS_FEATURE_HEALTHINFO
#ifdef CONFIG_OPLUS_JANK_INFO
current->in_binder = 1;
#endif
#endif /* OPLUS_FEATURE_HEALTHINFO */
schedule();
#ifdef OPLUS_FEATURE_HEALTHINFO
#ifdef CONFIG_OPLUS_JANK_INFO
current->in_binder = 0;
#endif
#endif /* OPLUS_FEATURE_HEALTHINFO */
binder_inner_proc_lock(proc);
list_del_init(&thread->waiting_thread_node);
if (signal_pending(current)) {
@@ -4253,6 +4868,11 @@ retry:
binder_inner_proc_lock(proc);
if (!binder_worklist_empty_ilocked(&thread->todo))
list = &thread->todo;
#ifdef CONFIG_OPLUS_BINDER_STRATEGY
else if (obthread_has_work(thread) && wait_for_proc_work) {
list = &ob_target.ob_list;
}
#endif
else if (!binder_worklist_empty_ilocked(&proc->todo) &&
wait_for_proc_work)
list = &proc->todo;
@@ -4448,8 +5068,13 @@ retry:
trd->cookie = target_node->cookie;
node_prio.sched_policy = target_node->sched_policy;
node_prio.prio = target_node->min_priority;
#ifdef OPLUS_FEATURE_SCHED_ASSIST
binder_transaction_priority(thread, current, t, node_prio,
target_node->inherit_rt);
#else
binder_transaction_priority(current, t, node_prio,
target_node->inherit_rt);
#endif /* OPLUS_FEATURE_SCHED_ASSIST */
cmd = BR_TRANSACTION;
} else {
trd->target.ptr = 0;
@@ -4467,6 +5092,11 @@ retry:
trd->sender_pid =
task_tgid_nr_ns(sender,
task_active_pid_ns(current));
#ifdef OPLUS_FEATURE_SCHED_ASSIST
if (sysctl_sched_assist_enabled) {
binder_set_inherit_ux(thread->task, t_from->task);
}
#endif /* OPLUS_FEATURE_SCHED_ASSIST */
} else {
trd->sender_pid = 0;
}
@@ -4686,6 +5316,9 @@ static void binder_free_proc(struct binder_proc *proc)
BUG_ON(!list_empty(&proc->todo));
BUG_ON(!list_empty(&proc->delivered_death));
#ifdef CONFIG_OPLUS_BINDER_STRATEGY
obproc_free(proc);
#endif
device = container_of(proc->context, struct binder_device, context);
if (refcount_dec_and_test(&device->ref)) {
kfree(proc->context->name);
@@ -4861,6 +5494,10 @@ static int binder_ioctl_write_read(struct file *filp,
binder_inner_proc_lock(proc);
if (!binder_worklist_empty_ilocked(&proc->todo))
binder_wakeup_proc_ilocked(proc);
#ifdef CONFIG_OPLUS_BINDER_STRATEGY
if (obproc_has_work(proc))
obthread_wakeup(proc);
#endif
binder_inner_proc_unlock(proc);
if (ret < 0) {
if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
@@ -4922,6 +5559,15 @@ static int binder_ioctl_set_ctx_mgr(struct file *filp,
new_node->has_strong_ref = 1;
new_node->has_weak_ref = 1;
context->binder_context_mgr_node = new_node;
#if defined(CONFIG_OPLUS_FEATURE_BINDER_STATS_ENABLE)
if (NULL != context->binder_context_mgr_node &&
NULL != context->binder_context_mgr_node->proc &&
NULL != context->binder_context_mgr_node->proc->tsk) {
snprintf(context->binder_context_mgr_node->service_name, OPLUS_MAX_SERVICE_NAME_LEN,
"%s", context->binder_context_mgr_node->proc->tsk->comm);
context->binder_context_mgr_node->service_name[OPLUS_MAX_SERVICE_NAME_LEN-1] = '\0';
}
#endif
binder_node_unlock(new_node);
binder_put_node(new_node);
out:
@@ -5219,6 +5865,9 @@ static int binder_open(struct inode *nodp, struct file *filp)
spin_lock_init(&proc->outer_lock);
get_task_struct(current->group_leader);
proc->tsk = current->group_leader;
#ifdef OPLUS_FEATURE_SCHED_ASSIST
proc->proc_type = is_binder_proc_sf(proc) ? 1 : 0;
#endif /* OPLUS_FEATURE_SCHED_ASSIST */
mutex_init(&proc->files_lock);
proc->cred = get_cred(filp->f_cred);
INIT_LIST_HEAD(&proc->todo);
@@ -5248,6 +5897,9 @@ static int binder_open(struct inode *nodp, struct file *filp)
INIT_LIST_HEAD(&proc->delivered_death);
INIT_LIST_HEAD(&proc->waiting_threads);
filp->private_data = proc;
#ifdef CONFIG_OPLUS_BINDER_STRATEGY
obtarget_init(proc);
#endif
mutex_lock(&binder_procs_lock);
hlist_add_head(&proc->proc_node, &binder_procs);
@@ -5491,6 +6143,10 @@ static void binder_deferred_release(struct binder_proc *proc)
binder_release_work(proc, &proc->todo);
binder_release_work(proc, &proc->delivered_death);
#ifdef CONFIG_OPLUS_BINDER_STRATEGY
if (proc == ob_target.ob_proc)
binder_release_work(proc, &ob_target.ob_list);
#endif
binder_debug(BINDER_DEBUG_OPEN_CLOSE,
"%s: %d threads %d, nodes %d (ref %d), refs %d, active transactions %d\n",
@@ -5989,6 +6645,71 @@ int binder_state_show(struct seq_file *m, void *unused)
return 0;
}
#ifdef OPLUS_FEATURE_HANS_FREEZE
/*
 * hans_check_uid_proc_status - report pending binder work on one binder_proc
 * @proc: the binder_proc to inspect (must be valid; caller holds a reference
 *        via binder_procs_lock, see hans_check_frozen_transcation())
 * @type: message type forwarded verbatim to hans_report()
 *
 * Scans, under proc->inner_lock, (1) every binder thread's todo list,
 * (2) every thread's transaction_stack for an incoming (to_thread == thread)
 * transaction, and (3) the proc-wide todo list.  The first pending item found
 * is reported via hans_report() with a tag identifying where it was found
 * ("FROZEN_TRANS_THREAD" / "FROZEN_TRANS_STACK" / "FROZEN_TRANS_PROC") and
 * the function returns immediately.
 *
 * Locking: proc->inner_lock is dropped before every hans_report() call and
 * on every return path; btrans->lock nests inside proc->inner_lock.
 */
static void hans_check_uid_proc_status(struct binder_proc *proc, enum message_type type)
{
	struct rb_node *n = NULL;
	struct binder_thread *thread = NULL;
	int uid = -1;
	struct binder_transaction *btrans = NULL;
	bool empty = true;

	/* check binder_thread/transaction_stack/binder_proc ongoing transaction */
	binder_inner_proc_lock(proc);
	for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n)) {
		thread = rb_entry(n, struct binder_thread, rb_node);
		empty = binder_worklist_empty_ilocked(&thread->todo);
		if (thread->task != NULL) {
			/* has "todo" binder thread in worklist? */
			uid = task_uid(thread->task).val;
			if (!empty) {
				/* unlock before reporting; report and stop at first hit */
				binder_inner_proc_unlock(proc);
				hans_report(type, -1, -1, -1, uid, "FROZEN_TRANS_THREAD", 1);
				return;
			}
			/* has transaction in transaction_stack? */
			btrans = thread->transaction_stack;
			if (btrans) {
				spin_lock(&btrans->lock);
				if (btrans->to_thread == thread) {
					/* only report incoming binder call */
					spin_unlock(&btrans->lock);
					binder_inner_proc_unlock(proc);
					hans_report(type, -1, -1, -1, uid, "FROZEN_TRANS_STACK", 1);
					return;
				}
				spin_unlock(&btrans->lock);
			}
		}
	}

	/* has "todo" binder proc in worklist */
	empty = binder_worklist_empty_ilocked(&proc->todo);
	if (proc->tsk != NULL && !empty) {
		uid = task_uid(proc->tsk).val;
		binder_inner_proc_unlock(proc);
		hans_report(type, -1, -1, -1, uid, "FROZEN_TRANS_PROC", 1);
		return;
	}
	binder_inner_proc_unlock(proc);
}
/*
 * hans_check_frozen_transcation - report pending binder work for a frozen uid
 * @uid:  uid whose binder processes should be inspected
 * @type: message type forwarded to hans_report() via
 *        hans_check_uid_proc_status()
 *
 * Walks the global binder_procs list under binder_procs_lock and inspects
 * every proc owned by @uid for pending binder work.
 *
 * NOTE(review): proc->tsk is dereferenced here without a NULL check, while
 * the helper above does check proc->tsk — presumably tsk is never NULL while
 * the proc is on binder_procs; confirm against binder_open()/release paths.
 */
void hans_check_frozen_transcation(uid_t uid, enum message_type type)
{
	struct binder_proc *proc;

	mutex_lock(&binder_procs_lock);
	hlist_for_each_entry(proc, &binder_procs, proc_node) {
		if (proc != NULL && (task_uid(proc->tsk).val == uid)) {
			hans_check_uid_proc_status(proc, type);
		}
	}
	mutex_unlock(&binder_procs_lock);
}
#endif /*OPLUS_FEATURE_HANS_FREEZE*/
int binder_stats_show(struct seq_file *m, void *unused)
{
struct binder_proc *proc;

View File

@@ -33,6 +33,9 @@
#include <linux/highmem.h>
#include "binder_alloc.h"
#include "binder_trace.h"
#ifdef OPLUS_FEATURE_HANS_FREEZE
#include <linux/hans.h>
#endif /*OPLUS_FEATURE_HANS_FREEZE*/
struct list_lru binder_alloc_lru;
@@ -400,6 +403,9 @@ static struct binder_buffer *binder_alloc_new_buf_locked(
void __user *end_page_addr;
size_t size, data_offsets_size;
int ret;
#ifdef OPLUS_FEATURE_HANS_FREEZE
struct task_struct *p = NULL;
#endif /*OPLUS_FEATURE_HANS_FREEZE*/
if (!binder_alloc_get_vma(alloc)) {
binder_alloc_debug(BINDER_DEBUG_USER_ERROR,
@@ -424,6 +430,18 @@ static struct binder_buffer *binder_alloc_new_buf_locked(
alloc->pid, extra_buffers_size);
return ERR_PTR(-EINVAL);
}
#ifdef OPLUS_FEATURE_HANS_FREEZE
if (is_async
&& (alloc->free_async_space < 3 * (size + sizeof(struct binder_buffer))
|| (alloc->free_async_space < ((alloc->buffer_size / 2) * 9 / 10)))) {
rcu_read_lock();
p = find_task_by_vpid(alloc->pid);
rcu_read_unlock();
if (p != NULL && is_frozen_tg(p)) {
hans_report(ASYNC_BINDER, task_tgid_nr(current), task_uid(current).val, task_tgid_nr(p), task_uid(p).val, "free_buffer_full", -1);
}
}
#endif /*OPLUS_FEATURE_HANS_FREEZE*/
if (is_async &&
alloc->free_async_space < size + sizeof(struct binder_buffer)) {
binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
@@ -548,6 +566,15 @@ static struct binder_buffer *binder_alloc_new_buf_locked(
debug_low_async_space_locked(alloc, pid);
}
}
#ifdef OPLUS_BUG_STABILITY
if (size > 2000000 /*2MB*/){
binder_alloc_debug(BINDER_DEBUG_USER_ERROR,
"%d: binder_alloc_buf size %zd successed, but it seems too large.\n",
alloc->pid, size);
}
#endif /*OPLUS_BUG_STABILITY*/
return buffer;
err_alloc_buf_struct_failed:

View File

@@ -12,6 +12,10 @@
#include <linux/stddef.h>
#include <linux/types.h>
#include <linux/uidgid.h>
#ifdef CONFIG_OPLUS_FEATURE_CPU_JANKINFO
#include "binder_alloc.h"
#endif
struct binder_context {
struct binder_node *binder_context_mgr_node;
@@ -141,6 +145,406 @@ struct binder_transaction_log {
struct binder_transaction_log_entry entry[32];
};
#ifdef CONFIG_OPLUS_FEATURE_CPU_JANKINFO
#define OPLUS_MAX_SERVICE_NAME_LEN 32
/* Object categories counted in binder_stats.obj_created/obj_deleted. */
enum binder_stat_types {
	BINDER_STAT_PROC,
	BINDER_STAT_THREAD,
	BINDER_STAT_NODE,
	BINDER_STAT_REF,
	BINDER_STAT_DEATH,
	BINDER_STAT_TRANSACTION,
	BINDER_STAT_TRANSACTION_COMPLETE,
	BINDER_STAT_COUNT	/* number of categories; sizes the stat arrays */
};
/*
 * struct binder_stats - binder command/object statistics
 * All counters are atomics, so no lock is needed (kept per-proc and
 * per-thread, see struct binder_proc / struct binder_thread below).
 */
struct binder_stats {
	atomic_t br[_IOC_NR(BR_FAILED_REPLY) + 1];	/* BR_* return codes, indexed by ioctl nr */
	atomic_t bc[_IOC_NR(BC_REPLY_SG) + 1];		/* BC_* commands, indexed by ioctl nr */
	atomic_t obj_created[BINDER_STAT_COUNT];	/* objects created, per binder_stat_types */
	atomic_t obj_deleted[BINDER_STAT_COUNT];	/* objects deleted, per binder_stat_types */
};
/**
* struct binder_work - work enqueued on a worklist
* @entry: node enqueued on list
* @type: type of work to be performed
*
* There are separate work lists for proc, thread, and node (async).
*/
struct binder_work {
	struct list_head entry;

	enum binder_work_type {
		BINDER_WORK_TRANSACTION = 1,
		BINDER_WORK_TRANSACTION_COMPLETE,
		BINDER_WORK_RETURN_ERROR,
		BINDER_WORK_NODE,
		BINDER_WORK_DEAD_BINDER,
		BINDER_WORK_DEAD_BINDER_AND_CLEAR,
		BINDER_WORK_CLEAR_DEATH_NOTIFICATION,
	} type;

#ifdef CONFIG_OPLUS_BINDER_STRATEGY
	/*
	 * NOTE(review): presumably a timestamp recorded when the work is
	 * queued by the oplus binder strategy (ob_*) code — confirm against
	 * obtarget_init()/obthread_has_work() in binder.c.
	 */
	u64 ob_begin;
#endif
};
/*
 * struct binder_error - pending error to be delivered via @work.
 * @cmd is the command value delivered for the error (presumably a BR_*
 * return code — confirm against binder.c usage, e.g. return_error handling).
 */
struct binder_error {
	struct binder_work work;
	uint32_t cmd;
};
/**
* struct binder_node - binder node bookkeeping
* @debug_id: unique ID for debugging
* (invariant after initialized)
* @lock: lock for node fields
* @work: worklist element for node work
* (protected by @proc->inner_lock)
* @rb_node: element for proc->nodes tree
* (protected by @proc->inner_lock)
* @dead_node: element for binder_dead_nodes list
* (protected by binder_dead_nodes_lock)
* @proc: binder_proc that owns this node
* (invariant after initialized)
* @refs: list of references on this node
* (protected by @lock)
* @internal_strong_refs: used to take strong references when
* initiating a transaction
* (protected by @proc->inner_lock if @proc
* and by @lock)
* @local_weak_refs: weak user refs from local process
* (protected by @proc->inner_lock if @proc
* and by @lock)
* @local_strong_refs: strong user refs from local process
* (protected by @proc->inner_lock if @proc
* and by @lock)
* @tmp_refs: temporary kernel refs
* (protected by @proc->inner_lock while @proc
* is valid, and by binder_dead_nodes_lock
* if @proc is NULL. During inc/dec and node release
* it is also protected by @lock to provide safety
* as the node dies and @proc becomes NULL)
* @ptr: userspace pointer for node
* (invariant, no lock needed)
* @cookie: userspace cookie for node
* (invariant, no lock needed)
* @has_strong_ref: userspace notified of strong ref
* (protected by @proc->inner_lock if @proc
* and by @lock)
* @pending_strong_ref: userspace has acked notification of strong ref
* (protected by @proc->inner_lock if @proc
* and by @lock)
* @has_weak_ref: userspace notified of weak ref
* (protected by @proc->inner_lock if @proc
* and by @lock)
* @pending_weak_ref: userspace has acked notification of weak ref
* (protected by @proc->inner_lock if @proc
* and by @lock)
* @has_async_transaction: async transaction to node in progress
* (protected by @lock)
* @sched_policy: minimum scheduling policy for node
* (invariant after initialized)
* @accept_fds: file descriptor operations supported for node
* (invariant after initialized)
* @min_priority: minimum scheduling priority
* (invariant after initialized)
* @inherit_rt: inherit RT scheduling policy from caller
* @txn_security_ctx: require sender's security context
* (invariant after initialized)
* @async_todo: list of async work items
* (protected by @proc->inner_lock)
*
* Bookkeeping structure for binder nodes.
*/
/* Per-field locking rules are documented in the kernel-doc block above. */
struct binder_node {
	int debug_id;
	spinlock_t lock;
	struct binder_work work;
	union {
		struct rb_node rb_node;		/* in proc->nodes while proc is alive */
		struct hlist_node dead_node;	/* on binder_dead_nodes otherwise */
	};
	struct binder_proc *proc;
	struct hlist_head refs;
	int internal_strong_refs;
	int local_weak_refs;
	int local_strong_refs;
	int tmp_refs;
	binder_uintptr_t ptr;
	binder_uintptr_t cookie;
	struct {
		/*
		 * bitfield elements protected by
		 * proc inner_lock
		 */
		u8 has_strong_ref:1;
		u8 pending_strong_ref:1;
		u8 has_weak_ref:1;
		u8 pending_weak_ref:1;
	};
	struct {
		/*
		 * invariant after initialization
		 */
		u8 sched_policy:2;
		u8 inherit_rt:1;
		u8 accept_fds:1;
		u8 txn_security_ctx:1;
		u8 min_priority;
	};
	bool has_async_transaction;
	struct list_head async_todo;
#if defined(CONFIG_OPLUS_FEATURE_BINDER_STATS_ENABLE)
	/*
	 * NUL-terminated service name, at most OPLUS_MAX_SERVICE_NAME_LEN
	 * bytes.  For the context-manager node it is filled from the owning
	 * proc's task comm in binder_ioctl_set_ctx_mgr(); other nodes —
	 * TODO confirm where/if this is populated.
	 */
	char service_name[OPLUS_MAX_SERVICE_NAME_LEN];
#endif
};
struct binder_ref_death {
	/**
	 * @work: worklist element for death notifications
	 * (protected by inner_lock of the proc that
	 * this ref belongs to)
	 */
	struct binder_work work;
	binder_uintptr_t cookie;	/* userspace cookie echoed back with the notification */
};
/**
* struct binder_ref_data - binder_ref counts and id
* @debug_id: unique ID for the ref
* @desc: unique userspace handle for ref
* @strong: strong ref count (debugging only if not locked)
* @weak: weak ref count (debugging only if not locked)
*
* Structure to hold ref count and ref id information. Since
* the actual ref can only be accessed with a lock, this structure
* is used to return information about the ref to callers of
* ref inc/dec functions.
*/
/* Snapshot of a ref's counts and ids; see the kernel-doc block above. */
struct binder_ref_data {
	int debug_id;		/* unique ID for the ref */
	uint32_t desc;		/* unique userspace handle for the ref */
	int strong;		/* strong ref count (debugging only if not locked) */
	int weak;		/* weak ref count (debugging only if not locked) */
};
/**
* struct binder_ref - struct to track references on nodes
* @data: binder_ref_data containing id, handle, and current refcounts
* @rb_node_desc: node for lookup by @data.desc in proc's rb_tree
* @rb_node_node: node for lookup by @node in proc's rb_tree
* @node_entry: list entry for node->refs list in target node
* (protected by @node->lock)
* @proc: binder_proc containing ref
* @node: binder_node of target node. When cleaning up a
* ref for deletion in binder_cleanup_ref, a non-NULL
* @node indicates the node must be freed
* @death: pointer to death notification (ref_death) if requested
* (protected by @node->lock)
*
* Structure to track references from procA to target node (on procB). This
* structure is unsafe to access without holding @proc->outer_lock.
*/
/* Unsafe to access without holding @proc->outer_lock; see kernel-doc above. */
struct binder_ref {
	/* Lookups needed: */
	/*   node + proc => ref (transaction) */
	/*   desc + proc => ref (transaction, inc/dec ref) */
	/*   node => refs + procs (proc exit) */
	struct binder_ref_data data;
	struct rb_node rb_node_desc;	/* in proc->refs_by_desc, keyed by data.desc */
	struct rb_node rb_node_node;	/* in proc->refs_by_node, keyed by node */
	struct hlist_node node_entry;	/* in node->refs (protected by node->lock) */
	struct binder_proc *proc;
	struct binder_node *node;
	struct binder_ref_death *death;	/* death notification, if requested */
};
/* Bit flags combined into binder_proc->deferred_work. */
enum binder_deferred_state {
	BINDER_DEFERRED_PUT_FILES = 0x01,
	BINDER_DEFERRED_FLUSH = 0x02,
	BINDER_DEFERRED_RELEASE = 0x04,
};
/**
* struct binder_priority - scheduler policy and priority
* @sched_policy scheduler policy
* @prio [100..139] for SCHED_NORMAL, [0..99] for FIFO/RT
*
* The binder driver supports inheriting the following scheduler policies:
* SCHED_NORMAL
* SCHED_BATCH
* SCHED_FIFO
* SCHED_RR
*/
/* Scheduler policy + priority pair; see the kernel-doc block above. */
struct binder_priority {
	unsigned int sched_policy;	/* SCHED_NORMAL/BATCH/FIFO/RR */
	int prio;			/* [100..139] for SCHED_NORMAL, [0..99] for FIFO/RT */
};
/**
* struct binder_proc - binder process bookkeeping
* @proc_node: element for binder_procs list
* @threads: rbtree of binder_threads in this proc
* (protected by @inner_lock)
* @nodes: rbtree of binder nodes associated with
* this proc ordered by node->ptr
* (protected by @inner_lock)
* @refs_by_desc: rbtree of refs ordered by ref->desc
* (protected by @outer_lock)
* @refs_by_node: rbtree of refs ordered by ref->node
* (protected by @outer_lock)
* @waiting_threads: threads currently waiting for proc work
* (protected by @inner_lock)
* @pid PID of group_leader of process
* (invariant after initialized)
* @tsk task_struct for group_leader of process
* (invariant after initialized)
* @files files_struct for process
* (protected by @files_lock)
* @files_lock mutex to protect @files
* @cred struct cred associated with the `struct file`
* in binder_open()
* (invariant after initialized)
* @deferred_work_node: element for binder_deferred_list
* (protected by binder_deferred_lock)
* @deferred_work: bitmap of deferred work to perform
* (protected by binder_deferred_lock)
* @is_dead: process is dead and awaiting free
* when outstanding transactions are cleaned up
* (protected by @inner_lock)
* @todo: list of work for this process
* (protected by @inner_lock)
* @stats: per-process binder statistics
* (atomics, no lock needed)
* @delivered_death: list of delivered death notification
* (protected by @inner_lock)
* @max_threads: cap on number of binder threads
* (protected by @inner_lock)
* @requested_threads: number of binder threads requested but not
* yet started. In current implementation, can
* only be 0 or 1.
* (protected by @inner_lock)
 * @requested_threads_started: number of binder threads started
* (protected by @inner_lock)
* @tmp_ref: temporary reference to indicate proc is in use
* (protected by @inner_lock)
* @default_priority: default scheduler priority
* (invariant after initialized)
* @debugfs_entry: debugfs node
* @alloc: binder allocator bookkeeping
* @context: binder_context for this proc
* (invariant after initialized)
* @inner_lock: can nest under outer_lock and/or node lock
 * @outer_lock:           no nesting under inner or node lock
* Lock order: 1) outer, 2) node, 3) inner
* @binderfs_entry: process-specific binderfs log file
*
* Bookkeeping structure for binder processes
*/
/* Per-field locking rules are documented in the kernel-doc block above. */
struct binder_proc {
	struct hlist_node proc_node;
	struct rb_root threads;
	struct rb_root nodes;
	struct rb_root refs_by_desc;
	struct rb_root refs_by_node;
	struct list_head waiting_threads;
	int pid;
	struct task_struct *tsk;
	struct files_struct *files;
	struct mutex files_lock;
	const struct cred *cred;
	struct hlist_node deferred_work_node;
	int deferred_work;
	bool is_dead;
#ifdef OPLUS_FEATURE_SCHED_ASSIST
	/*
	 * Set once in binder_open(): 1 if the proc is classified by
	 * is_binder_proc_sf() (surfaceflinger — TODO confirm), else 0.
	 */
	int proc_type;
#endif /* OPLUS_FEATURE_SCHED_ASSIST */
	struct list_head todo;
	struct binder_stats stats;
	struct list_head delivered_death;
	int max_threads;
	int requested_threads;
	int requested_threads_started;
	int tmp_ref;
	struct binder_priority default_priority;
	struct dentry *debugfs_entry;
	struct binder_alloc alloc;
	struct binder_context *context;
	spinlock_t inner_lock;
	spinlock_t outer_lock;
	struct dentry *binderfs_entry;
};
/* Bit flags for binder_thread->looper. */
enum {
	BINDER_LOOPER_STATE_REGISTERED = 0x01,
	BINDER_LOOPER_STATE_ENTERED = 0x02,
	BINDER_LOOPER_STATE_EXITED = 0x04,
	BINDER_LOOPER_STATE_INVALID = 0x08,
	BINDER_LOOPER_STATE_WAITING = 0x10,
	BINDER_LOOPER_STATE_POLL = 0x20,
#ifdef CONFIG_OPLUS_BINDER_STRATEGY
	/*
	 * NOTE(review): oplus binder strategy flag — presumably marks a
	 * background looper thread; confirm against the ob_* code in binder.c.
	 */
	BINDER_LOOPER_STATE_BACKGROUND = 0x40,
#endif
};
/**
* struct binder_thread - binder thread bookkeeping
* @proc: binder process for this thread
* (invariant after initialization)
* @rb_node: element for proc->threads rbtree
* (protected by @proc->inner_lock)
* @waiting_thread_node: element for @proc->waiting_threads list
* (protected by @proc->inner_lock)
* @pid: PID for this thread
* (invariant after initialization)
* @looper: bitmap of looping state
* (only accessed by this thread)
* @looper_needs_return: looping thread needs to exit driver
* (no lock needed)
* @transaction_stack: stack of in-progress transactions for this thread
* (protected by @proc->inner_lock)
* @todo: list of work to do for this thread
* (protected by @proc->inner_lock)
* @process_todo: whether work in @todo should be processed
* (protected by @proc->inner_lock)
* @return_error: transaction errors reported by this thread
* (only accessed by this thread)
* @reply_error: transaction errors reported by target thread
* (protected by @proc->inner_lock)
* @wait: wait queue for thread work
* @stats: per-thread statistics
* (atomics, no lock needed)
* @tmp_ref: temporary reference to indicate thread is in use
* (atomic since @proc->inner_lock cannot
* always be acquired)
* @is_dead: thread is dead and awaiting free
* when outstanding transactions are cleaned up
* (protected by @proc->inner_lock)
* @task: struct task_struct for this thread
*
* Bookkeeping structure for binder threads.
*/
/* Per-field locking rules are documented in the kernel-doc block above. */
struct binder_thread {
	struct binder_proc *proc;
	struct rb_node rb_node;			/* in proc->threads */
	struct list_head waiting_thread_node;	/* on proc->waiting_threads */
	int pid;
	int looper; /* only modified by this thread */
	bool looper_need_return; /* can be written by other thread */
	struct binder_transaction *transaction_stack;
	struct list_head todo;
	bool process_todo;
	struct binder_error return_error;	/* errors reported by this thread */
	struct binder_error reply_error;	/* errors reported by target thread */
	wait_queue_head_t wait;
	struct binder_stats stats;
	atomic_t tmp_ref;
	bool is_dead;
	struct task_struct *task;
};
#endif
extern struct binder_transaction_log binder_transaction_log;
extern struct binder_transaction_log binder_transaction_log_failed;
#endif /* _LINUX_BINDER_INTERNAL_H */

View File

@@ -156,6 +156,10 @@ config DEBUG_TEST_DRIVER_REMOVE
source "drivers/base/test/Kconfig"
#ifdef OPLUS_FEATURE_TP_BASIC
source "drivers/base/kernelFwUpdate/Kconfig"
#endif /*OPLUS_FEATURE_TP_BASIC*/
config SYS_HYPERVISOR
bool
default n

View File

@@ -28,3 +28,6 @@ obj-y += test/
ccflags-$(CONFIG_DEBUG_DRIVER) := -DDEBUG
#ifdef OPLUS_FEATURE_TP_BASIC
obj-$(CONFIG_OPLUS_FW_UPDATE) += kernelFwUpdate/
#endif /*OPLUS_FEATURE_TP_BASIC*/

View File

@@ -544,6 +544,10 @@ static int fw_load_sysfs_fallback(struct fw_sysfs *fw_sysfs,
struct device *f_dev = &fw_sysfs->dev;
struct fw_priv *fw_priv = fw_sysfs->fw_priv;
#ifdef OPLUS_FEATURE_TP_BSPFWUPDATE
char *envp[2]={"FwUp=compare", NULL};
#endif/*OPLUS_FEATURE_TP_BSPFWUPDATE*/
/* fall back on userspace loading */
if (!fw_priv->data)
fw_priv->is_paged_buf = true;
@@ -569,7 +573,15 @@ static int fw_load_sysfs_fallback(struct fw_sysfs *fw_sysfs,
fw_priv->need_uevent = true;
dev_set_uevent_suppress(f_dev, false);
dev_dbg(f_dev, "firmware: requesting %s\n", fw_priv->fw_name);
#ifdef OPLUS_FEATURE_TP_BSPFWUPDATE
if (opt_flags & FW_OPT_COMPARE) {
kobject_uevent_env(&fw_sysfs->dev.kobj, KOBJ_CHANGE,envp);
} else {
kobject_uevent(&fw_sysfs->dev.kobj, KOBJ_ADD);
}
#else
kobject_uevent(&fw_sysfs->dev.kobj, KOBJ_ADD);
#endif/*OPLUS_FEATURE_TP_BSPFWUPDATE*/
} else {
timeout = MAX_JIFFY_OFFSET;
}

View File

@@ -37,6 +37,9 @@ enum fw_opt {
FW_OPT_NO_WARN = BIT(3),
FW_OPT_NOCACHE = BIT(4),
FW_OPT_NOFALLBACK = BIT(5),
#ifdef OPLUS_FEATURE_TP_BSPFWUPDATE
FW_OPT_COMPARE = BIT(6),
#endif/*OPLUS_FEATURE_TP_BSPFWUPDATE*/
};
enum fw_status {

View File

@@ -283,6 +283,10 @@ static void free_fw_priv(struct fw_priv *fw_priv)
/* direct firmware loading support */
static char fw_path_para[256];
static const char * const fw_path[] = {
//#ifdef OPLUS_FEATURE_WIFI_RUSUPGRADE
//add for: support auto update function, include mtk fw, mtk wifi.cfg, qcom fw, qcom bdf, qcom ini
"/data/misc/firmware/active",
//#endif /* OPLUS_FEATURE_WIFI_RUSUPGRADE */
fw_path_para,
"/lib/firmware/updates/" UTS_RELEASE,
"/lib/firmware/updates",
@@ -298,8 +302,14 @@ static const char * const fw_path[] = {
module_param_string(path, fw_path_para, sizeof(fw_path_para), 0644);
MODULE_PARM_DESC(path, "customized firmware image search path with a higher priority than default path");
#ifdef OPLUS_FEATURE_TP_BSPFWUPDATE
static int fw_get_filesystem_firmware(struct device *device,
struct fw_priv *fw_priv,
enum fw_opt opt_flags)
#else
static int
fw_get_filesystem_firmware(struct device *device, struct fw_priv *fw_priv)
#endif /*OPLUS_FEATURE_TP_BSPFWUPDATE*/
{
loff_t size;
int i, len;
@@ -308,6 +318,13 @@ fw_get_filesystem_firmware(struct device *device, struct fw_priv *fw_priv)
enum kernel_read_file_id id = READING_FIRMWARE;
size_t msize = INT_MAX;
#ifdef OPLUS_FEATURE_TP_BSPFWUPDATE
if(opt_flags & FW_OPT_COMPARE) {
pr_err("%s opt_flags get FW_OPT_COMPARE!\n", __func__);
return rc;
}
#endif/*OPLUS_FEATURE_TP_BSPFWUPDATE*/
/* Already populated data member means we're loading into a buffer */
if (fw_priv->data) {
id = READING_FIRMWARE_PREALLOC_BUFFER;
@@ -330,6 +347,24 @@ fw_get_filesystem_firmware(struct device *device, struct fw_priv *fw_priv)
break;
}
#if defined(OPLUS_FEATURE_CAMERA_OIS)
if (strstr(fw_path[i], "/lib/firmware/") != NULL) {
if (strstr(fw_priv->fw_name, "ois_") != NULL) {
snprintf(path, PATH_MAX, "%s/%s", "/odm/vendor/firmware", fw_priv->fw_name);
}
}
#endif /*OPLUS_FEATURE_CAMERA_OIS*/
#if defined(OPLUS_FEATURE_PXLW_IRIS5)
if (!strcmp(fw_priv->fw_name, "iris5.fw")
|| !strcmp(fw_priv->fw_name, "iris5_ccf1.fw")
|| !strcmp(fw_priv->fw_name, "iris5_ccf2.fw")) {
snprintf(path, PATH_MAX, "%s/%s", "/odm/vendor/firmware", fw_priv->fw_name);
}
if (!strcmp(fw_priv->fw_name, "iris5_ccf1b.fw")
|| !strcmp(fw_priv->fw_name, "iris5_ccf2b.fw")) {
snprintf(path, PATH_MAX, "%s/%s", "/data/vendor/display", fw_priv->fw_name);
}
#endif /*OPLUS_FEATURE_PXLW_IRIS5*/
fw_priv->size = 0;
rc = kernel_read_file_from_path(path, &fw_priv->data, &size,
msize, id);
@@ -592,7 +627,11 @@ _request_firmware(const struct firmware **firmware_p, const char *name,
if (ret <= 0) /* error or already assigned */
goto out;
#ifdef OPLUS_FEATURE_TP_BSPFWUPDATE
ret = fw_get_filesystem_firmware(device, fw->priv, opt_flags);
#else
ret = fw_get_filesystem_firmware(device, fw->priv);
#endif/*OPLUS_FEATURE_TP_BSPFWUPDATE*/
if (ret) {
if (!(opt_flags & FW_OPT_NO_WARN))
dev_dbg(device,
@@ -648,6 +687,25 @@ request_firmware(const struct firmware **firmware_p, const char *name,
}
EXPORT_SYMBOL(request_firmware);
#ifdef VENDOR_EDIT
//Add for: reload wlan bdf without using cache
/*
 * request_firmware_no_cache - load a firmware image, bypassing the fw cache
 * @firmware_p: out parameter receiving the loaded firmware
 * @name:       name of the firmware file to look up
 * @device:     device for which the firmware is requested
 *
 * Behaves like request_firmware() but also passes FW_OPT_NOCACHE so the
 * image is not retained in the firmware cache (used to reload the wlan BDF).
 * Returns the result of _request_firmware(): 0 on success, negative errno
 * on failure.
 */
int
request_firmware_no_cache(const struct firmware **firmware_p, const char *name,
		struct device *device)
{
	int err;

	/* Pin this module for the duration of the request. */
	__module_get(THIS_MODULE);

	err = _request_firmware(firmware_p, name, device, NULL, 0,
				FW_OPT_UEVENT | FW_OPT_NOCACHE);

	module_put(THIS_MODULE);
	return err;
}
EXPORT_SYMBOL(request_firmware_no_cache);
#endif /* VENDOR_EDIT */
/**
* firmware_request_nowarn() - request for an optional fw module
* @firmware: pointer to firmware image

1
drivers/base/kernelFwUpdate Symbolic link
View File

@@ -0,0 +1 @@
../../../../vendor/oplus/kernel/touchpanel/kernelFwUpdate/

View File

@@ -25,7 +25,9 @@
#include <linux/atomic.h>
#include <linux/uaccess.h>
#if defined(OPLUS_FEATURE_MULTI_FREEAREA) && defined(CONFIG_PHYSICAL_ANTI_FRAGMENTATION)
#include <linux/mmzone.h>
#endif
static DEFINE_MUTEX(mem_sysfs_mutex);
#define MEMORY_CLASS_NAME "memory"
@@ -447,12 +449,32 @@ static DEVICE_ATTR(valid_zones, 0444, show_valid_zones, NULL);
#ifdef CONFIG_MEMORY_HOTPLUG
static int count_num_free_block_pages(struct zone *zone, int bid)
{
#if defined(OPLUS_FEATURE_MULTI_FREEAREA) && defined(CONFIG_PHYSICAL_ANTI_FRAGMENTATION)
int order, type, flc;
#else
int order, type;
#endif
unsigned long freecount = 0;
unsigned long flags;
spin_lock_irqsave(&zone->lock, flags);
for (type = 0; type < MIGRATE_TYPES; type++) {
#if defined(OPLUS_FEATURE_MULTI_FREEAREA) && defined(CONFIG_PHYSICAL_ANTI_FRAGMENTATION)
for (flc = 0; flc < FREE_AREA_COUNTS; flc++) {
struct free_area *area;
struct page *page;
for (order = 0; order < MAX_ORDER; ++order) {
area = &(zone->free_area[flc][order]);
list_for_each_entry(page, &area->free_list[type], lru) {
unsigned long pfn = page_to_pfn(page);
int section_nr = pfn_to_section_nr(pfn);
if (bid == base_memory_block_id(section_nr))
freecount += (1 << order);
}
}
}
#else
for (order = 0; order < MAX_ORDER; ++order) {
struct free_area *area;
struct page *page;
@@ -467,6 +489,7 @@ static int count_num_free_block_pages(struct zone *zone, int bid)
}
}
#endif
}
spin_unlock_irqrestore(&zone->lock, flags);

View File

@@ -19,12 +19,22 @@
#include <linux/irqdesc.h>
#include <linux/wakeup_reason.h>
#include <trace/events/power.h>
#ifdef OPLUS_FEATURE_LOGKIT
#include <linux/rtc.h>
#include <soc/oplus/system/oplus_sync_time.h>
#endif /* OPLUS_FEATURE_LOGKIT */
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/irqdesc.h>
#ifdef OPLUS_FEATURE_POWERINFO_STANDBY
#include <soc/oplus/oplus_wakelock_profiler.h>
#endif /* OPLUS_FEATURE_POWERINFO_STANDBY */
#include "power.h"
#include <linux/proc_fs.h>
#ifndef CONFIG_SUSPEND
suspend_state_t pm_suspend_target_state;
#define pm_suspend_target_state (PM_SUSPEND_ON)
@@ -545,6 +555,10 @@ static void wakeup_source_activate(struct wakeup_source *ws)
"unregistered wakeup source\n"))
return;
#ifdef OPLUS_FEATURE_POWERINFO_STANDBY
wakeup_get_start_time();
#endif /* OPLUS_FEATURE_POWERINFO_STANDBY */
ws->active = true;
ws->active_count++;
ws->last_time = ktime_get();
@@ -686,8 +700,12 @@ static void wakeup_source_deactivate(struct wakeup_source *ws)
trace_wakeup_source_deactivate(ws->name, cec);
split_counters(&cnt, &inpr);
if (!inpr && waitqueue_active(&wakeup_count_wait_queue))
if (!inpr && waitqueue_active(&wakeup_count_wait_queue)) {
#ifdef OPLUS_FEATURE_POWERINFO_STANDBY
wakeup_get_end_hold_time();
#endif /* OPLUS_FEATURE_POWERINFO_STANDBY */
wake_up(&wakeup_count_wait_queue);
}
}
/**
@@ -861,7 +879,11 @@ void pm_print_active_wakeup_sources(void)
srcuidx = srcu_read_lock(&wakeup_srcu);
list_for_each_entry_rcu(ws, &wakeup_sources, entry) {
if (ws->active) {
#ifdef OPLUS_FEATURE_POWERINFO_STANDBY
pr_info("active wakeup source: %s\n", ws->name);
#else
pr_debug("active wakeup source: %s\n", ws->name);
#endif /* OPLUS_FEATURE_POWERINFO_STANDBY */
active = 1;
} else if (!active &&
(!last_activity_ws ||
@@ -871,13 +893,46 @@ void pm_print_active_wakeup_sources(void)
}
}
if (!active && last_activity_ws)
if (!active && last_activity_ws) {
#ifdef OPLUS_FEATURE_POWERINFO_STANDBY
pr_info("last active wakeup source: %s\n",
last_activity_ws->name);
#else
pr_debug("last active wakeup source: %s\n",
last_activity_ws->name);
#endif /* OPLUS_FEATURE_POWERINFO_STANDBY */
}
srcu_read_unlock(&wakeup_srcu, srcuidx);
}
EXPORT_SYMBOL_GPL(pm_print_active_wakeup_sources);
#ifdef OPLUS_FEATURE_POWERINFO_STANDBY
/*
 * get_ws_listhead - expose the global wakeup_sources list head
 * @ws: out parameter; on return *ws points at &wakeup_sources.
 *      A NULL @ws is ignored.
 *
 * Callers traversing the list must hold the wakeup SRCU read lock
 * (see wakeup_srcu_read_lock()/wakeup_srcu_read_unlock() below).
 */
void get_ws_listhead(struct list_head **ws)
{
	if (!ws)
		return;

	*ws = &wakeup_sources;
}
/*
 * wakeup_srcu_read_lock - enter the wakeup-source SRCU read-side section
 * @srcuidx: out parameter receiving the index to pass back to
 *           wakeup_srcu_read_unlock(); must be a valid pointer.
 */
void wakeup_srcu_read_lock(int *srcuidx)
{
	*srcuidx = srcu_read_lock(&wakeup_srcu);
}
/*
 * wakeup_srcu_read_unlock - leave the wakeup-source SRCU read-side section
 * @srcuidx: index previously returned via wakeup_srcu_read_lock()
 */
void wakeup_srcu_read_unlock(int srcuidx)
{
	srcu_read_unlock(&wakeup_srcu, srcuidx);
}
/*
 * ws_all_release - check whether all wakeup sources have been released
 *
 * Reads the combined wakeup event counters via split_counters() and returns
 * true when no wakeup events are in progress (inpr == 0).  The saved event
 * count itself is not used here; split_counters() simply requires both out
 * parameters.
 */
bool ws_all_release(void)
{
	unsigned int cnt, inpr;

	pr_info("Enter: %s\n", __func__);
	split_counters(&cnt, &inpr);
	/* idiom fix: "inpr == 0" instead of redundant "(!inpr) ? true : false" */
	return inpr == 0;
}
#endif /* OPLUS_FEATURE_POWERINFO_STANDBY */
/**
* pm_wakeup_pending - Check if power transition in progress should be aborted.
*
@@ -903,6 +958,12 @@ bool pm_wakeup_pending(void)
raw_spin_unlock_irqrestore(&events_lock, flags);
if (ret) {
#ifndef OPLUS_FEATURE_POWERINFO_STANDBY
pr_debug("PM: Wakeup pending, aborting suspend\n");
#else
pr_info("PM: Wakeup pending, aborting suspend\n");
wakeup_reasons_statics(IRQ_NAME_ABORT, WS_CNT_ABORT);
#endif /* OPLUS_FEATURE_POWERINFO_STANDBY */
pm_get_active_wakeup_sources(suspend_abort,
MAX_SUSPEND_ABORT_LEN);
log_suspend_abort_reason(suspend_abort);
@@ -946,6 +1007,11 @@ void pm_system_irq_wakeup(unsigned int irq_number)
log_irq_wakeup_reason(irq_number);
pr_warn("%s: %d triggered %s\n", __func__, irq_number, name);
#ifdef OPLUS_FEATURE_POWERINFO_STANDBY
pr_info("%s: resume caused by irq=%d, name=%s\n", __func__, irq_number, name);
wakeup_reasons_statics(name, WS_CNT_POWERKEY|WS_CNT_RTCALARM);
#endif /* OPLUS_FEATURE_POWERINFO_STANDBY */
pm_wakeup_irq = irq_number;
pm_system_wakeup();
}
@@ -1169,12 +1235,22 @@ static const struct file_operations wakeup_sources_stats_fops = {
.read = seq_read,
.llseek = seq_lseek,
.release = seq_release_private,
#ifdef OPLUS_FEATURE_LOGKIT
.write = watchdog_write,
#endif /* OPLUS_FEATURE_LOGKIT */
};
static int __init wakeup_sources_debugfs_init(void)
{
#ifndef OPLUS_FEATURE_LOGKIT
wakeup_sources_stats_dentry = debugfs_create_file("wakeup_sources",
S_IRUGO, NULL, NULL, &wakeup_sources_stats_fops);
#else /* OPLUS_FEATURE_LOGKIT */
wakeup_sources_stats_dentry = debugfs_create_file("wakeup_sources",
S_IRUGO| S_IWUGO, NULL, NULL, &wakeup_sources_stats_fops);
#endif /* OPLUS_FEATURE_LOGKIT */
proc_create_data("wakeup_sources", 0444, NULL, &wakeup_sources_stats_fops, NULL);
return 0;
}

View File

@@ -53,3 +53,55 @@ config ZRAM_MEMORY_TRACKING
/sys/kernel/debug/zram/zramX/block_state.
See Documentation/blockdev/zram.txt for more information.
#ifdef OPLUS_FEATURE_ZRAM_OPT
config OPLUS_ZRAM_OPT
bool "oplus zram optimization"
depends on ZRAM
default y
help
oplus zram optimization
#endif /*OPLUS_FEATURE_ZRAM_OPT*/
#ifdef OPLUS_FEATURE_ZRAM_WRITEBACK
source "drivers/block/zram/zwb_handle/Kconfig"
#endif /* OPLUS_FEATURE_ZRAM_WRITEBACK */
config HYBRIDSWAP
bool "Enable Hybridswap"
depends on MEMCG && ZRAM && !ZRAM_DEDUP && !ZRAM_WRITEBACK && !ZWB_HANDLE
default n
help
	  Hybridswap is an intelligent memory management solution.
config HYBRIDSWAP_SWAPD
bool "Enable hybridswap swapd thread to reclaim anon pages in background"
default n
depends on HYBRIDSWAP
help
	  swapd is a kernel thread that reclaims anonymous pages in the
	  background. When the use of swap pages reaches the watermark
	  and the refault of anonymous pages is high, the content of
	  zram will be exchanged to eswap by a certain percentage.
# Selected when system need hybridswap container
config HYBRIDSWAP_CORE
bool "Hybridswap container device support"
depends on ZRAM && HYBRIDSWAP
default n
help
Say Y here if you want to use the hybridswap
as the backend device in ZRAM.
If unsure, say N here.
This module can't be compiled as a module,
the module is as one part of the ZRAM driver.
config HYBRIDSWAP_ASYNC_COMPRESS
	bool "hybridswap supports asynchronous compression of anon pages"
depends on ZRAM && HYBRIDSWAP
default n
help
	  Say Y here if you want to create an asynchronous thread
	  for compressing anon pages.
If unsure, say N here.
This feature will reduce the kswapd cpu load.

View File

@@ -1,4 +1,14 @@
# NOTE(review): 'zram-y' is assigned twice below. With kbuild ':=' the
# second assignment simply replaces the (identical) first, so this is
# harmless, but it looks like diff residue (removed line rendered next
# to its replacement) -- confirm only one assignment survives upstream.
zram-y := zcomp.o zram_drv.o
# SPDX-License-Identifier: GPL-2.0-only
zram-y := zcomp.o zram_drv.o
# Optional de-duplication support, built into zram.o when enabled.
zram-$(CONFIG_ZRAM_DEDUP) += zram_dedup.o
obj-$(CONFIG_ZRAM) += zram.o
#ifdef OPLUS_FEATURE_ZRAM_WRITEBACK
obj-$(CONFIG_ZWB_HANDLE) += zwb_handle/
#endif /* OPLUS_FEATURE_ZRAM_WRITEBACK */
# Hybridswap objects are linked into the zram module itself (zram-y),
# not built as separate modules -- matches the Kconfig help text.
zram-$(CONFIG_HYBRIDSWAP) += hybridswap/hybridswap_main.o
zram-$(CONFIG_HYBRIDSWAP_SWAPD) += hybridswap/hybridswap_swapd.o
zram-$(CONFIG_HYBRIDSWAP_ASYNC_COMPRESS) += hybridswap/hybridswap_akcompress.o
zram-$(CONFIG_HYBRIDSWAP_CORE) += hybridswap/hybridswap_eswap.o

View File

@@ -0,0 +1,98 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2020-2022 Oplus. All rights reserved.
*/
#ifndef HYBRIDSWAP_H
#define HYBRIDSWAP_H
extern int __init hybridswap_pre_init(void);
extern ssize_t hybridswap_vmstat_show(struct device *dev,
struct device_attribute *attr, char *buf);
extern ssize_t hybridswap_loglevel_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t len);
extern ssize_t hybridswap_loglevel_show(struct device *dev,
struct device_attribute *attr, char *buf);
extern ssize_t hybridswap_enable_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t len);
extern ssize_t hybridswap_enable_show(struct device *dev,
struct device_attribute *attr, char *buf);
#ifdef CONFIG_HYBRIDSWAP_CORE
extern void hybridswap_record(struct zram *zram, u32 index, struct mem_cgroup *memcg);
extern void hybridswap_untrack(struct zram *zram, u32 index);
extern int hybridswap_page_fault(struct zram *zram, u32 index);
extern bool hybridswap_delete(struct zram *zram, u32 index);
extern ssize_t hybridswap_report_show(struct device *dev,
struct device_attribute *attr, char *buf);
extern ssize_t hybridswap_stat_snap_show(struct device *dev,
struct device_attribute *attr, char *buf);
extern ssize_t hybridswap_meminfo_show(struct device *dev,
struct device_attribute *attr, char *buf);
extern ssize_t hybridswap_core_enable_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t len);
extern ssize_t hybridswap_core_enable_show(struct device *dev,
struct device_attribute *attr, char *buf);
extern ssize_t hybridswap_loop_device_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t len);
extern ssize_t hybridswap_loop_device_show(struct device *dev,
struct device_attribute *attr, char *buf);
extern ssize_t hybridswap_dev_life_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t len);
extern ssize_t hybridswap_dev_life_show(struct device *dev,
struct device_attribute *attr, char *buf);
extern ssize_t hybridswap_quota_day_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t len);
extern ssize_t hybridswap_quota_day_show(struct device *dev,
struct device_attribute *attr, char *buf);
extern ssize_t hybridswap_zram_increase_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t len);
extern ssize_t hybridswap_zram_increase_show(struct device *dev,
struct device_attribute *attr, char *buf);
#endif
#ifdef CONFIG_HYBRIDSWAP_ASYNC_COMPRESS
/*
 * Layout of page->private for cached anon pages:
 *   bits 63..32 hold the per-memcg cache id, bits 31..0 the zram index.
 * (The original comment mentions a thread_index field in 47..32, but the
 * macros below only ever pack cache id + zram index.)
 */
#define ZRAM_INDEX_SHIFT 32
#define CACHE_INDEX_SHIFT 32
#define CACHE_INDEX_MASK ((1llu << CACHE_INDEX_SHIFT) - 1)
#define ZRAM_INDEX_MASK ((1llu << ZRAM_INDEX_SHIFT) - 1)
/* pack a cache id into the high half of the value */
#define cache_index_val(index) (((unsigned long)(index) & CACHE_INDEX_MASK) << ZRAM_INDEX_SHIFT)
#define zram_index_val(id) ((unsigned long)(id) & ZRAM_INDEX_MASK)
#define mk_page_val(cache_index, index) (cache_index_val(cache_index) | zram_index_val(index))
/* use ZRAM_INDEX_SHIFT instead of a bare 32 so the layout has one source of truth */
#define fetch_cache_id(page) (((page)->private >> ZRAM_INDEX_SHIFT) & CACHE_INDEX_MASK)
#define fetch_zram_index(page) ((page)->private & ZRAM_INDEX_MASK)
#define zram_set_page(zram, index, page) ((zram)->table[index].page = (page))
#define zram_fetch_page(zram, index) ((zram)->table[index].page)
extern void del_page_from_cache(struct page *page);
extern int add_anon_page2cache(struct zram * zram, u32 index,
struct page *page);
extern ssize_t hybridswap_akcompress_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t len);
extern ssize_t hybridswap_akcompress_show(struct device *dev,
struct device_attribute *attr, char *buf);
extern void put_free_page(struct page *page);
extern void put_anon_pages(struct page *page);
extern int akcompress_cache_page_fault(struct zram *zram,
struct page *page, u32 index);
extern void destroy_akcompressd_task(struct zram *zram);
#endif
#ifdef CONFIG_HYBRIDSWAP_SWAPD
extern ssize_t hybridswap_swapd_pause_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t len);
extern ssize_t hybridswap_swapd_pause_show(struct device *dev,
struct device_attribute *attr, char *buf);
#endif
/*
 * current_is_swapd - report whether the calling task is a hybridswapd
 * thread, identified by its comm prefix "hybridswapd:".
 *
 * Always false when CONFIG_HYBRIDSWAP_SWAPD is disabled.
 */
static inline bool current_is_swapd(void)
{
#ifdef CONFIG_HYBRIDSWAP_SWAPD
	/* sizeof("...") - 1 == strlen of the prefix, computed at compile time */
	return (strncmp(current->comm, "hybridswapd:", sizeof("hybridswapd:") - 1) == 0);
#else
	return false;
#endif
}
#endif /* HYBRIDSWAP_H */

View File

@@ -0,0 +1,575 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2020-2022 Oplus. All rights reserved.
*/
#define pr_fmt(fmt) "[HYBRIDSWAP]" fmt
#include <linux/types.h>
#include <linux/spinlock_types.h>
#include <linux/atomic.h>
#include <linux/idr.h>
#include <linux/freezer.h>
#include "../zram_drv.h"
#include "../zram_drv_internal.h"
#include "hybridswap_internal.h"
#include "hybridswap.h"
struct compress_info_s {
struct list_head free_page_head;
spinlock_t free_lock;
unsigned int free_cnt;
unsigned int max_cnt;
} compress_info;
#define MAX_AKCOMPRESSD_THREADS 4
#define DEFAULT_CACHE_SIZE_MB 64
#define DEFAULT_COMPRESS_BATCH_MB 1
#define DEFAULT_CACHE_COUNT ((DEFAULT_CACHE_SIZE_MB << 20) >> PAGE_SHIFT)
#define WAKEUP_AKCOMPRESSD_WATERMARK ((DEFAULT_COMPRESS_BATCH_MB << 20) >> PAGE_SHIFT)
static wait_queue_head_t akcompressd_wait;
static struct task_struct *akc_task[MAX_AKCOMPRESSD_THREADS];
static atomic64_t akc_cnt[MAX_AKCOMPRESSD_THREADS];
static int akcompressd_threads = 0;
static atomic64_t cached_cnt;
static struct zram *zram_info;
static DEFINE_MUTEX(akcompress_init_lock);
struct idr cached_idr = IDR_INIT(cached_idr);
DEFINE_SPINLOCK(cached_idr_lock);
static void wake_all_akcompressd(void);
/*
 * clear_page_memcg - detach every queued page from its memcg and mark
 * the cache dead so add_anon_page2cache() refuses new pages.
 * @cache: per-memcg page cache being torn down
 *
 * BUG()s if a queued page has already lost its mem_cgroup pointer.
 * NOTE(review): the pages themselves stay on cache->head; presumably the
 * owner drains/frees them elsewhere -- confirm against the memcg-deinit
 * caller.
 */
void clear_page_memcg(struct cgroup_cache_page *cache)
{
	struct list_head *pos;
	struct page *page;

	spin_lock(&cache->lock);
	if (list_empty(&cache->head))
		goto out;
	list_for_each(pos, &cache->head) {
		page = list_entry(pos, struct page, lru);
		if (!page->mem_cgroup)
			BUG();
		page->mem_cgroup = NULL;
	}
out:
	cache->dead = 1;
	spin_unlock(&cache->lock);
}
/*
 * fetch_free_page - pop one page from the preallocated free-page pool.
 *
 * Returns NULL when the pool is empty; BUG()s if free_cnt and the list
 * disagree. Counterpart of put_free_page().
 */
static inline struct page * fetch_free_page(void)
{
	struct page *page = NULL;

	spin_lock(&compress_info.free_lock);
	if (compress_info.free_cnt > 0) {
		if (list_empty(&compress_info.free_page_head))
			BUG();
		page = lru_to_page(&compress_info.free_page_head);
		list_del(&page->lru);
		compress_info.free_cnt--;
	}
	spin_unlock(&compress_info.free_lock);
	return page;
}
/*
 * put_free_page - return a page to the free-page pool.
 * @page: page previously taken with fetch_free_page() (or being recycled)
 *
 * Clears page->private first so no stale cache-id/index encoding survives.
 */
void put_free_page(struct page *page)
{
	set_page_private(page, 0);
	spin_lock(&compress_info.free_lock);
	list_add_tail(&page->lru, &compress_info.free_page_head);
	compress_info.free_cnt++;
	spin_unlock(&compress_info.free_lock);
}
/*
 * find_and_fetch_memcg_cache - look up a cache by its idr id and take a
 * reference on the owning memcg_hybs_t.
 * @cache_id: id previously registered in cached_idr
 *
 * Returns the cache with an elevated refcount, or NULL if the id is not
 * registered. The caller must balance with put_memcg_cache().
 */
static inline struct cgroup_cache_page *find_and_fetch_memcg_cache(int cache_id)
{
	struct cgroup_cache_page *cache;

	spin_lock(&cached_idr_lock);
	cache = (struct cgroup_cache_page *)idr_find(&cached_idr, cache_id);
	if (unlikely(!cache)) {
		spin_unlock(&cached_idr_lock);
		pr_err("cache_id %d cache not find.\n", cache_id);
		return NULL;
	}
	/* take the ref while still under cached_idr_lock so the hybs can't vanish */
	fetch_memcg_cache(container_of(cache, memcg_hybs_t, cache));
	spin_unlock(&cached_idr_lock);
	return cache;
}
/*
 * del_page_from_cache - unlink a cached page from its per-memcg list,
 * resolving the cache from the id packed in page->private.
 * @page: cached page (may be NULL; then this is a no-op)
 *
 * Takes a temporary memcg reference via find_and_fetch_memcg_cache() and
 * drops it before returning; also decrements the global cached_cnt.
 */
void del_page_from_cache(struct page *page)
{
	int cache_id;
	struct cgroup_cache_page *cache;

	if (!page)
		return;
	cache_id = fetch_cache_id(page);
	/* id comes from page->private; sanity-check before the idr lookup */
	if (unlikely(cache_id < 0 || cache_id > MEM_CGROUP_ID_MAX)) {
		hybp(HYB_ERR, "page %p cache_id %d index %u is invalid.\n",
				page, cache_id, fetch_zram_index(page));
		return;
	}
	cache = find_and_fetch_memcg_cache(cache_id);
	if (!cache)
		return;
	spin_lock(&cache->lock);
	list_del(&page->lru);
	cache->cnt--;
	spin_unlock(&cache->lock);
	put_memcg_cache(container_of(cache, memcg_hybs_t, cache));
	atomic64_dec(&cached_cnt);
}
/*
 * del_page_from_cache_with_cache - unlink @page from @cache when the
 * caller already holds a reference on the cache's memcg.
 * @page:  cached page to remove
 * @cache: the cache the page is known to sit on
 *
 * Unlike del_page_from_cache(), no idr lookup and no refcounting here.
 */
void del_page_from_cache_with_cache(struct page *page,
		struct cgroup_cache_page *cache)
{
	spin_lock(&cache->lock);
	list_del(&page->lru);
	cache->cnt--;
	spin_unlock(&cache->lock);
	atomic64_dec(&cached_cnt);
}
/*
 * put_anon_pages - queue a page at the head of its memcg's cache list.
 * @page: page whose page->mem_cgroup must be valid (not checked here)
 *
 * NOTE(review): no cached_cnt increment and no dead-cache check here,
 * unlike add_anon_page2cache() -- verify the intended caller.
 */
void put_anon_pages(struct page *page)
{
	memcg_hybs_t *hybs = MEMCGRP_ITEM_DATA(page->mem_cgroup);

	spin_lock(&hybs->cache.lock);
	list_add(&page->lru, &hybs->cache.head);
	hybs->cache.cnt++;
	spin_unlock(&hybs->cache.lock);
}
/*
 * can_stop_working - check whether @cache has no pending pages left, i.e.
 * an exiting akcompressd thread has drained all its work.
 * @cache: per-memcg page cache being drained
 * @index: compressor thread index (currently unused, kept for the callers)
 *
 * Returns true when the list is empty. (The original mixed `return false`
 * with `return 1` in a bool function; a single locked emptiness check is
 * equivalent and consistent.)
 */
static inline bool can_stop_working(struct cgroup_cache_page *cache, int index)
{
	bool empty;

	spin_lock(&cache->lock);
	empty = list_empty(&cache->head);
	spin_unlock(&cache->lock);

	return empty;
}
/*
 * check_cache_state - try to claim @cache for a compression pass.
 * @cache: candidate per-memcg cache
 *
 * The first, unlocked test is only a fast-path optimisation; the decisive
 * test is repeated under cache->lock. On success sets cache->compressing,
 * takes a memcg reference (released by mark_compressing_stop()) and
 * returns 1; otherwise returns 0.
 */
static int check_cache_state(struct cgroup_cache_page *cache)
{
	if (cache->cnt == 0 || cache->compressing == 1)
		return 0;
	spin_lock(&cache->lock);
	if (cache->cnt == 0 || cache->compressing) {
		spin_unlock(&cache->lock);
		return 0;
	}
	cache->compressing = 1;
	spin_unlock(&cache->lock);
	fetch_memcg_cache(container_of(cache, memcg_hybs_t, cache));
	return 1;
}
/*
 * fetch_one_cache - scan the idr for the first cache that can be claimed
 * for compression (see check_cache_state()).
 *
 * Returns the claimed cache (memcg reference held, compressing flag set),
 * or NULL when the iterator is exhausted without a claimable entry.
 */
struct cgroup_cache_page *fetch_one_cache(void)
{
	struct cgroup_cache_page *cache = NULL;
	int id;

	spin_lock(&cached_idr_lock);
	idr_for_each_entry(&cached_idr, cache, id) {
		if (check_cache_state(cache))
			break;
	}
	spin_unlock(&cached_idr_lock);
	return cache;
}
/*
 * mark_compressing_stop - end a compression pass claimed by
 * check_cache_state(): clear the compressing flag and drop the memcg
 * reference that the claim took.
 * @cache: per-memcg cache whose pass has finished
 */
void mark_compressing_stop(struct cgroup_cache_page *cache)
{
	spin_lock(&cache->lock);
	/* log-message typo fixed: "delelted" -> "deleted" */
	if (cache->dead)
		hybp(HYB_WARN, "stop compressing, may be cgroup is deleted\n");
	cache->compressing = 0;
	spin_unlock(&cache->lock);
	put_memcg_cache(container_of(cache, memcg_hybs_t, cache));
}
/*
 * fetch_anon_page - take one page off @cache for compression.
 * @zram:  zram device whose slot flags guard the page
 * @cache: claimed per-memcg cache (see fetch_one_cache())
 *
 * Bails out early if the free pool is exhausted. A page is only handed
 * out once its slot is verified (under zram_slot_lock) to still carry
 * ZRAM_CACHED; the flag is then flipped to ZRAM_CACHED_COMPRESS and the
 * page is unlinked. If the head page has already lost ZRAM_CACHED the
 * loop retries; seeing the *same* page twice in a row means the list is
 * stuck and is treated as fatal (BUG).
 *
 * Returns the page to compress, or NULL when the cache is empty / the
 * pool is exhausted.
 */
static inline struct page *fetch_anon_page(struct zram *zram,
		struct cgroup_cache_page *cache)
{
	struct page *page, *prev_page;
	int index;

	if (compress_info.free_cnt == 0)
		return NULL;
	prev_page = NULL;
try_again:
	page = NULL;
	spin_lock(&cache->lock);
	if (!list_empty(&cache->head)) {
		page = lru_to_page(&cache->head);
		index = fetch_zram_index(page);
	}
	spin_unlock(&cache->lock);
	if (page) {
		if (prev_page && (page == prev_page)) {
			hybp(HYB_ERR, "zram %p index %d page %p\n",
					zram, index, page);
			BUG();
		}
		zram_slot_lock(zram, index);
		if (!zram_test_flag(zram, index, ZRAM_CACHED)) {
			/* slot changed under us: remember the page and retry */
			zram_slot_unlock(zram, index);
			prev_page = page;
			goto try_again;
		}
		prev_page = NULL;
		zram_clear_flag(zram, index, ZRAM_CACHED);
		del_page_from_cache_with_cache(page, cache);
		zram_set_flag(zram, index, ZRAM_CACHED_COMPRESS);
		zram_slot_unlock(zram, index);
	}
	return page;
}
/*
 * add_anon_page2cache - copy an anon page into the async-compress cache
 * instead of compressing it synchronously.
 * @zram:  zram device the page belongs to
 * @index: zram slot index of the page
 * @page:  source page being swapped out
 *
 * Returns 1 when the page content was copied into a pool page and queued
 * (the caller may skip synchronous compression), 0 to fall back to the
 * normal path (no threads, no memcg, dead cache, or pool empty).
 *
 * NOTE(review): find_and_fetch_memcg_cache() takes a memcg reference that
 * is not dropped on any path of this function -- including the early
 * `!= cache` mismatch return, where the ref on the *found* cache leaks.
 * Verify the reference is consumed by the compress path, otherwise this
 * is a refcount leak.
 */
int add_anon_page2cache(struct zram * zram, u32 index, struct page *page)
{
	struct page *dst_page;
	void *src, *dst;
	struct mem_cgroup *memcg;
	struct cgroup_cache_page *cache;
	memcg_hybs_t *hybs;

	if (akcompressd_threads == 0)
		return 0;
	memcg = page->mem_cgroup;
	if (!memcg || !MEMCGRP_ITEM_DATA(memcg))
		return 0;
	hybs = MEMCGRP_ITEM_DATA(memcg);
	cache = &hybs->cache;
	/* confirm the cache is still registered in the idr under its own id */
	if (find_and_fetch_memcg_cache(cache->id) != cache)
		return 0;
	spin_lock(&cache->lock);
	if (cache->dead == 1) {
		spin_unlock(&cache->lock);
		return 0;
	}
	spin_unlock(&cache->lock);
	dst_page = fetch_free_page();
	if (!dst_page)
		return 0;
	/* snapshot the page content into the pool page */
	src = kmap_atomic(page);
	dst = kmap_atomic(dst_page);
	memcpy(dst, src, PAGE_SIZE);
	kunmap_atomic(src);
	kunmap_atomic(dst);
	dst_page->mem_cgroup = memcg;
	/* encode (cache id, zram index) into page->private for later lookup */
	set_page_private(dst_page, mk_page_val(cache->id, index));
	update_zram_index(zram, index, (unsigned long)dst_page);
	atomic64_inc(&cached_cnt);
	wake_all_akcompressd();
	hybp(HYB_DEBUG, "add_anon_page2cache index %u page %p passed\n",
			index, dst_page);
	return 1;
}
/*
 * akcompressd_try_to_sleep - block interruptibly (and freezably) on the
 * given waitqueue until wake_all_akcompressd() wakes us.
 * @waitq: the akcompressd wait queue
 */
static inline void akcompressd_try_to_sleep(wait_queue_head_t *waitq)
{
	DEFINE_WAIT(wait);

	prepare_to_wait(waitq, &wait, TASK_INTERRUPTIBLE);
	freezable_schedule();
	finish_wait(waitq, &wait);
}
/*
 * akcompressd_func - main loop of one async-compress kthread.
 * @data: thread index packed by the creator as (void *)(unsigned long)hid
 *
 * Sleeps until woken, claims one per-memcg cache at a time and compresses
 * its queued pages via async_compress_page(); successfully compressed
 * pool pages are recycled with put_free_page(). On stop request the
 * current cache is drained first (finish_last_jobs).
 *
 * NOTE(review): put_memcg_cache() runs once per page while the claim in
 * check_cache_state() took a single reference -- verify that
 * async_compress_page() takes a per-page reference, otherwise this
 * underflows the refcount.
 */
static int akcompressd_func(void *data)
{
	struct page *page;
	int ret, thread_index;
	struct list_head compress_fail_list;
	struct cgroup_cache_page *cache = NULL;

	/*
	 * Round-trip the index through unsigned long, matching how the
	 * creator packed it; a direct (int)data cast truncates a pointer
	 * and warns on 64-bit builds.
	 */
	thread_index = (int)(unsigned long)data;
	if (thread_index < 0 || thread_index >= MAX_AKCOMPRESSD_THREADS) {
		hybp(HYB_ERR, "akcompress task index %d is invalid.\n", thread_index);
		return -EINVAL;
	}
	set_freezable();
	while (!kthread_should_stop()) {
		akcompressd_try_to_sleep(&akcompressd_wait);
		count_swapd_event(AKCOMPRESSD_WAKEUP);
		cache = fetch_one_cache();
		if (!cache)
			continue;
finish_last_jobs:
		INIT_LIST_HEAD(&compress_fail_list);
		page = fetch_anon_page(zram_info, cache);
		while (page) {
			ret = async_compress_page(zram_info, page);
			put_memcg_cache(container_of(cache, memcg_hybs_t, cache));
			if (ret)
				list_add(&page->lru, &compress_fail_list);
			else {
				atomic64_inc(&akc_cnt[thread_index]);
				page->mem_cgroup = NULL;
				put_free_page(page);
			}
			page = fetch_anon_page(zram_info, cache);
		}
		if (!list_empty(&compress_fail_list))
			hybp(HYB_ERR, "have some compress failed pages.\n");
		if (kthread_should_stop()) {
			/* drain the claimed cache before honouring the stop */
			if (!can_stop_working(cache, thread_index))
				goto finish_last_jobs;
		}
		mark_compressing_stop(cache);
	}
	return 0;
}
/*
 * update_akcompressd_threads - grow or shrink the pool of akcompressd
 * kthreads to exactly @thread_count.
 * @thread_count: desired number of threads (0..MAX_AKCOMPRESSD_THREADS)
 * @zram:         zram device the threads operate on (cached in zram_info)
 *
 * Serialised by a local mutex. Shrinking stops the highest-indexed
 * threads; growing spawns new ones after the current highest index.
 *
 * Returns @thread_count on success (also when nothing changed), or
 * -EINVAL for an out-of-range request.
 * NOTE(review): if kthread_run() fails partway through a grow,
 * akcompressd_threads is still set to the *requested* count -- confirm
 * that is intended.
 */
static int update_akcompressd_threads(int thread_count, struct zram *zram)
{
	int drop, increase;
	int last_index, start_index, hid;
	static DEFINE_MUTEX(update_lock);

	if (thread_count < 0 || thread_count > MAX_AKCOMPRESSD_THREADS) {
		hybp(HYB_ERR, "thread_count %d is invalid\n", thread_count);
		return -EINVAL;
	}
	mutex_lock(&update_lock);
	if (!zram_info || zram_info != zram)
		zram_info = zram;
	if (thread_count == akcompressd_threads) {
		mutex_unlock(&update_lock);
		return thread_count;
	}
	last_index = akcompressd_threads - 1;
	if (thread_count < akcompressd_threads) {
		/* shrink: stop the highest-numbered threads */
		drop = akcompressd_threads - thread_count;
		for (hid = last_index; hid > (last_index - drop); hid--) {
			if (akc_task[hid]) {
				kthread_stop(akc_task[hid]);
				akc_task[hid] = NULL;
			}
		}
	} else {
		/* grow: spawn threads after the current highest index */
		increase = thread_count - akcompressd_threads;
		start_index = last_index + 1;
		for (hid = start_index; hid < (start_index + increase); hid++) {
			if (unlikely(akc_task[hid]))
				BUG();
			akc_task[hid]= kthread_run(akcompressd_func,
					(void*)(unsigned long)hid, "akcompressd:%d", hid);
			if (IS_ERR(akc_task[hid])) {
				pr_err("Failed to start akcompressd%d\n", hid);
				akc_task[hid] = NULL;
				break;
			}
		}
	}
	hybp(HYB_INFO, "akcompressd_threads count changed, old:%d new:%d\n",
			akcompressd_threads, thread_count);
	akcompressd_threads = thread_count;
	mutex_unlock(&update_lock);
	return thread_count;
}
/*
 * wake_all_akcompressd - wake the compressor threads once enough pages
 * have accumulated (batching: below the watermark callers just queue).
 */
static void wake_all_akcompressd(void)
{
	if (atomic64_read(&cached_cnt) < WAKEUP_AKCOMPRESSD_WATERMARK)
		return;
	if (!waitqueue_active(&akcompressd_wait))
		return;
	wake_up_interruptible(&akcompressd_wait);
}
/*
 * create_akcompressd_task - bring up a single akcompressd thread.
 * Returns 0 on success, non-zero on failure.
 */
int create_akcompressd_task(struct zram *zram)
{
	return update_akcompressd_threads(1, zram) != 1;
}
/* destroy_akcompressd_task - stop all akcompressd threads for @zram. */
void destroy_akcompressd_task(struct zram *zram)
{
	(void)update_akcompressd_threads(0, zram);
}
/*
 * hybridswap_akcompress_store - sysfs write handler: set the number of
 * akcompressd threads (0..MAX_AKCOMPRESSD_THREADS).
 *
 * Returns @len on success, -EINVAL for an unparsable value, or the
 * negative error from update_akcompressd_threads().
 */
ssize_t hybridswap_akcompress_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	int ret;
	unsigned long val;
	struct zram *zram = dev_to_zram(dev);

	ret = kstrtoul(buf, 0, &val);
	if (unlikely(ret)) {
		hybp(HYB_ERR, "val is error!\n");
		return -EINVAL;
	}
	ret = update_akcompressd_threads(val, zram);
	if (ret < 0) {
		/* %lu: val is unsigned long (was mis-printed with %d) */
		hybp(HYB_ERR, "create task failed, val %lu\n", val);
		return ret;
	}
	return len;
}
ssize_t hybridswap_akcompress_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
int len = 0, id, i;
struct cgroup_cache_page *cache = NULL;
unsigned long cnt = atomic64_read(&cached_cnt);
memcg_hybs_t *hybs;
len += sprintf(buf + len, "akcompressd_threads: %d\n", akcompressd_threads);
len += sprintf(buf + len, "cached page cnt: %lu\n", cnt);
len += sprintf(buf + len, "free page cnt: %u\n", compress_info.free_cnt);
for (i = 0; i < MAX_AKCOMPRESSD_THREADS; i++)
len += sprintf(buf + len, "%-d %-d\n", i, atomic64_read(&akc_cnt[i]));
if (cnt == 0)
return len;
spin_lock(&cached_idr_lock);
idr_for_each_entry(&cached_idr, cache, id) {
hybs = container_of(cache, memcg_hybs_t, cache);
if (cache->cnt == 0)
continue;
len += scnprintf(buf + len, PAGE_SIZE - len, "%s %d\n",
hybs->name, cache->cnt);
if (len >= PAGE_SIZE)
break;
}
spin_unlock(&cached_idr_lock);
return len;
}
/*
 * akcompressd_pre_init - boot-time setup: init the waitqueue, counters,
 * and preallocate DEFAULT_CACHE_COUNT pool pages (64 MB worth).
 *
 * Allocation failures are tolerated; free_cnt ends up as the number of
 * pages actually obtained.
 */
void __init akcompressd_pre_init(void)
{
	int i;
	struct page *page;

	mutex_lock(&akcompress_init_lock);
	INIT_LIST_HEAD(&compress_info.free_page_head);
	spin_lock_init(&compress_info.free_lock);
	compress_info.free_cnt = 0;
	init_waitqueue_head(&akcompressd_wait);
	atomic64_set(&cached_cnt, 0);
	for (i = 0; i < MAX_AKCOMPRESSD_THREADS; i++)
		atomic64_set(&akc_cnt[i], 0);
	for (i = 0; i < DEFAULT_CACHE_COUNT; i ++) {
		page = alloc_page(GFP_KERNEL);
		if (page) {
			list_add_tail(&page->lru, &compress_info.free_page_head);
		} else
			break;
	}
	/* i == number of pages actually allocated */
	compress_info.free_cnt = i;
	mutex_unlock(&akcompress_init_lock);
}
/*
 * akcompressd_pre_deinit - module-exit teardown: release every page left
 * in the preallocated pool.
 *
 * Bug fixed: the pool pages were obtained with alloc_page(), which hands
 * back a struct page *, so they must be released with __free_page();
 * free_page() takes a kernel virtual address, not a struct page pointer.
 * (Unused local `i` also removed.)
 */
void __exit akcompressd_pre_deinit(void)
{
	struct page *page, *tmp;

	mutex_lock(&akcompress_init_lock);
	if (list_empty(&compress_info.free_page_head))
		goto out;
	list_for_each_entry_safe(page, tmp, &compress_info.free_page_head, lru) {
		list_del(&page->lru);
		__free_page(page);
	}
out:
	compress_info.free_cnt = 0;
	mutex_unlock(&akcompress_init_lock);
}
/*
 * akcompress_cache_page_fault - satisfy a swap-in fault straight from the
 * async-compress cache, if the slot's data is still held there.
 * @zram:  zram device
 * @page:  destination page for the faulting read
 * @index: zram slot index
 *
 * Caller must hold the zram slot lock. On a hit (slot carries ZRAM_CACHED
 * or ZRAM_CACHED_COMPRESS) the cached page is copied into @page, the slot
 * lock is RELEASED here, and 1 is returned. On a miss, 0 is returned with
 * the slot lock still held -- note the asymmetric unlock contract.
 * NOTE(review): the two branches are identical except for the flag tested
 * and the log text; kept duplicated to avoid disturbing the lock flow.
 */
int akcompress_cache_page_fault(struct zram *zram,
		struct page *page, u32 index)
{
	void *src, *dst;

	if (zram_test_flag(zram, index, ZRAM_CACHED)) {
		struct page *src_page = (struct page *)zram_fetch_page(zram, index);

		src = kmap_atomic(src_page);
		dst = kmap_atomic(page);
		memcpy(dst, src, PAGE_SIZE);
		kunmap_atomic(src);
		kunmap_atomic(dst);
		zram_slot_unlock(zram, index);
		hybp(HYB_DEBUG, "read_anon_page_from_cache index %u page %p passed, ZRAM_CACHED\n",
				index, src_page);
		return 1;
	}
	if (zram_test_flag(zram, index, ZRAM_CACHED_COMPRESS)) {
		struct page *src_page = (struct page *)zram_fetch_page(zram, index);

		src = kmap_atomic(src_page);
		dst = kmap_atomic(page);
		memcpy(dst, src, PAGE_SIZE);
		kunmap_atomic(src);
		kunmap_atomic(dst);
		zram_slot_unlock(zram, index);
		hybp(HYB_DEBUG, "read_anon_page_from_cache index %u page %p passed, ZRAM_CACHED_COMPRESS\n",
				index, src_page);
		return 1;
	}
	return 0;
}

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,553 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2020-2022 Oplus. All rights reserved.
*/
#ifndef HYBRIDSWAP_INTERNAL_H
#define HYBRIDSWAP_INTERNAL_H
#include <linux/sched.h>
#include <linux/zsmalloc.h>
#include <linux/timer.h>
#include <linux/device.h>
#include <linux/memcontrol.h>
#define ESWAP_SHIFT 15
#define ESWAP_SIZE (1UL << ESWAP_SHIFT)
#define ESWAP_PG_CNT (ESWAP_SIZE >> PAGE_SHIFT)
#define ESWAP_SECTOR_SIZE (ESWAP_PG_CNT << 3)
#define ESWAP_MAX_OBJ_CNT (30 * ESWAP_PG_CNT)
#define ESWAP_MASK (~(ESWAP_SIZE - 1))
#define ESWAP_ALIGN_UP(size) ((size + ESWAP_SIZE - 1) & ESWAP_MASK)
#define MAX_FAIL_RECORD_NUM 4
#define MAX_APP_GRADE 600
#define HYBRIDSWAP_QUOTA_DAY 0x280000000 /* 10G bytes */
#define HYBRIDSWAP_CHECK_GAP 86400 /* 24 hour */
#define MEM_CGROUP_NAME_MAX_LEN 32
#define MAX_RATIO 100
#define MIN_RATIO 0
enum {
HYB_ERR = 0,
HYB_WARN,
HYB_INFO,
HYB_DEBUG,
HYB_MAX
};
void hybridswap_loglevel_set(int level);
int hybridswap_loglevel(void);
#define DUMP_STACK_ON_ERR 0
#define pt(l, f, ...) pr_err("[%s]<%d:%s>:"f, #l, __LINE__, __func__, ##__VA_ARGS__)
static inline void pr_none(void) {}
#define hybp(l, f, ...) do {\
(l <= hybridswap_loglevel()) ? pt(l, f, ##__VA_ARGS__) : pr_none();\
if (DUMP_STACK_ON_ERR && l == HYB_ERR) dump_stack();\
} while (0)
enum hybridswap_class {
HYB_RECLAIM_IN = 0,
HYB_FAULT_OUT,
HYB_BATCH_OUT,
HYB_PRE_OUT,
HYB_CLASS_BUTT
};
enum hybridswap_key_point {
HYB_START = 0,
HYB_INIT,
HYB_IOENTRY_ALLOC,
HYB_FIND_ESWAP,
HYB_IO_ESWAP,
HYB_SEGMENT_ALLOC,
HYB_BIO_ALLOC,
HYB_SUBMIT_BIO,
HYB_END_IO,
HYB_SCHED_WORK,
HYB_END_WORK,
HYB_CALL_BACK,
HYB_WAKE_UP,
HYB_ZRAM_LOCK,
HYB_DONE,
HYB_KYE_POINT_BUTT
};
enum hybridswap_mcg_member {
MCG_ZRAM_STORED_SZ = 0,
MCG_ZRAM_STORED_PG_SZ,
MCG_DISK_STORED_SZ,
MCG_DISK_STORED_PG_SZ,
MCG_ANON_FAULT_CNT,
MCG_DISK_FAULT_CNT,
MCG_ESWAPOUT_CNT,
MCG_ESWAPOUT_SZ,
MCG_ESWAPIN_CNT,
MCG_ESWAPIN_SZ,
MCG_DISK_SPACE,
MCG_DISK_SPACE_PEAK,
};
enum hybridswap_fail_point {
HYB_FAULT_OUT_INIT_FAIL = 0,
HYB_FAULT_OUT_ENTRY_ALLOC_FAIL,
HYB_FAULT_OUT_IO_ENTRY_PARA_FAIL,
HYB_FAULT_OUT_SEGMENT_ALLOC_FAIL,
HYB_FAULT_OUT_BIO_ALLOC_FAIL,
HYB_FAULT_OUT_BIO_ADD_FAIL,
HYB_FAULT_OUT_IO_FAIL,
HYBRIDSWAP_FAIL_POINT_BUTT
};
struct hybridswap_fail_record {
unsigned char task_comm[TASK_COMM_LEN];
enum hybridswap_fail_point point;
ktime_t time;
u32 index;
int eswapid;
};
struct hybridswap_fail_record_info {
int num;
spinlock_t lock;
struct hybridswap_fail_record record[MAX_FAIL_RECORD_NUM];
};
struct hybridswap_key_point_info {
unsigned int record_cnt;
unsigned int end_cnt;
ktime_t first_time;
ktime_t last_time;
s64 proc_total_time;
s64 proc_max_time;
unsigned long long last_ravg_sum;
unsigned long long proc_ravg_sum;
spinlock_t time_lock;
};
struct hybridswap_key_point_record {
struct timer_list lat_monitor;
unsigned long warn_level;
int page_cnt;
int segment_cnt;
int nice;
bool timeout_flag;
unsigned char task_comm[TASK_COMM_LEN];
struct task_struct *task;
enum hybridswap_class class;
struct hybridswap_key_point_info key_point[HYB_KYE_POINT_BUTT];
};
struct hybridswapiowrkstat {
atomic64_t total_lat;
atomic64_t max_lat;
atomic64_t timeout_cnt;
};
struct hybridswap_fault_timeout_cnt{
atomic64_t timeout_100ms_cnt;
atomic64_t timeout_500ms_cnt;
};
struct hybstatus {
atomic64_t reclaimin_cnt;
atomic64_t reclaimin_bytes;
atomic64_t reclaimin_real_load;
atomic64_t reclaimin_bytes_daily;
atomic64_t reclaimin_pages;
atomic64_t reclaimin_infight;
atomic64_t batchout_cnt;
atomic64_t batchout_bytes;
atomic64_t batchout_real_load;
atomic64_t batchout_pages;
atomic64_t batchout_inflight;
atomic64_t fault_cnt;
atomic64_t hybridswap_fault_cnt;
atomic64_t reout_pages;
atomic64_t reout_bytes;
atomic64_t zram_stored_pages;
atomic64_t zram_stored_size;
atomic64_t stored_pages;
atomic64_t stored_size;
atomic64_t notify_free;
atomic64_t frag_cnt;
atomic64_t mcg_cnt;
atomic64_t eswap_cnt;
atomic64_t miss_free;
atomic64_t memcgid_clear;
atomic64_t skip_track_cnt;
atomic64_t used_swap_pages;
atomic64_t null_memcg_skip_track_cnt;
atomic64_t stored_wm_scale;
atomic64_t dropped_eswap_size;
atomic64_t io_fail_cnt[HYB_CLASS_BUTT];
atomic64_t alloc_fail_cnt[HYB_CLASS_BUTT];
struct hybridswapiowrkstat lat[HYB_CLASS_BUTT];
struct hybridswap_fault_timeout_cnt fault_stat[2]; /* 0:bg 1:fg */
struct hybridswap_fail_record_info record;
};
struct hybridswap_page_pool {
struct list_head page_pool_list;
spinlock_t page_pool_lock;
};
struct io_eswapent {
int eswapid;
struct zram *zram;
struct mem_cgroup *mcg;
struct page *pages[ESWAP_PG_CNT];
u32 index[ESWAP_MAX_OBJ_CNT];
int cnt;
int real_load;
struct hybridswap_page_pool *pool;
};
struct hybridswap_buffer {
struct zram *zram;
struct hybridswap_page_pool *pool;
struct page **dest_pages;
};
struct hybridswap_entry {
int eswapid;
sector_t addr;
struct page **dest_pages;
int pages_sz;
struct list_head list;
void *private;
void *manager_private;
};
struct hybridswap_io_req;
struct hybridswap_io {
struct block_device *bdev;
enum hybridswap_class class;
void (*done_callback)(struct hybridswap_entry *, int, struct hybridswap_io_req *);
void (*complete_notify)(void *);
void *private;
struct hybridswap_key_point_record *record;
};
struct hybridswap_io_req {
struct hybridswap_io io_para;
struct kref refcount;
struct mutex refmutex;
struct wait_queue_head io_wait;
atomic_t eswap_doing;
struct completion io_end_flag;
struct hyb_sgm *segment;
bool limit_doing_flag;
bool wait_io_finish_flag;
int page_cnt;
int segment_cnt;
int nice;
atomic64_t real_load;
};
/* Change hybridswap_event_item, you should change swapd_text togather*/
enum hybridswap_event_item {
#ifdef CONFIG_HYBRIDSWAP_SWAPD
SWAPD_WAKEUP,
SWAPD_REFAULT,
SWAPD_MEMCG_RATIO_SKIP,
SWAPD_MEMCG_REFAULT_SKIP,
SWAPD_SHRINK_ANON,
SWAPD_SWAPOUT,
SWAPD_SKIP_SWAPOUT,
SWAPD_EMPTY_ROUND,
SWAPD_OVER_MIN_BUFFER_SKIP_TIMES,
SWAPD_EMPTY_ROUND_SKIP_TIMES,
SWAPD_SNAPSHOT_TIMES,
SWAPD_SKIP_SHRINK_OF_WINDOW,
SWAPD_MANUAL_PAUSE,
#ifdef CONFIG_OPLUS_JANK
SWAPD_CPU_BUSY_SKIP_TIMES,
SWAPD_CPU_BUSY_BREAK_TIMES,
#endif
#endif
#ifdef CONFIG_HYBRIDSWAP_ASYNC_COMPRESS
AKCOMPRESSD_WAKEUP,
#endif
NR_EVENT_ITEMS
};
struct swapd_event_state {
unsigned long event[NR_EVENT_ITEMS];
};
#ifdef CONFIG_HYBRIDSWAP_ASYNC_COMPRESS
struct cgroup_cache_page {
spinlock_t lock;
struct list_head head;
unsigned int cnt;
int id;
char compressing;
char dead;
};
#endif
typedef struct mem_cgroup_hybridswap {
#ifdef CONFIG_HYBRIDSWAP
atomic64_t ufs2zram_scale;
atomic_t zram2ufs_scale;
atomic64_t app_grade;
atomic64_t app_uid;
struct list_head grade_node;
char name[MEM_CGROUP_NAME_MAX_LEN];
struct zram *zram;
struct mem_cgroup *memcg;
refcount_t usage;
#endif
#ifdef CONFIG_HYBRIDSWAP_SWAPD
atomic_t mem2zram_scale;
atomic_t pagefault_level;
unsigned long long reclaimed_pagefault;
long long can_reclaimed;
#endif
#ifdef CONFIG_HYBRIDSWAP_CORE
unsigned long swap_sorted_list;
unsigned long eswap_lru;
struct list_head link_list;
spinlock_t zram_init_lock;
long long can_eswaped;
atomic64_t zram_stored_size;
atomic64_t zram_page_size;
unsigned long zram_watermark;
atomic_t hybridswap_extcnt;
atomic_t hybridswap_peakextcnt;
atomic64_t hybridswap_stored_pages;
atomic64_t hybridswap_stored_size;
atomic64_t hybridswap_eswap_notify_free;
atomic64_t hybridswap_outcnt;
atomic64_t hybridswap_incnt;
atomic64_t hybridswap_allfaultcnt;
atomic64_t hybridswap_faultcnt;
atomic64_t hybridswap_outextcnt;
atomic64_t hybridswap_inextcnt;
struct mutex swap_lock;
bool in_swapin;
bool force_swapout;
#endif
#ifdef CONFIG_HYBRIDSWAP_ASYNC_COMPRESS
struct cgroup_cache_page cache;
#endif
}memcg_hybs_t;
#define MEMCGRP_ITEM_DATA(memcg) ((memcg_hybs_t *)(memcg)->android_oem_data1)
#define MEMCGRP_ITEM(memcg, item) (MEMCGRP_ITEM_DATA(memcg)->item)
extern void __put_memcg_cache(memcg_hybs_t *hybs);
/*
 * fetch_memcg_cache - take a reference on a memcg's hybridswap data.
 * Returns @hybs for call-chaining convenience.
 */
static inline memcg_hybs_t *fetch_memcg_cache(memcg_hybs_t *hybs)
{
	refcount_inc(&hybs->usage);
	return hybs;
}

/*
 * put_memcg_cache - drop a reference taken by fetch_memcg_cache();
 * frees the structure via __put_memcg_cache() on the last put.
 */
static inline void put_memcg_cache(memcg_hybs_t *hybs)
{
	if (refcount_dec_and_test(&hybs->usage))
		__put_memcg_cache(hybs);
}
DECLARE_PER_CPU(struct swapd_event_state, swapd_event_states);
extern struct mutex reclaim_para_lock;
/*
 * Per-CPU swapd event counters. The __ variants use raw_cpu ops and are
 * only safe when preemption is already disabled; the plain variants use
 * this_cpu ops and are safe anywhere.
 */
static inline void __count_swapd_event(enum hybridswap_event_item item)
{
	raw_cpu_inc(swapd_event_states.event[item]);
}

static inline void count_swapd_event(enum hybridswap_event_item item)
{
	this_cpu_inc(swapd_event_states.event[item]);
}

static inline void __count_swapd_events(enum hybridswap_event_item item, long delta)
{
	raw_cpu_add(swapd_event_states.event[item], delta);
}

static inline void count_swapd_events(enum hybridswap_event_item item, long delta)
{
	this_cpu_add(swapd_event_states.event[item], delta);
}
void *hybridswap_malloc(size_t size, bool fast, bool nofail);
void hybridswap_free(const void *mem);
unsigned long hybridswap_zsmalloc(struct zs_pool *zs_pool,
size_t size, struct hybridswap_page_pool *pool);
struct page *hybridswap_alloc_page(
struct hybridswap_page_pool *pool, gfp_t gfp,
bool fast, bool nofail);
void hybridswap_page_recycle(struct page *page,
struct hybridswap_page_pool *pool);
struct hybstatus *hybridswap_fetch_stat_obj(void);
int hybridswap_manager_init(struct zram *zram);
void hybridswap_manager_memcg_init(struct zram *zram,
struct mem_cgroup *memcg);
void hybridswap_manager_memcg_deinit(struct mem_cgroup *mcg);
void hybridswap_swap_sorted_list_add(struct zram *zram, u32 index,
struct mem_cgroup *memcg);
void hybridswap_swap_sorted_list_del(struct zram *zram, u32 index);
unsigned long hybridswap_eswap_create(struct mem_cgroup *memcg,
int *eswapid,
struct hybridswap_buffer *dest_buf,
void **private);
void hybridswap_eswap_register(void *private, struct hybridswap_io_req *req);
void hybridswap_eswap_objs_del(struct zram *zram, u32 index);
int hybridswap_find_eswap_by_index(
unsigned long eswpentry, struct hybridswap_buffer *buf, void **private);
int hybridswap_find_eswap_by_memcg(
struct mem_cgroup *mcg,
struct hybridswap_buffer *dest_buf, void **private);
void hybridswap_eswap_destroy(void *private, enum hybridswap_class class);
void hybridswap_eswap_exception(enum hybridswap_class class,
void *private);
void hybridswap_manager_deinit(struct zram *zram);
struct mem_cgroup *hybridswap_zram_fetch_mcg(struct zram *zram, u32 index);
int hyb_io_work_begin(void);
void *hybridswap_plug_start(struct hybridswap_io *io_para);
int hybridswap_read_eswap(void *iohandle,
struct hybridswap_entry *ioentry);
int hybridswap_write_eswap(void *iohandle,
struct hybridswap_entry *ioentry);
int hybridswap_plug_finish(void *iohandle);
void hybperf_start(
struct hybridswap_key_point_record *record,
ktime_t stsrt, unsigned long long start_ravg_sum,
enum hybridswap_class class);
void hybperf_end(struct hybridswap_key_point_record *record);
void hybperfiowrkstart(
struct hybridswap_key_point_record *record,
enum hybridswap_key_point type);
void hybperfiowrkend(
struct hybridswap_key_point_record *record,
enum hybridswap_key_point type);
void hybperfiowrkpoint(
struct hybridswap_key_point_record *record,
enum hybridswap_key_point type);
void hybperf_async_perf(
struct hybridswap_key_point_record *record,
enum hybridswap_key_point type, ktime_t start,
unsigned long long start_ravg_sum);
void hybperf_io_stat(
struct hybridswap_key_point_record *record, int page_cnt,
int segment_cnt);
static inline unsigned long long hybridswap_fetch_ravg_sum(void)
{
return 0;
}
void hybridswap_fail_record(enum hybridswap_fail_point point,
u32 index, int eswapid, unsigned char *task_comm);
bool hybridswap_reach_life_protect(void);
struct workqueue_struct *hybridswap_fetch_reclaim_workqueue(void);
extern struct mem_cgroup *fetch_next_memcg(struct mem_cgroup *prev);
extern void fetch_next_memcg_break(struct mem_cgroup *prev);
extern memcg_hybs_t *hybridswap_cache_alloc(struct mem_cgroup *memcg, bool atomic);
extern void memcg_app_grade_resort(void);
extern unsigned long memcg_anon_pages(struct mem_cgroup *memcg);
#ifdef CONFIG_HYBRIDSWAP_CORE
extern bool hybridswap_core_enabled(void);
extern bool hybridswap_out_to_eswap_enable(void);
extern void hybridswap_mem_cgroup_deinit(struct mem_cgroup *memcg);
extern unsigned long hybridswap_out_to_eswap(unsigned long size);
extern int hybridswap_batches(struct mem_cgroup *mcg,
unsigned long size, bool preload);
extern unsigned long zram_zsmalloc(struct zs_pool *zs_pool,
size_t size, gfp_t gfp);
extern struct task_struct *fetch_task_from_proc(struct inode *inode);
extern unsigned long long hybridswap_fetch_zram_pagefault(void);
extern bool hybridswap_reclaim_work_running(void);
extern void hybridswap_force_reclaim(struct mem_cgroup *mcg);
extern bool hybridswap_stored_wm_ok(void);
extern void mem_cgroup_id_remove_hook(void *data, struct mem_cgroup *memcg);
extern int mem_cgroup_stored_wm_scale_write(
struct cgroup_subsys_state *css, struct cftype *cft, s64 val);
extern s64 mem_cgroup_stored_wm_scale_read(
struct cgroup_subsys_state *css, struct cftype *cft);
extern bool hybridswap_delete(struct zram *zram, u32 index);
extern int hybridswap_stored_info(unsigned long *total, unsigned long *used);
extern unsigned long long hybridswap_read_mcg_stats(
struct mem_cgroup *mcg, enum hybridswap_mcg_member mcg_member);
extern int hybridswap_core_enable(void);
extern void hybridswap_core_disable(void);
extern int hybridswap_psi_show(struct seq_file *m, void *v);
#else
static inline unsigned long long hybridswap_read_mcg_stats(
struct mem_cgroup *mcg, enum hybridswap_mcg_member mcg_member)
{
return 0;
}
static inline unsigned long long hybridswap_fetch_zram_pagefault(void)
{
return 0;
}
static inline bool hybridswap_reclaim_work_running(void)
{
return false;
}
static inline bool hybridswap_core_enabled(void) { return false; }
static inline bool hybridswap_out_to_eswap_enable(void) { return false; }
#endif
#ifdef CONFIG_HYBRIDSWAP_SWAPD
extern atomic_long_t page_fault_pause;
extern atomic_long_t page_fault_pause_cnt;
extern struct cftype mem_cgroup_swapd_legacy_files[];
extern bool zram_watermark_ok(void);
extern void wake_all_swapd(void);
extern void alloc_pages_slowpath_hook(void *data, gfp_t gfp_mask,
unsigned int order, unsigned long delta);
extern void rmqueue_hook(void *data, struct zone *preferred_zone,
struct zone *zone, unsigned int order, gfp_t gfp_flags,
unsigned int alloc_flags, int migratetype);
extern void __init swapd_pre_init(void);
extern void swapd_pre_deinit(void);
extern void update_swapd_mcg_setup(struct mem_cgroup *memcg);
extern bool free_zram_is_ok(void);
extern unsigned long fetch_nr_zram_total(void);
extern int swapd_init(struct zram *zram);
extern void swapd_exit(void);
extern bool hybridswap_swapd_enabled(void);
#else
static inline bool hybridswap_swapd_enabled(void) { return false; }
#endif
#ifdef CONFIG_HYBRIDSWAP_ASYNC_COMPRESS
extern spinlock_t cached_idr_lock;
extern struct idr cached_idr;
extern void __init akcompressd_pre_init(void);
extern void __exit akcompressd_pre_deinit(void);
extern int create_akcompressd_task(struct zram *zram);
extern void clear_page_memcg(struct cgroup_cache_page *cache);
#endif
#endif /* end of HYBRIDSWAP_INTERNAL_H */

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

View File

@@ -35,6 +35,10 @@
#include <linux/cpuhotplug.h>
#include "zram_drv.h"
#include "zram_drv_internal.h"
#ifdef CONFIG_HYBRIDSWAP
#include "hybridswap/hybridswap.h"
#endif
static DEFINE_IDR(zram_index_idr);
/* idr index must be protected */
@@ -55,32 +59,14 @@ static void zram_free_page(struct zram *zram, size_t index);
static int zram_bvec_read(struct zram *zram, struct bio_vec *bvec,
u32 index, int offset, struct bio *bio);
unsigned long znr_swap_pages;
bool is_enable_zlimit;
static int zram_slot_trylock(struct zram *zram, u32 index)
{
return bit_spin_trylock(ZRAM_LOCK, &zram->table[index].flags);
}
static void zram_slot_lock(struct zram *zram, u32 index)
{
bit_spin_lock(ZRAM_LOCK, &zram->table[index].flags);
}
static void zram_slot_unlock(struct zram *zram, u32 index)
{
bit_spin_unlock(ZRAM_LOCK, &zram->table[index].flags);
}
static inline bool init_done(struct zram *zram)
{
return zram->disksize;
}
static inline struct zram *dev_to_zram(struct device *dev)
{
return (struct zram *)dev_to_disk(dev)->private_data;
}
static struct zram_entry *zram_get_entry(struct zram *zram, u32 index)
{
return zram->table[index].entry;
@@ -92,49 +78,11 @@ static void zram_set_entry(struct zram *zram, u32 index,
zram->table[index].entry = entry;
}
/* flag operations require table entry bit_spin_lock() being held */
static bool zram_test_flag(struct zram *zram, u32 index,
enum zram_pageflags flag)
{
return zram->table[index].flags & BIT(flag);
}
static void zram_set_flag(struct zram *zram, u32 index,
enum zram_pageflags flag)
{
zram->table[index].flags |= BIT(flag);
}
static void zram_clear_flag(struct zram *zram, u32 index,
enum zram_pageflags flag)
{
zram->table[index].flags &= ~BIT(flag);
}
static inline void zram_set_element(struct zram *zram, u32 index,
unsigned long element)
{
zram->table[index].element = element;
}
static unsigned long zram_get_element(struct zram *zram, u32 index)
{
return zram->table[index].element;
}
static size_t zram_get_obj_size(struct zram *zram, u32 index)
{
return zram->table[index].flags & (BIT(ZRAM_FLAG_SHIFT) - 1);
}
static void zram_set_obj_size(struct zram *zram,
u32 index, size_t size)
{
unsigned long flags = zram->table[index].flags >> ZRAM_FLAG_SHIFT;
zram->table[index].flags = (flags << ZRAM_FLAG_SHIFT) | size;
}
static inline bool zram_allocated(struct zram *zram, u32 index)
{
return zram_get_obj_size(zram, index) ||
@@ -259,6 +207,11 @@ static ssize_t mem_limit_store(struct device *dev,
down_write(&zram->init_lock);
zram->limit_pages = PAGE_ALIGN(limit) >> PAGE_SHIFT;
if (zram->limit_pages) {
is_enable_zlimit = true;
znr_swap_pages = zram->limit_pages * 3;
} else
is_enable_zlimit = false;
up_write(&zram->init_lock);
return len;
@@ -325,6 +278,10 @@ static ssize_t idle_store(struct device *dev,
up_read(&zram->init_lock);
#ifdef CONFIG_ZWB_HANDLE
wake_up_process(zwb_clear_tsk);
#endif
return len;
}
@@ -785,6 +742,7 @@ next:
free_block_bdev(zram, blk_idx);
ret = len;
__free_page(page);
ksys_sync();
release_init_lock:
up_read(&zram->init_lock);
@@ -1294,9 +1252,31 @@ static void zram_free_page(struct zram *zram, size_t index)
atomic64_dec(&zram->stats.huge_pages);
}
#ifdef CONFIG_HYBRIDSWAP_ASYNC_COMPRESS
if (zram_test_flag(zram, index, ZRAM_CACHED)) {
struct page *page = (struct page *)zram_get_page(zram, index);
del_page_from_cache(page);
page->mem_cgroup = NULL;
put_free_page(page);
zram_clear_flag(zram, index, ZRAM_CACHED);
goto out;
}
if (zram_test_flag(zram, index, ZRAM_CACHED_COMPRESS)) {
zram_clear_flag(zram, index, ZRAM_CACHED_COMPRESS);
goto out;
}
#endif
#ifdef CONFIG_HYBRIDSWAP_CORE
hybridswap_untrack(zram, index);
#endif
if (zram_test_flag(zram, index, ZRAM_WB)) {
zram_clear_flag(zram, index, ZRAM_WB);
free_block_bdev(zram, zram_get_element(zram, index));
atomic64_dec(&zram->stats.pages_stored);
goto out;
}
@@ -1307,6 +1287,7 @@ static void zram_free_page(struct zram *zram, size_t index)
if (zram_test_flag(zram, index, ZRAM_SAME)) {
zram_clear_flag(zram, index, ZRAM_SAME);
atomic64_dec(&zram->stats.same_pages);
atomic64_dec(&zram->stats.pages_stored);
goto out;
}
@@ -1318,14 +1299,134 @@ static void zram_free_page(struct zram *zram, size_t index)
atomic64_sub(zram_get_obj_size(zram, index),
&zram->stats.compr_data_size);
out:
atomic64_dec(&zram->stats.pages_stored);
out:
zram_set_entry(zram, index, NULL);
zram_set_obj_size(zram, index, 0);
WARN_ON_ONCE(zram->table[index].flags &
~(1UL << ZRAM_LOCK | 1UL << ZRAM_UNDER_WB));
}
#ifdef CONFIG_HYBRIDSWAP_ASYNC_COMPRESS
void update_zram_index(struct zram *zram, u32 index, unsigned long page)
{
zram_slot_lock(zram, index);
put_anon_pages((struct page*)page);
zram_free_page(zram, index);
zram_set_flag(zram, index, ZRAM_CACHED);
zram_set_page(zram, index, page);
zram_set_obj_size(zram, index, PAGE_SIZE);
zram_slot_unlock(zram, index);
}
int async_compress_page(struct zram *zram, struct page* page)
{
int ret = 0;
unsigned long alloced_pages;
unsigned long handle = 0;
unsigned int comp_len = 0;
void *src, *dst;
struct zcomp_strm *zstrm;
int index = get_zram_index(page);
compress_again:
zram_slot_lock(zram, index);
if (!zram_test_flag(zram, index, ZRAM_CACHED_COMPRESS)) {
zram_slot_unlock(zram, index);
return 0;
}
zram_slot_unlock(zram, index);
zstrm = zcomp_stream_get(zram->comp);
src = kmap_atomic(page);
ret = zcomp_compress(zstrm, src, &comp_len);
kunmap_atomic(src);
if (unlikely(ret)) {
zcomp_stream_put(zram->comp);
pr_err("Compression failed! err=%d\n", ret);
zs_free(zram->mem_pool, handle);
return ret;
}
if (comp_len >= huge_class_size)
comp_len = PAGE_SIZE;
if (!handle)
handle = zs_malloc(zram->mem_pool, comp_len,
__GFP_KSWAPD_RECLAIM |
__GFP_NOWARN |
__GFP_HIGHMEM |
__GFP_MOVABLE |
__GFP_CMA);
if (!handle) {
zcomp_stream_put(zram->comp);
atomic64_inc(&zram->stats.writestall);
handle = zs_malloc(zram->mem_pool, comp_len,
GFP_NOIO | __GFP_HIGHMEM |
__GFP_MOVABLE | __GFP_CMA |
GFP_ATOMIC | ___GFP_HIGH_ATOMIC_ZRAM);
if (handle)
goto compress_again;
return -ENOMEM;
}
alloced_pages = zs_get_total_pages(zram->mem_pool);
update_used_max(zram, alloced_pages);
if (zram->limit_pages && alloced_pages > zram->limit_pages) {
zcomp_stream_put(zram->comp);
zs_free(zram->mem_pool, handle);
return -ENOMEM;
}
dst = zs_map_object(zram->mem_pool, handle, ZS_MM_WO);
src = zstrm->buffer;
if (comp_len == PAGE_SIZE)
src = kmap_atomic(page);
memcpy(dst, src, comp_len);
if (comp_len == PAGE_SIZE)
kunmap_atomic(src);
zcomp_stream_put(zram->comp);
zs_unmap_object(zram->mem_pool, handle);
atomic64_add(comp_len, &zram->stats.compr_data_size);
/*
* Free memory associated with this sector
* before overwriting unused sectors.
*/
zram_slot_lock(zram, index);
if (!zram_test_flag(zram, index, ZRAM_CACHED_COMPRESS)) {
atomic64_sub(comp_len, &zram->stats.compr_data_size);
zs_free(zram->mem_pool, handle);
zram_slot_unlock(zram, index);
return 0;
}
zram_free_page(zram, index);
if (comp_len == PAGE_SIZE) {
zram_set_flag(zram, index, ZRAM_HUGE);
atomic64_inc(&zram->stats.huge_pages);
}
zram_set_handle(zram, index, handle);
zram_set_obj_size(zram, index, comp_len);
#ifdef CONFIG_HYBRIDSWAP_CORE
hybridswap_record(zram, index, page->mem_cgroup);
#endif
zram_slot_unlock(zram, index);
/* Update stats */
atomic64_inc(&zram->stats.pages_stored);
return ret;
}
#endif
static int __zram_bvec_read(struct zram *zram, struct page *page, u32 index,
struct bio *bio, bool partial_io)
{
@@ -1335,6 +1436,22 @@ static int __zram_bvec_read(struct zram *zram, struct page *page, u32 index,
void *src, *dst;
zram_slot_lock(zram, index);
#ifdef CONFIG_HYBRIDSWAP_ASYNC_COMPRESS
if (akcompress_cache_page_fault(zram, page, index))
return 0;
#endif
#ifdef CONFIG_HYBRIDSWAP_CORE
if (likely(!bio)) {
ret = hybridswap_page_fault(zram, index);
if (unlikely(ret)) {
pr_err("search in hybridswap failed! err=%d, page=%u\n",
ret, index);
zram_slot_unlock(zram, index);
return ret;
}
}
#endif
if (zram_test_flag(zram, index, ZRAM_WB)) {
struct bio_vec bvec;
@@ -1451,6 +1568,13 @@ static int __zram_bvec_write(struct zram *zram, struct bio_vec *bvec,
goto out;
}
#ifdef CONFIG_HYBRIDSWAP_ASYNC_COMPRESS
if ((current_is_kswapd() || current_is_swapd(current)) &&
add_anon_page2cache(zram, index, page)) {
return 0;
}
#endif
compress_again:
zstrm = zcomp_stream_get(zram->comp);
src = kmap_atomic(page);
@@ -1492,7 +1616,7 @@ compress_again:
atomic64_inc(&zram->stats.writestall);
entry = zram_entry_alloc(zram, comp_len,
GFP_NOIO | __GFP_HIGHMEM |
__GFP_MOVABLE | __GFP_CMA);
__GFP_MOVABLE | __GFP_CMA | GFP_ATOMIC | ___GFP_HIGH_ATOMIC_ZRAM);
if (entry)
goto compress_again;
return -ENOMEM;
@@ -1501,6 +1625,14 @@ compress_again:
alloced_pages = zs_get_total_pages(zram->mem_pool);
update_used_max(zram, alloced_pages);
if (is_enable_zlimit) {
if (alloced_pages < zram->limit_pages)
znr_swap_pages = (zram->limit_pages
- alloced_pages) * 3;
else
znr_swap_pages = 0;
}
if (zram->limit_pages && alloced_pages > zram->limit_pages) {
zcomp_stream_put(zram->comp);
zram_entry_free(zram, entry);
@@ -1541,6 +1673,10 @@ out:
zram_set_entry(zram, index, entry);
zram_set_obj_size(zram, index, comp_len);
}
#ifdef CONFIG_HYBRIDSWAP_CORE
hybridswap_record(zram, index, page->mem_cgroup);
#endif
zram_slot_unlock(zram, index);
/* Update stats */
@@ -1747,6 +1883,13 @@ static void zram_slot_free_notify(struct block_device *bdev,
return;
}
#ifdef CONFIG_HYBRIDSWAP_CORE
if (!hybridswap_delete(zram, index)) {
zram_slot_unlock(zram, index);
atomic64_inc(&zram->stats.miss_free);
return;
}
#endif
zram_free_page(zram, index);
zram_slot_unlock(zram, index);
}
@@ -1810,6 +1953,7 @@ static void zram_reset_device(struct zram *zram)
down_write(&zram->init_lock);
zram->limit_pages = 0;
is_enable_zlimit = false;
if (!init_done(zram)) {
up_write(&zram->init_lock);
@@ -1967,6 +2111,27 @@ static DEVICE_ATTR_RW(use_dedup);
#else
static DEVICE_ATTR_RO(use_dedup);
#endif
#ifdef CONFIG_HYBRIDSWAP
static DEVICE_ATTR_RO(hybridswap_vmstat);
static DEVICE_ATTR_RW(hybridswap_loglevel);
static DEVICE_ATTR_RW(hybridswap_enable);
#endif
#ifdef CONFIG_HYBRIDSWAP_SWAPD
static DEVICE_ATTR_RW(hybridswap_swapd_pause);
#endif
#ifdef CONFIG_HYBRIDSWAP_CORE
static DEVICE_ATTR_RW(hybridswap_core_enable);
static DEVICE_ATTR_RW(hybridswap_loop_device);
static DEVICE_ATTR_RW(hybridswap_dev_life);
static DEVICE_ATTR_RW(hybridswap_quota_day);
static DEVICE_ATTR_RO(hybridswap_report);
static DEVICE_ATTR_RO(hybridswap_stat_snap);
static DEVICE_ATTR_RO(hybridswap_meminfo);
static DEVICE_ATTR_RW(hybridswap_zram_increase);
#endif
#ifdef CONFIG_HYBRIDSWAP_ASYNC_COMPRESS
static DEVICE_ATTR_RW(hybridswap_akcompress);
#endif
static struct attribute *zram_disk_attrs[] = {
&dev_attr_disksize.attr,
@@ -1991,6 +2156,27 @@ static struct attribute *zram_disk_attrs[] = {
&dev_attr_bd_stat.attr,
#endif
&dev_attr_debug_stat.attr,
#ifdef CONFIG_HYBRIDSWAP
&dev_attr_hybridswap_vmstat.attr,
&dev_attr_hybridswap_loglevel.attr,
&dev_attr_hybridswap_enable.attr,
#endif
#ifdef CONFIG_HYBRIDSWAP_SWAPD
&dev_attr_hybridswap_swapd_pause.attr,
#endif
#ifdef CONFIG_HYBRIDSWAP_CORE
&dev_attr_hybridswap_core_enable.attr,
&dev_attr_hybridswap_report.attr,
&dev_attr_hybridswap_meminfo.attr,
&dev_attr_hybridswap_stat_snap.attr,
&dev_attr_hybridswap_loop_device.attr,
&dev_attr_hybridswap_dev_life.attr,
&dev_attr_hybridswap_quota_day.attr,
&dev_attr_hybridswap_zram_increase.attr,
#endif
#ifdef CONFIG_HYBRIDSWAP_ASYNC_COMPRESS
&dev_attr_hybridswap_akcompress.attr,
#endif
NULL,
};
@@ -2132,6 +2318,9 @@ static int zram_remove(struct zram *zram)
del_gendisk(zram->disk);
blk_cleanup_queue(zram->disk->queue);
put_disk(zram->disk);
#ifdef CONFIG_HYBRIDSWAP_ASYNC_COMPRESS
destroy_akcompressd_task(zram);
#endif
kfree(zram);
return 0;
}
@@ -2255,6 +2444,11 @@ static int __init zram_init(void)
num_devices--;
}
#ifdef CONFIG_HYBRIDSWAP
ret = hybridswap_pre_init();
if (ret)
goto out_error;
#endif
return 0;
out_error:

View File

@@ -52,7 +52,16 @@ enum zram_pageflags {
ZRAM_UNDER_WB, /* page is under writeback */
ZRAM_HUGE, /* Incompressible page */
ZRAM_IDLE, /* not accessed page since last idle marking */
#ifdef CONFIG_HYBRIDSWAP_ASYNC_COMPRESS
ZRAM_CACHED, /* page is cached in async compress cache buffer */
ZRAM_CACHED_COMPRESS, /* page is under async compress */
#endif
#ifdef CONFIG_HYBRIDSWAP_CORE
ZRAM_BATCHING_OUT,
ZRAM_FROM_HYBRIDSWAP,
ZRAM_MCGID_CLEAR,
ZRAM_IN_BD, /* zram stored in back device */
#endif
__NR_ZRAM_PAGEFLAGS,
};
@@ -71,6 +80,9 @@ struct zram_table_entry {
union {
struct zram_entry *entry;
unsigned long element;
#ifdef CONFIG_HYBRIDSWAP_ASYNC_COMPRESS
unsigned long page;
#endif
};
unsigned long flags;
#ifdef CONFIG_ZRAM_MEMORY_TRACKING
@@ -148,6 +160,15 @@ struct zram {
#ifdef CONFIG_ZRAM_MEMORY_TRACKING
struct dentry *debugfs_dir;
#endif
#if (defined CONFIG_ZRAM_WRITEBACK) || (defined CONFIG_HYBRIDSWAP_CORE)
struct block_device *bdev;
unsigned int old_block_size;
unsigned long nr_pages;
unsigned long increase_nr_pages;
#endif
#ifdef CONFIG_HYBRIDSWAP_CORE
struct hyb_info *infos;
#endif
};
static inline bool zram_dedup_enabled(struct zram *zram)
@@ -160,4 +181,11 @@ static inline bool zram_dedup_enabled(struct zram *zram)
}
void zram_entry_free(struct zram *zram, struct zram_entry *entry);
#ifdef CONFIG_ZRAM_WRITEBACK
void ksys_sync(void);
#endif
#ifdef CONFIG_ZWB_HANDLE
extern struct task_struct *zwb_clear_tsk;
#endif
#endif

View File

@@ -0,0 +1,42 @@
#ifndef _ZRAM_DRV_INTERNAL_H_
#define _ZRAM_DRV_INTERNAL_H_
#include "zram_drv.h"
#ifdef BIT
#undef BIT
#define BIT(nr) (1lu << (nr))
#endif
#define zram_slot_lock(zram, index) (bit_spin_lock(ZRAM_LOCK, &zram->table[index].flags))
#define zram_slot_unlock(zram, index) (bit_spin_unlock(ZRAM_LOCK, &zram->table[index].flags))
#define init_done(zram) (zram->disksize)
#define dev_to_zram(dev) ((struct zram *)dev_to_disk(dev)->private_data)
#define zram_get_handle(zram, index) ((unsigned long)(zram->table[index].entry))
#define zram_set_handle(zram, index, handle_val) (zram->table[index].entry = (struct zram_entry *)handle_val)
#define zram_test_flag(zram, index, flag) (zram->table[index].flags & BIT(flag))
#define zram_set_flag(zram, index, flag) (zram->table[index].flags |= BIT(flag))
#define zram_clear_flag(zram, index, flag) (zram->table[index].flags &= ~BIT(flag))
#define zram_set_element(zram, index, element) (zram->table[index].element = element)
#define zram_get_obj_size(zram, index) (zram->table[index].flags & (BIT(ZRAM_FLAG_SHIFT) - 1))
#define zram_set_obj_size(zram, index, size) do {\
unsigned long flags = zram->table[index].flags >> ZRAM_FLAG_SHIFT; \
zram->table[index].flags = (flags << ZRAM_FLAG_SHIFT) | size; \
} while(0)
#ifdef CONFIG_HYBRIDSWAP_ASYNC_COMPRESS
extern int async_compress_page(struct zram *zram, struct page* page);
extern void update_zram_index(struct zram *zram, u32 index, unsigned long page);
#endif
#endif /* _ZRAM_DRV_INTERNAL_H_ */

View File

@@ -0,0 +1 @@
../../../../../vendor/oplus/kernel/oplus_performance/zwb_handle/

View File

@@ -659,10 +659,13 @@ vdd_xtal_fail:
bt_vreg_unvote(bt_power_pdata->bt_vdd_rfa2);
if (bt_power_pdata->bt_vdd_rfa1)
bt_vreg_unvote(bt_power_pdata->bt_vdd_rfa1);
/*
* if did't have pm8009 dig power and aon power disable enter Retention mode
if (bt_power_pdata->bt_vdd_dig)
bt_vreg_unvote(bt_power_pdata->bt_vdd_dig);
if (bt_power_pdata->bt_vdd_aon)
bt_vreg_unvote(bt_power_pdata->bt_vdd_aon);
*/
} else {
BT_PWR_ERR("Invalid power mode: %d", on);
rc = -1;

View File

@@ -210,9 +210,9 @@ static int btfm_slim_dai_prepare(struct snd_pcm_substream *substream,
/* save the enable channel status */
if (ret == 0)
bt_soc_enable_status = 1;
if (ret == -EISCONN) {
BTFMSLIM_ERR("channel opened without closing, return success");
BTFMSLIM_ERR("channel opened without closing, returning success");
ret = 0;
}
return ret;

View File

@@ -1573,7 +1573,7 @@ int of_register_mhi_controller(struct mhi_controller *mhi_cntrl)
init_waitqueue_head(&mhi_cntrl->state_event);
mhi_cntrl->wq = alloc_ordered_workqueue("mhi_w",
WQ_MEM_RECLAIM | WQ_HIGHPRI);
WQ_HIGHPRI);
if (!mhi_cntrl->wq)
goto error_alloc_cmd;

View File

@@ -1045,6 +1045,10 @@ error_dev_ctxt:
}
EXPORT_SYMBOL(mhi_async_power_up);
#ifdef OPLUS_BUG_STABILITY
extern bool direct_panic;
#endif
/* Transition MHI into error state and notify critical clients */
void mhi_control_error(struct mhi_controller *mhi_cntrl)
{
@@ -1060,6 +1064,11 @@ void mhi_control_error(struct mhi_controller *mhi_cntrl)
memcpy(sfr_info->str, sfr_info->buf_addr, sfr_info->len);
MHI_CNTRL_ERR("mhi:%s sfr: %s\n", mhi_cntrl->name,
sfr_info->buf_addr);
#ifdef OPLUS_BUG_STABILITY
if(strstr(sfr_info->buf_addr, "remotefs_sahara.c")) {
direct_panic = true;
}
#endif
}
/* link is not down if device supports RDDM */
@@ -1142,10 +1151,18 @@ int mhi_sync_power_up(struct mhi_controller *mhi_cntrl)
if (ret)
return ret;
#ifndef OPLUS_BUG_STABILITY
//Modify for: mhi timeout 10s to 20s
wait_event_timeout(mhi_cntrl->state_event,
MHI_IN_MISSION_MODE(mhi_cntrl->ee) ||
MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state),
msecs_to_jiffies(mhi_cntrl->timeout_ms));
#else /* OPLUS_BUG_STABILITY */
wait_event_timeout(mhi_cntrl->state_event,
MHI_IN_MISSION_MODE(mhi_cntrl->ee) ||
MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state),
msecs_to_jiffies(20000));
#endif /* OPLUS_BUG_STABILITY */
if (MHI_IN_MISSION_MODE(mhi_cntrl->ee))
return 0;

View File

@@ -65,7 +65,12 @@ enum MHI_DEBUG_LEVEL msg_lvl = MHI_MSG_LVL_ERROR;
#ifdef CONFIG_MHI_DEBUG
#define MHI_UCI_IPC_LOG_PAGES (25)
#ifdef OPLUS_BUG_STABILITY
#define MHI_UCI_IPC_LOG_PAGES (100)
#else
#define MHI_UCI_IPC_LOG_PAGES (25)
#endif /* OPLUS_BUG_STABILITY */
#define MSG_VERB(fmt, ...) do { \
if (msg_lvl <= MHI_MSG_LVL_VERBOSE) \
pr_err("[D][%s] " fmt, __func__, ##__VA_ARGS__); \

View File

@@ -3853,6 +3853,7 @@ static int fastrpc_file_free(struct fastrpc_file *fl)
mutex_destroy(&fl->map_mutex);
mutex_destroy(&fl->pm_qos_mutex);
mutex_destroy(&fl->internal_map_mutex);
mutex_destroy(&fl->pm_qos_mutex);
kfree(fl);
return 0;
}

View File

@@ -7,7 +7,11 @@
#include <linux/ipc_logging.h>
#if defined(CONFIG_MHI_DEBUG) && defined(OPLUS_BUG_STABILITY)
#define DIAG_IPC_LOG_PAGES 200
#else
#define DIAG_IPC_LOG_PAGES 50
#endif /* CONFIG_MHI_DEBUG and OPLUS_BUG_STABILITY */
#define DIAG_DEBUG_USERSPACE 0x0001
#define DIAG_DEBUG_MUX 0x0002

View File

@@ -22,6 +22,9 @@
#include "diagfwd_mhi.h"
#include "diag_dci.h"
#include "diag_ipc_logging.h"
#ifdef OPLUS_FEATURE_CHG_BASIC
#include "diagfwd.h"
#endif
#ifdef CONFIG_MHI_BUS
#define diag_mdm_init diag_mhi_init
@@ -310,6 +313,29 @@ int diagfwd_bridge_close(int id)
int diagfwd_bridge_write(int id, unsigned char *buf, int len)
{
#ifdef OPLUS_FEATURE_CHG_BASIC
uint16_t cmd_code;
uint16_t subsys_id;
uint16_t cmd_code_lo;
uint16_t cmd_code_hi;
unsigned char *temp = NULL;
temp = buf;
cmd_code = (uint16_t)(*(uint8_t *)temp);
temp += sizeof(uint8_t);
subsys_id = (uint16_t)(*(uint8_t *)temp);
temp += sizeof(uint8_t);
cmd_code_hi = (uint16_t)(*(uint16_t *)temp);
cmd_code_lo = (uint16_t)(*(uint16_t *)temp);
if (cmd_code == 0x4b && subsys_id == 0xb && cmd_code_hi == 0x35 && cmd_code_lo == 0x35) {
pr_err("diag command with 75 11 53\n");
if (!driver->hdlc_disabled)
diag_process_hdlc_pkt(buf, len, 0);
else
diag_process_non_hdlc_pkt(buf, len, 0);
}
#endif /* OPLUS_FEATURE_CHG_BASIC */
if (id < 0 || id >= NUM_REMOTE_DEV)
return -EINVAL;
if (bridge_info[id].dev_ops && bridge_info[id].dev_ops->write) {

View File

@@ -28,6 +28,7 @@
#include <linux/of_platform.h>
#include <linux/pm_opp.h>
#include <linux/regulator/consumer.h>
#include <linux/proc_fs.h>
#include "clk.h"
@@ -103,6 +104,9 @@ struct clk_core {
#ifdef CONFIG_DEBUG_FS
struct dentry *dentry;
struct hlist_node debug_node;
#elif defined OPLUS_FEATURE_POWERINFO_RPMH
struct proc_dir_entry *dentry;
struct hlist_node debug_node;
#endif
struct kref ref;
struct clk_vdd_class *vdd_class;
@@ -4038,6 +4042,825 @@ static int __init clk_debug_init(void)
return 0;
}
late_initcall(clk_debug_init);
#elif defined OPLUS_FEATURE_POWERINFO_RPMH
static struct proc_dir_entry *rootdir;
static int inited = 0;
static u32 debug_suspend;
static DEFINE_MUTEX(clk_debug_lock);
static HLIST_HEAD(clk_debug_list);
static struct hlist_head *all_lists[] = {
&clk_root_list,
&clk_orphan_list,
NULL,
};
static void clk_state_subtree(struct clk_core *c)
{
int vdd_level = 0;
struct clk_core *child;
if (!c)
return;
if (c->vdd_class) {
vdd_level = clk_find_vdd_level(c, c->rate);
if (vdd_level < 0)
vdd_level = 0;
}
trace_clk_state(c->name, c->prepare_count, c->enable_count,
c->rate, vdd_level);
hlist_for_each_entry(child, &c->children, child_node)
clk_state_subtree(child);
}
static int clk_state_show(struct seq_file *s, void *data)
{
struct clk_core *c;
struct hlist_head **lists = (struct hlist_head **)s->private;
clk_prepare_lock();
for (; *lists; lists++)
hlist_for_each_entry(c, *lists, child_node)
clk_state_subtree(c);
clk_prepare_unlock();
return 0;
}
static int clk_state_open(struct inode *inode, struct file *file)
{
return single_open(file, clk_state_show, inode->i_private);
}
static const struct file_operations clk_state_fops = {
.open = clk_state_open,
.read = seq_read,
.llseek = seq_lseek,
.release = single_release,
};
static void clk_summary_show_one(struct seq_file *s, struct clk_core *c,
int level)
{
if (!c)
return;
seq_printf(s, "%*s%-*s %7d %8d %8d %11lu %10lu %5d %6d\n",
level * 3 + 1, "",
30 - level * 3, c->name,
c->enable_count, c->prepare_count, c->protect_count,
clk_core_get_rate(c), clk_core_get_accuracy(c),
clk_core_get_phase(c),
clk_core_get_scaled_duty_cycle(c, 100000));
}
static void clk_summary_show_subtree(struct seq_file *s, struct clk_core *c,
int level)
{
struct clk_core *child;
if (!c)
return;
if (c->ops->bus_vote)
c->ops->bus_vote(c->hw, true);
clk_summary_show_one(s, c, level);
hlist_for_each_entry(child, &c->children, child_node)
clk_summary_show_subtree(s, child, level + 1);
if (c->ops->bus_vote)
c->ops->bus_vote(c->hw, false);
}
static int clk_summary_show(struct seq_file *s, void *data)
{
struct clk_core *c;
struct hlist_head **lists = (struct hlist_head **)s->private;
seq_puts(s, " enable prepare protect duty\n");
seq_puts(s, " clock count count count rate accuracy phase cycle\n");
seq_puts(s, "---------------------------------------------------------------------------------------------\n");
clk_prepare_lock();
for (; *lists; lists++)
hlist_for_each_entry(c, *lists, child_node)
clk_summary_show_subtree(s, c, 0);
clk_prepare_unlock();
return 0;
}
DEFINE_PROC_SHOW_ATTRIBUTE(clk_summary);
static void clk_dump_one(struct seq_file *s, struct clk_core *c, int level)
{
if (!c)
return;
/* This should be JSON format, i.e. elements separated with a comma */
seq_printf(s, "\"%s\": { ", c->name);
seq_printf(s, "\"enable_count\": %d,", c->enable_count);
seq_printf(s, "\"prepare_count\": %d,", c->prepare_count);
seq_printf(s, "\"protect_count\": %d,", c->protect_count);
seq_printf(s, "\"rate\": %lu,", clk_core_get_rate(c));
seq_printf(s, "\"accuracy\": %lu,", clk_core_get_accuracy(c));
seq_printf(s, "\"phase\": %d,", clk_core_get_phase(c));
seq_printf(s, "\"duty_cycle\": %u",
clk_core_get_scaled_duty_cycle(c, 100000));
}
static void clk_dump_subtree(struct seq_file *s, struct clk_core *c, int level)
{
struct clk_core *child;
if (!c)
return;
if (c->ops->bus_vote)
c->ops->bus_vote(c->hw, true);
clk_dump_one(s, c, level);
hlist_for_each_entry(child, &c->children, child_node) {
seq_putc(s, ',');
clk_dump_subtree(s, child, level + 1);
}
seq_putc(s, '}');
if (c->ops->bus_vote)
c->ops->bus_vote(c->hw, false);
}
static int clk_dump_show(struct seq_file *s, void *data)
{
struct clk_core *c;
bool first_node = true;
struct hlist_head **lists = (struct hlist_head **)s->private;
seq_putc(s, '{');
clk_prepare_lock();
for (; *lists; lists++) {
hlist_for_each_entry(c, *lists, child_node) {
if (!first_node)
seq_putc(s, ',');
first_node = false;
clk_dump_subtree(s, c, 0);
}
}
clk_prepare_unlock();
seq_puts(s, "}\n");
return 0;
}
DEFINE_PROC_SHOW_ATTRIBUTE(clk_dump);
static const struct {
unsigned long flag;
const char *name;
} clk_flags[] = {
#define ENTRY(f) { f, #f }
ENTRY(CLK_SET_RATE_GATE),
ENTRY(CLK_SET_PARENT_GATE),
ENTRY(CLK_SET_RATE_PARENT),
ENTRY(CLK_IGNORE_UNUSED),
ENTRY(CLK_IS_BASIC),
ENTRY(CLK_GET_RATE_NOCACHE),
ENTRY(CLK_SET_RATE_NO_REPARENT),
ENTRY(CLK_GET_ACCURACY_NOCACHE),
ENTRY(CLK_RECALC_NEW_RATES),
ENTRY(CLK_SET_RATE_UNGATE),
ENTRY(CLK_IS_CRITICAL),
ENTRY(CLK_OPS_PARENT_ENABLE),
ENTRY(CLK_DUTY_CYCLE_PARENT),
#undef ENTRY
};
static int clk_flags_show(struct seq_file *s, void *data)
{
struct clk_core *core = s->private;
unsigned long flags = core->flags;
unsigned int i;
for (i = 0; flags && i < ARRAY_SIZE(clk_flags); i++) {
if (flags & clk_flags[i].flag) {
seq_printf(s, "%s\n", clk_flags[i].name);
flags &= ~clk_flags[i].flag;
}
}
if (flags) {
/* Unknown flags */
seq_printf(s, "0x%lx\n", flags);
}
return 0;
}
DEFINE_PROC_SHOW_ATTRIBUTE(clk_flags);
static int possible_parents_show(struct seq_file *s, void *data)
{
struct clk_core *core = s->private;
int i;
for (i = 0; i < core->num_parents - 1; i++)
seq_printf(s, "%s ", core->parent_names[i]);
seq_printf(s, "%s\n", core->parent_names[i]);
return 0;
}
DEFINE_PROC_SHOW_ATTRIBUTE(possible_parents);
static int clk_duty_cycle_show(struct seq_file *s, void *data)
{
struct clk_core *core = s->private;
struct clk_duty *duty = &core->duty;
seq_printf(s, "%u/%u\n", duty->num, duty->den);
return 0;
}
DEFINE_PROC_SHOW_ATTRIBUTE(clk_duty_cycle);
static ssize_t clock_debug_rate_store(struct file *file,
const char __user *buffer, size_t count, loff_t *ppos)
{
char buff_temp[8];
char *end;
u64 val;
int ret ;
struct clk_core *core = ((struct seq_file *)file->private_data)->private;
if(count <= 0 || buffer == NULL){
pr_err("Input buf is NULL\n");
return 0;
}
copy_from_user(buff_temp, buffer, 8);
val = simple_strtoll(buff_temp, &end, 0);
clk_prepare_lock();
if (core->ops->bus_vote)
core->ops->bus_vote(core->hw, true);
ret = clk_set_rate(core->hw->clk, val);
if (ret)
pr_err("clk_set_rate(%llu) failed (%d)\n",
(unsigned long)val, ret);
if (core->ops->bus_vote)
core->ops->bus_vote(core->hw, false);
clk_prepare_unlock();
return count;
}
static int clock_debug_rate_show(struct seq_file *file, void *v)
{
struct clk_core *core = file->private;
u64 val;
clk_prepare_lock();
if (core->ops->bus_vote)
core->ops->bus_vote(core->hw, true);
val = clk_get_rate(core->hw->clk);
if (core->ops->bus_vote)
core->ops->bus_vote(core->hw, false);
clk_prepare_unlock();
seq_printf(file, "%llu\n", val);
return 0;
}
static int clock_debug_rate_open(struct inode *inode, struct file *file)
{
return single_open(file, clock_debug_rate_show, PDE_DATA(inode));
}
static const struct file_operations clock_rate_fops = {
.open = clock_debug_rate_open,
.read = seq_read,
.llseek = seq_lseek,
.release = single_release,
.write = clock_debug_rate_store,
};
static int clock_parent_show(struct seq_file *file, void *v)
{
char name[256] = {0};
struct clk_core *core = file->private;
struct clk_core *p = core->hw->core->parent;
snprintf(name, sizeof(name), "%s\n", p ? p->name : "None\n");
seq_printf(file, "%d\n", name);
return 0;
//return simple_read_from_buffer(ubuf, cnt, ppos, name, strlen(name));
}
DEFINE_PROC_SHOW_ATTRIBUTE(clock_parent);
static ssize_t clock_enable_store(struct file *file,
const char __user *buffer, size_t count, loff_t *ppos)
{
char buff_temp[8];
char *end;
u64 val;
struct clk_core *core = ((struct seq_file *)file->private_data)->private;
int rc = 0;
if(count <= 0 || buffer == NULL){
pr_err("Input buf is NULL\n");
return 0;
}
copy_from_user(buff_temp, buffer, 8);
val = simple_strtoll(buff_temp, &end, 0);
clk_prepare_lock();
if (core->ops->bus_vote)
core->ops->bus_vote(core->hw, true);
if (val)
rc = clk_prepare_enable(core->hw->clk);
else
clk_disable_unprepare(core->hw->clk);
if (core->ops->bus_vote)
core->ops->bus_vote(core->hw, false);
clk_prepare_unlock();
return count;
}
static int clock_enable_show(struct seq_file *file, void *v)
{
struct clk_core *core = file->private;
int enabled = 0;
enabled = core->enable_count;
seq_printf(file, "%d\n", enabled);
return 0;
}
DEFINE_PROC_SHOW_STORE_ATTRIBUTE(clock_enable);
#define clock_debug_output(m, c, fmt, ...) \
do { \
if (m) \
seq_printf(m, fmt, ##__VA_ARGS__); \
else if (c) \
pr_cont(fmt, ##__VA_ARGS__); \
else \
pr_info(fmt, ##__VA_ARGS__); \
} while (0)
static int clock_debug_print_clock(struct clk_core *c, struct seq_file *s)
{
char *start = "";
struct clk *clk;
if (!c || !c->prepare_count)
return 0;
clk = c->hw->clk;
clock_debug_output(s, 0, " ");
do {
if (clk->core->vdd_class)
clock_debug_output(s, 1, "%s%s:%u:%u [%ld, %d]", start,
clk->core->name,
clk->core->prepare_count,
clk->core->enable_count,
clk->core->rate,
clk_find_vdd_level(clk->core, clk->core->rate));
else
clock_debug_output(s, 1, "%s%s:%u:%u [%ld]", start,
clk->core->name,
clk->core->prepare_count,
clk->core->enable_count,
clk->core->rate);
start = " -> ";
} while ((clk = clk_get_parent(clk)));
clock_debug_output(s, 1, "\n");
return 1;
}
/*
* clock_debug_print_enabled_clocks() - Print names of enabled clocks
*/
static void clock_debug_print_enabled_clocks(struct seq_file *s)
{
struct clk_core *core;
int cnt = 0;
if (!mutex_trylock(&clk_debug_lock))
return;
clock_debug_output(s, 0, "Enabled clocks:\n");
hlist_for_each_entry(core, &clk_debug_list, debug_node)
cnt += clock_debug_print_clock(core, s);
mutex_unlock(&clk_debug_lock);
if (cnt)
clock_debug_output(s, 0, "Enabled clock count: %d\n", cnt);
else
clock_debug_output(s, 0, "No clocks enabled.\n");
}
static int clk_enabled_list_show(struct seq_file *s, void *unused)
{
clock_debug_print_enabled_clocks(s);
return 0;
}
DEFINE_PROC_SHOW_ATTRIBUTE(clk_enabled_list);
void clk_debug_print_hw(struct clk_core *clk, struct seq_file *f)
{
if (IS_ERR_OR_NULL(clk))
return;
clk_debug_print_hw(clk->parent, f);
clock_debug_output(f, false, "%s\n", clk->name);
if (!clk->ops->list_registers)
return;
clk->ops->list_registers(f, clk->hw);
}
EXPORT_SYMBOL(clk_debug_print_hw);
static int clock_print_hw_show(struct seq_file *m, void *unused)
{
struct clk_core *c = m->private;
struct clk_core *clk;
clk_prepare_lock();
for (clk = c; clk; clk = clk->parent)
if (clk->ops->bus_vote)
clk->ops->bus_vote(clk->hw, true);
clk_debug_print_hw(c, m);
for (clk = c; clk; clk = clk->parent)
if (clk->ops->bus_vote)
clk->ops->bus_vote(c->hw, false);
clk_prepare_unlock();
return 0;
}
DEFINE_PROC_SHOW_ATTRIBUTE(clock_print_hw);
static int list_rates_show(struct seq_file *s, void *unused)
{
struct clk_core *core = s->private;
int level = 0, i = 0;
unsigned long rate, rate_max = 0;
/* Find max frequency supported within voltage constraints. */
if (!core->vdd_class) {
rate_max = ULONG_MAX;
} else {
for (level = 0; level < core->num_rate_max; level++)
if (core->rate_max[level])
rate_max = core->rate_max[level];
}
/*
* List supported frequencies <= rate_max. Higher frequencies may
* appear in the frequency table, but are not valid and should not
* be listed.
*/
while (!IS_ERR_VALUE(rate =
core->ops->list_rate(core->hw, i++, rate_max))) {
if (rate <= 0)
break;
if (rate <= rate_max)
seq_printf(s, "%lu\n", rate);
}
return 0;
}
DEFINE_PROC_SHOW_ATTRIBUTE(list_rates);
/*
 * Print one row of the clk_rate_max table for @level: the max rate at
 * that vdd level plus the voltage (uV) each regulator must supply.
 * The row matching the clk's current vdd level is marked with "[" / "]".
 */
static void clock_print_rate_max_by_level(struct seq_file *s, int level)
{
	struct clk_core *core = s->private;
	struct clk_vdd_class *vdd_class = core->vdd_class;
	int off, i, vdd_level, nregs = vdd_class->num_regulators;

	vdd_level = clk_find_vdd_level(core, core->rate);

	seq_printf(s, "%2s%10lu", vdd_level == level ? "[" : "",
		core->rate_max[level]);

	for (i = 0; i < nregs; i++) {
		/* vdd_uv is a flat [level][regulator] array */
		off = nregs*level + i;
		if (vdd_class->vdd_uv)
			seq_printf(s, "%10u", vdd_class->vdd_uv[off]);
	}

	if (vdd_level == level)
		seq_puts(s, "]");

	seq_puts(s, "\n");
}
/*
 * seq_file show for the per-clk "clk_rate_max" node: print the whole
 * rate-max/voltage table — a header row, then one row per vdd level
 * (see clock_print_rate_max_by_level). Emits a diagnostic and returns
 * early if the current rate maps to no vdd level.
 * NOTE(review): this node is only created when core->vdd_class != NULL
 * (see clk_debug_create_one), so the unchecked deref below is safe.
 */
static int rate_max_show(struct seq_file *s, void *unused)
{
	struct clk_core *core = s->private;
	struct clk_vdd_class *vdd_class = core->vdd_class;
	int level = 0, i, nregs = vdd_class->num_regulators;
	char reg_name[10];
	int vdd_level = clk_find_vdd_level(core, core->rate);

	if (vdd_level < 0) {
		seq_printf(s, "could not find_vdd_level for %s, %ld\n",
			core->name, core->rate);
		return 0;
	}

	/* header: one "reg <n>" column per regulator */
	seq_printf(s, "%12s", "");
	for (i = 0; i < nregs; i++) {
		snprintf(reg_name, ARRAY_SIZE(reg_name), "reg %d", i);
		seq_printf(s, "%10s", reg_name);
	}

	seq_printf(s, "\n%12s", "freq");
	for (i = 0; i < nregs; i++)
		seq_printf(s, "%10s", "uV");
	seq_puts(s, "\n");

	for (level = 0; level < core->num_rate_max; level++)
		clock_print_rate_max_by_level(s, level);

	return 0;
}
DEFINE_PROC_SHOW_ATTRIBUTE(rate_max);
/* seq_file show for the per-clk "clk_accuracy" node. */
static int clk_accuracy_show(struct seq_file *s, void *data)
{
	struct clk_core *core = s->private;
	unsigned long accuracy = core->accuracy;

	seq_printf(s, "%lu\n", accuracy);
	return 0;
}
DEFINE_PROC_SHOW_ATTRIBUTE(clk_accuracy);
/* seq_file show for the per-clk "clk_phase" node. */
static int clk_phase_show(struct seq_file *s, void *data)
{
	struct clk_core *core = s->private;
	int phase = core->phase;

	seq_printf(s, "%d\n", phase);
	return 0;
}
DEFINE_PROC_SHOW_ATTRIBUTE(clk_phase);
/* seq_file show for the per-clk "clk_prepare_count" node. */
static int clk_prepare_count_show(struct seq_file *s, void *data)
{
	struct clk_core *core = s->private;
	int prepare_count = core->prepare_count;

	seq_printf(s, "%d\n", prepare_count);
	return 0;
}
DEFINE_PROC_SHOW_ATTRIBUTE(clk_prepare_count);
/* seq_file show for the per-clk "clk_notifier_count" node. */
static int clk_notifier_count_show(struct seq_file *s, void *data)
{
	struct clk_core *core = s->private;
	int notifiers = core->notifier_count;

	seq_printf(s, "%d\n", notifiers);
	return 0;
}
DEFINE_PROC_SHOW_ATTRIBUTE(clk_notifier_count);
/*
 * clk_debug_create_one - create the per-clk procfs directory and nodes
 * @core:    clk whose stats are exposed
 * @pdentry: parent procfs directory (the /proc/oplus_clk root)
 *
 * Creates <pdentry>/<core->name>/ and populates it with read-only nodes
 * (clk_rate, clk_accuracy, clk_phase, counts, flags, parent info, register
 * dump). Optional nodes are only created when the clk supports them:
 * clk_list_rates (ops->list_rate), clk_rate_max (vdd_class),
 * clk_possible_parents (num_parents > 1).
 *
 * Returns 0 on success, -EINVAL on bad arguments, -ENOMEM when any node
 * fails to create; on failure the whole directory is removed again and
 * core->dentry is reset to NULL.
 */
static int clk_debug_create_one(struct clk_core *core, struct proc_dir_entry *pdentry)
{
	struct proc_dir_entry *d;
	int ret = -ENOMEM;

	if (!core || !pdentry) {
		ret = -EINVAL;
		goto out;
	}

	d = proc_mkdir(core->name, pdentry);
	if (!d)
		goto out;

	/* NOTE: ->dentry holds a proc_dir_entry in this port, not a debugfs dentry */
	core->dentry = d;

	d = proc_create_data("clk_rate", 0444, core->dentry, &clock_rate_fops, core);
	if (!d)
		goto err_out;

	if (core->ops->list_rate) {
		if (!proc_create_data("clk_list_rates",
				0444, core->dentry, &list_rates_fops, core))
			goto err_out;
	}

	if (core->vdd_class && !proc_create_data("clk_rate_max",
			0444, core->dentry, &rate_max_fops, core))
		goto err_out;

	d = proc_create_data("clk_accuracy", 0444, core->dentry, &clk_accuracy_fops, core);
	if (!d)
		goto err_out;

	d = proc_create_data("clk_phase", 0444, core->dentry, &clk_phase_fops, core);
	if (!d)
		goto err_out;

	d = proc_create_data("clk_prepare_count", 0444, core->dentry, &clk_prepare_count_fops, core);
	if (!d)
		goto err_out;

	d = proc_create_data("clk_flags", 0444, core->dentry, &clk_flags_fops, core);
	if (!d)
		goto err_out;

	d = proc_create_data("clk_enable_count", 0444, core->dentry, &clock_enable_fops, core);
	if (!d)
		goto err_out;

	d = proc_create_data("clk_notifier_count", 0444, core->dentry, &clk_notifier_count_fops, core);
	if (!d)
		goto err_out;

	if (core->num_parents > 1) {
		d = proc_create_data("clk_possible_parents", 0444,
				core->dentry, &possible_parents_fops, core);
		if (!d)
			goto err_out;
	}

	d = proc_create_data("clk_parent", 0444, core->dentry, &clock_parent_fops, core);
	if (!d)
		goto err_out;

	d = proc_create_data("clk_print_regs", 0444, core->dentry, &clock_print_hw_fops, core);
	if (!d)
		goto err_out;

	/*
	 * The upstream ops->debug_init() hook is deliberately not invoked:
	 * it expects a debugfs dentry, while this port exposes procfs nodes.
	 */
	//if (core->ops->debug_init)
	//	core->ops->debug_init(core->hw, core->dentry);

	ret = 0;
	goto out;

err_out:
	proc_remove(core->dentry);
	core->dentry = NULL;
out:
	return ret;
}
/**
 * clk_debug_register - add a clk node to the procfs clk directory
 * @core: the clk being added to the procfs clk directory
 *
 * Adds @core to clk_debug_list under clk_debug_lock. Once the procfs
 * root has been created by oplus_clk_debug_init() (inited != 0) the
 * per-clk nodes are created immediately; clks registered earlier are
 * populated lazily by oplus_clk_debug_init() at late_initcall time.
 */
static void clk_debug_register(struct clk_core *core)
{
	mutex_lock(&clk_debug_lock);
	hlist_add_head(&core->debug_node, &clk_debug_list);
	if (inited)
		clk_debug_create_one(core, rootdir);
	mutex_unlock(&clk_debug_lock);
}
/**
 * clk_debug_unregister - remove a clk node from the procfs clk directory
 * @core: the clk being removed from the procfs clk directory
 *
 * Drops @core from clk_debug_list and removes its procfs subtree (if one
 * was created); ->dentry is reset to NULL so a repeat call is harmless.
 * Runs under clk_debug_lock to serialize against registration and the
 * enabled-clock dump walkers.
 */
static void clk_debug_unregister(struct clk_core *core)
{
	mutex_lock(&clk_debug_lock);
	hlist_del_init(&core->debug_node);
	proc_remove(core->dentry);
	core->dentry = NULL;
	mutex_unlock(&clk_debug_lock);
}
/*
* Print the names of all enabled clocks and their parents if
* debug_suspend is set from debugfs.
*/
/*
 * Dump the enabled-clock list (NULL seq_file, i.e. log output) — but only
 * when the debug_suspend knob has been set via procfs; a no-op otherwise.
 */
void clock_debug_print_enabled(void)
{
	if (unlikely(debug_suspend))
		clock_debug_print_enabled_clocks(NULL);
}
EXPORT_SYMBOL_GPL(clock_debug_print_enabled);
/*
 * procfs write handler for /proc/oplus_clk/debug_suspend: parse an
 * integer from userspace into the debug_suspend knob.
 *
 * Fixes over the previous version:
 *  - copy only min(count, 7) bytes instead of a fixed 8, so a short
 *    user buffer is never over-read;
 *  - check the copy_from_user() return value (was ignored) and fail
 *    with -EFAULT;
 *  - NUL-terminate before simple_strtoll(), which expects a C string.
 * The full @count is still consumed on success, as before.
 */
static ssize_t debug_suspend_store(struct file *file,
		const char __user *buffer, size_t count, loff_t *ppos)
{
	char buff_temp[8] = {0};
	char *end;
	size_t len;

	if (count == 0 || buffer == NULL) {
		pr_err("Input buf is NULL\n");
		return 0;
	}

	/* leave room for the terminating NUL */
	len = min_t(size_t, count, sizeof(buff_temp) - 1);
	if (copy_from_user(buff_temp, buffer, len))
		return -EFAULT;
	buff_temp[len] = '\0';

	debug_suspend = simple_strtoll(buff_temp, &end, 0);
	return count;
}
/* seq_file show for /proc/oplus_clk/debug_suspend: current knob value. */
static int debug_suspend_show(struct seq_file *m, void *v)
{
	seq_printf(m, "%d\n", debug_suspend);
	return 0;
}
/* Generates debug_suspend_fops with both show and store hooks. */
DEFINE_PROC_SHOW_STORE_ATTRIBUTE(debug_suspend);
/**
* oplus_clk_debug_init - lazily populate the debugfs clk directory
*
* clks are often initialized very early during boot before memory can be
* dynamically allocated and well before debugfs is setup. This function
* populates the debugfs clk directory once at boot-time when we know that
* debugfs is setup. It should only be called once at boot-time, all other clks
* added dynamically will be done so with clk_debug_register.
*/
/**
 * oplus_clk_debug_init - lazily populate the /proc/oplus_clk directory
 *
 * clks are often initialized very early during boot, before memory can be
 * dynamically allocated and well before procfs is usable. This runs once
 * at late_initcall time: it creates the global nodes (clk_summary,
 * clk_dump, clk_enabled_list, debug_suspend, trace_clocks) and then the
 * per-clk subdirectories for every clk already on clk_debug_list. Clks
 * registered afterwards are handled directly by clk_debug_register().
 *
 * Fix: on a partial failure the previously created nodes and the root
 * directory used to be leaked (and rootdir left dangling); they are now
 * torn down before returning -ENOMEM.
 */
static int __init oplus_clk_debug_init(void)
{
	struct proc_dir_entry *d;
	struct clk_core *core;

	rootdir = proc_mkdir("oplus_clk", NULL);
	if (!rootdir)
		return -ENOMEM;

	d = proc_create_data("clk_summary", 0444, rootdir, &clk_summary_fops, &all_lists);
	if (!d)
		goto err;

	d = proc_create_data("clk_dump", 0444, rootdir, &clk_dump_fops, &all_lists);
	if (!d)
		goto err;

	d = proc_create_data("clk_enabled_list", 0444, rootdir, &clk_enabled_list_fops, &clk_debug_list);
	if (!d)
		goto err;

	d = proc_create("debug_suspend", 0644, rootdir, &debug_suspend_fops);
	if (!d)
		goto err;

	d = proc_create_data("trace_clocks", 0444, rootdir, &clk_state_fops, &all_lists);
	if (!d)
		goto err;

	mutex_lock(&clk_debug_lock);
	hlist_for_each_entry(core, &clk_debug_list, debug_node)
		clk_debug_create_one(core, rootdir);
	inited = 1;
	mutex_unlock(&clk_debug_lock);

	return 0;

err:
	/* proc_remove() tears down the whole subtree, including children */
	proc_remove(rootdir);
	rootdir = NULL;
	return -ENOMEM;
}
late_initcall(oplus_clk_debug_init);
#else
static inline void clk_debug_register(struct clk_core *core) { }
static inline void clk_debug_reparent(struct clk_core *core,
@@ -4057,6 +4880,14 @@ void clock_debug_print_enabled(void)
}
#endif
#if (defined OPLUS_FEATURE_POWERINFO_RPMH)||(defined CONFIG_DEBUG_FS)
/*
 * Unconditional variant of clock_debug_print_enabled(): dumps the
 * enabled-clock list regardless of the debug_suspend knob, with a NULL
 * seq_file (presumably log output — see clock_debug_output). Exported
 * for the oplus power-info/RPMh code.
 */
void oplus_clock_debug_print_enabled(void)
{
	clock_debug_print_enabled_clocks(NULL);
}
EXPORT_SYMBOL_GPL(oplus_clock_debug_print_enabled);
#endif
/**
* __clk_core_init - initialize the data structures in a struct clk_core
* @core: clk_core being initialized
@@ -5554,4 +6385,4 @@ void __init of_clk_init(const struct of_device_id *matches)
force = true;
}
}
#endif
#endif

View File

@@ -34,6 +34,16 @@
#include <linux/sched/sysctl.h>
#include <trace/events/power.h>
#ifdef CONFIG_OPLUS_FEATURE_TPD
#include <linux/tpd/tpd.h>
#endif
#if IS_ENABLED(CONFIG_OPLUS_FEATURE_CPU_JANKINFO)
#include <linux/cpu_jankinfo/jank_freq.h>
#endif
#if defined(OPLUS_FEATURE_TASK_CPUSTATS) && defined(CONFIG_OPLUS_SCHED)
#include <linux/task_sched_info.h>
#endif /* defined(OPLUS_FEATURE_TASK_CPUSTATS) && defined(CONFIG_OPLUS_SCHED) */
static LIST_HEAD(cpufreq_policy_list);
@@ -200,6 +210,14 @@ struct cpufreq_policy *cpufreq_cpu_get_raw(unsigned int cpu)
}
EXPORT_SYMBOL_GPL(cpufreq_cpu_get_raw);
#ifdef OPLUS_FEATURE_HEALTHINFO
struct list_head *get_cpufreq_policy_list(void)
{
return &cpufreq_policy_list;
}
EXPORT_SYMBOL(get_cpufreq_policy_list);
#endif /* OPLUS_FEATURE_HEALTHINFO */
unsigned int cpufreq_generic_get(unsigned int cpu)
{
struct cpufreq_policy *policy = cpufreq_cpu_get_raw(cpu);
@@ -361,6 +379,9 @@ static void cpufreq_notify_transition(struct cpufreq_policy *policy,
cpufreq_stats_record_transition(policy, freqs->new);
cpufreq_times_record_transition(policy, freqs->new);
policy->cur = freqs->new;
#if defined(OPLUS_FEATURE_TASK_CPUSTATS) && defined(CONFIG_OPLUS_SCHED)
update_freq_info(policy);
#endif /* defined(OPLUS_FEATURE_TASK_CPUSTATS) && defined(CONFIG_OPLUS_SCHED) */
}
}
@@ -507,7 +528,15 @@ EXPORT_SYMBOL_GPL(cpufreq_disable_fast_switch);
unsigned int cpufreq_driver_resolve_freq(struct cpufreq_policy *policy,
unsigned int target_freq)
{
#if IS_ENABLED(CONFIG_OPLUS_FEATURE_CPU_JANKINFO)
unsigned int old_target_freq = target_freq;
#endif
target_freq = clamp_val(target_freq, policy->min, policy->max);
#if IS_ENABLED(CONFIG_OPLUS_FEATURE_CPU_JANKINFO)
jankinfo_update_freq_reach_limit_count(policy,
old_target_freq, target_freq, DO_CLAMP);
#endif
policy->cached_target_freq = target_freq;
if (cpufreq_driver->target_index) {
@@ -905,6 +934,19 @@ static ssize_t show_bios_limit(struct cpufreq_policy *policy, char *buf)
return sprintf(buf, "%u\n", policy->cpuinfo.max_freq);
}
#ifdef OPLUS_FEATURE_HEALTHINFO
#ifdef CONFIG_OPLUS_HEALTHINFO
static ssize_t show_freq_change_info(struct cpufreq_policy *policy, char *buf)
{
ssize_t i = 0;
i += sprintf(buf, "%u,%s\n", policy->org_max, policy->change_comm);
return i;
}
#endif
#endif /* OPLUS_FEATURE_HEALTHINFO */
cpufreq_freq_attr_ro_perm(cpuinfo_cur_freq, 0400);
cpufreq_freq_attr_ro(cpuinfo_min_freq);
cpufreq_freq_attr_ro(cpuinfo_max_freq);
@@ -915,6 +957,11 @@ cpufreq_freq_attr_ro(scaling_cur_freq);
cpufreq_freq_attr_ro(bios_limit);
cpufreq_freq_attr_ro(related_cpus);
cpufreq_freq_attr_ro(affected_cpus);
#ifdef OPLUS_FEATURE_HEALTHINFO
#ifdef CONFIG_OPLUS_HEALTHINFO
cpufreq_freq_attr_ro(freq_change_info);
#endif
#endif /* OPLUS_FEATURE_HEALTHINFO */
cpufreq_freq_attr_rw(scaling_min_freq);
cpufreq_freq_attr_rw(scaling_max_freq);
cpufreq_freq_attr_rw(scaling_governor);
@@ -932,6 +979,11 @@ static struct attribute *default_attrs[] = {
&scaling_driver.attr,
&scaling_available_governors.attr,
&scaling_setspeed.attr,
#ifdef OPLUS_FEATURE_HEALTHINFO
#ifdef CONFIG_OPLUS_HEALTHINFO
&freq_change_info.attr,
#endif
#endif /* OPLUS_FEATURE_HEALTHINFO */
NULL
};
@@ -1279,6 +1331,9 @@ static int cpufreq_online(unsigned int cpu)
per_cpu(cpufreq_cpu_data, j) = policy;
add_cpu_dev_symlink(policy, j);
}
#ifdef CONFIG_OPLUS_FEATURE_TPD
tpd_init_policy(policy);
#endif
} else {
policy->min = policy->user_policy.min;
policy->max = policy->user_policy.max;
@@ -1902,8 +1957,14 @@ unsigned int cpufreq_driver_fast_switch(struct cpufreq_policy *policy,
unsigned int target_freq)
{
int ret;
#if IS_ENABLED(CONFIG_OPLUS_FEATURE_CPU_JANKINFO)
unsigned int old_target_freq = target_freq;
#endif
target_freq = clamp_val(target_freq, policy->min, policy->max);
#if IS_ENABLED(CONFIG_OPLUS_FEATURE_CPU_JANKINFO)
jankinfo_update_freq_reach_limit_count(policy,
old_target_freq, target_freq, DO_CLAMP | DO_INCREASE);
#endif
ret = cpufreq_driver->fast_switch(policy, target_freq);
if (ret) {
@@ -2010,6 +2071,10 @@ int __cpufreq_driver_target(struct cpufreq_policy *policy,
/* Make sure that target_freq is within supported range */
target_freq = clamp_val(target_freq, policy->min, policy->max);
#if IS_ENABLED(CONFIG_OPLUS_FEATURE_CPU_JANKINFO)
jankinfo_update_freq_reach_limit_count(policy,
old_target_freq, target_freq, DO_CLAMP);
#endif
pr_debug("target for CPU %u: %u kHz, relation %u, requested %u kHz\n",
policy->cpu, target_freq, relation, old_target_freq);
@@ -2247,7 +2312,11 @@ static int cpufreq_set_policy(struct cpufreq_policy *policy,
pr_debug("setting new policy for CPU %u: %u - %u kHz\n",
new_policy->cpu, new_policy->min, new_policy->max);
#ifdef OPLUS_FEATURE_HEALTHINFO
#ifdef CONFIG_OPLUS_HEALTHINFO
policy->org_max = new_policy->max;
#endif
#endif /* OPLUS_FEATURE_HEALTHINFO */
memcpy(&new_policy->cpuinfo, &policy->cpuinfo, sizeof(policy->cpuinfo));
/*
@@ -2286,6 +2355,16 @@ static int cpufreq_set_policy(struct cpufreq_policy *policy,
policy->max = new_policy->max;
trace_cpu_frequency_limits(policy);
#ifdef OPLUS_FEATURE_HEALTHINFO
#ifdef CONFIG_OPLUS_HEALTHINFO
strncpy(policy->change_comm, current->comm, TASK_COMM_LEN);
#endif
#endif /* OPLUS_FEATURE_HEALTHINFO */
#if defined(OPLUS_FEATURE_TASK_CPUSTATS) && defined(CONFIG_OPLUS_SCHED)
update_freq_limit_info(policy);
#endif /* defined(OPLUS_FEATURE_TASK_CPUSTATS) && defined(CONFIG_OPLUS_SCHED) */
arch_set_max_freq_scale(policy->cpus, policy->max);
policy->cached_target_freq = UINT_MAX;

View File

@@ -212,7 +212,7 @@ void cpufreq_stats_create_table(struct cpufreq_policy *policy)
/* We failed, release resources */
policy->stats = NULL;
kfree(stats->time_in_state);
free_stat:
kfree(stats);
}

View File

@@ -25,6 +25,10 @@
#include <linux/spinlock.h>
#include <linux/threads.h>
#ifdef CONFIG_OPLUS_FEATURE_MIDAS
#include <linux/oplus_midas.h>
#endif
#define UID_HASH_BITS 10
static DECLARE_HASHTABLE(uid_hash_table, UID_HASH_BITS);
@@ -424,6 +428,9 @@ void cpufreq_acct_update_power(struct task_struct *p, u64 cputime)
uid_entry->time_in_state[state] += cputime;
spin_unlock_irqrestore(&uid_lock, flags);
#ifdef CONFIG_OPLUS_FEATURE_MIDAS
midas_record_task_times(uid, cputime, p, state);
#endif
rcu_read_lock();
uid_entry = find_uid_entry_rcu(uid);
if (!uid_entry) {

View File

@@ -228,6 +228,9 @@ int cpuidle_enter_state(struct cpuidle_device *dev, struct cpuidle_driver *drv,
trace_cpu_idle_rcuidle(index, dev->cpu);
time_start = ns_to_ktime(local_clock());
#ifdef CONFIG_OPLUS_FEATURE_GAME_OPT
g_time_in_state_update_idle(dev->cpu, 1);
#endif
stop_critical_timings();
entered_state = target_state->enter(dev, drv, index);
@@ -235,6 +238,9 @@ int cpuidle_enter_state(struct cpuidle_device *dev, struct cpuidle_driver *drv,
sched_clock_idle_wakeup_event();
time_end = ns_to_ktime(local_clock());
#ifdef CONFIG_OPLUS_FEATURE_GAME_OPT
g_time_in_state_update_idle(dev->cpu, 0);
#endif
trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, dev->cpu);
/* The cpu is no longer idle or about to enter idle. */

View File

@@ -5,7 +5,9 @@
#ifndef __DRIVER_CPUIDLE_H
#define __DRIVER_CPUIDLE_H
#ifdef CONFIG_OPLUS_FEATURE_GAME_OPT
#include "../soc/oplus/game_opt/game_ctrl.h"
#endif
/* For internal use only */
extern struct cpuidle_governor *cpuidle_curr_governor;
extern struct list_head cpuidle_governors;

View File

@@ -113,6 +113,13 @@ show_attr(__attr) \
store_attr(__attr, (min), (max)) \
static DEVICE_ATTR(__attr, 0644, show_##__attr, store_##__attr)
#ifdef VENDOR_EDIT
#define gov_attr_rw(__attr, min, max) \
show_attr(__attr) \
store_attr(__attr, (min), (max)) \
static DEVICE_ATTR(__attr, 0664, show_##__attr, store_##__attr)
#endif /* VENDOR_EDIT */
#define show_list_attr(name, n) \
static ssize_t show_list_##name(struct device *dev, \
struct device_attribute *attr, char *buf) \
@@ -808,9 +815,17 @@ gov_attr(up_scale, 0U, 500U);
gov_attr(up_thres, 1U, 100U);
gov_attr(down_thres, 0U, 90U);
gov_attr(down_count, 0U, 90U);
#ifdef VENDOR_EDIT
gov_attr_rw(hist_memory, 0U, 90U);
gov_attr_rw(hyst_trigger_count, 0U, 90U);
gov_attr_rw(hyst_length, 0U, 90U);
#else
gov_attr(hist_memory, 0U, 90U);
gov_attr(hyst_trigger_count, 0U, 90U);
gov_attr(hyst_length, 0U, 90U);
#endif /* VENDOR_EDIT */
gov_attr(idle_mbps, 0U, 2000U);
gov_attr(use_ab, 0U, 1U);
gov_list_attr(mbps_zones, NUM_MBPS_ZONES, 0U, UINT_MAX);

View File

@@ -44,6 +44,16 @@
#include <uapi/linux/dma-buf.h>
#include <uapi/linux/magic.h>
#ifdef OPLUS_FEATURE_LOWMEM_DBG
/* Add for dump memory */
/* usage when lowmmem occurs. */
#include <soc/oplus/lowmem_dbg.h>
#endif /* OPLUS_FEATURE_LOWMEM_DBG */
#if defined(OPLUS_FEATURE_PERFORMANCE) && defined(CONFIG_PROC_FS)
#include <linux/proc_fs.h>
#endif
static inline int is_dma_buf_file(struct file *);
struct dma_buf_list {
@@ -522,6 +532,15 @@ static inline int is_dma_buf_file(struct file *file)
return file->f_op == &dma_buf_fops;
}
#ifdef OPLUS_FEATURE_LOWMEM_DBG
/*
 * Exposed wrapper around the file-local is_dma_buf_file() so the oplus
 * lowmem-debug code can identify dma-buf backed files when dumping
 * memory usage on low-memory events.
 */
inline int oplus_is_dma_buf_file(struct file *file)
{
	return is_dma_buf_file(file);
}
#endif /* OPLUS_FEATURE_LOWMEM_DBG */
static struct file *dma_buf_getfile(struct dma_buf *dmabuf, int flags)
{
struct file *file;
@@ -1328,7 +1347,7 @@ int dma_buf_get_uuid(struct dma_buf *dmabuf, uuid_t *uuid)
}
EXPORT_SYMBOL_GPL(dma_buf_get_uuid);
#ifdef CONFIG_DEBUG_FS
#if defined(CONFIG_DEBUG_FS) || (defined(OPLUS_FEATURE_PERFORMANCE) && defined(CONFIG_PROC_FS))
static int dma_buf_debug_show(struct seq_file *s, void *unused)
{
int ret;
@@ -1568,6 +1587,7 @@ static const struct file_operations dma_procs_debug_fops = {
.release = single_release
};
#ifdef CONFIG_DEBUG_FS
static struct dentry *dma_buf_debugfs_dir;
static int dma_buf_init_debugfs(void)
@@ -1608,6 +1628,64 @@ static void dma_buf_uninit_debugfs(void)
{
debugfs_remove_recursive(dma_buf_debugfs_dir);
}
#else /* CONFIG_DEBUG_FS */
static inline int dma_buf_init_debugfs(void)
{
return 0;
}
static inline void dma_buf_uninit_debugfs(void)
{
}
#endif /* CONFIG_DEBUG_FS */
#if defined(OPLUS_FEATURE_PERFORMANCE) && defined(CONFIG_PROC_FS)
static struct proc_dir_entry *dma_buf_procfs_root;
int dma_buf_init_procfs(void)
{
struct proc_dir_entry *p;
int err = 0;
p = proc_mkdir("dma_buf", NULL);
if (IS_ERR(p))
return PTR_ERR(p);
dma_buf_procfs_root = p;
p = proc_create_data("bufinfo",
S_IFREG | 0664,
dma_buf_procfs_root,
&dma_buf_debug_fops,
NULL);
if (IS_ERR(p)) {
pr_debug("dma_buf: procfs: failed to create node bufinfo\n");
proc_remove(dma_buf_procfs_root);
dma_buf_procfs_root = NULL;
err = PTR_ERR(dma_buf_procfs_root);
return err;
}
p = proc_create_data("dmaprocs",
S_IFREG | 0664,
dma_buf_procfs_root,
&dma_procs_debug_fops,
NULL);
if (IS_ERR(p)) {
pr_debug("dma_buf: procfs: failed to create node dmaprocs\n");
proc_remove(dma_buf_procfs_root);
dma_buf_procfs_root = NULL;
err = PTR_ERR(dma_buf_procfs_root);
}
return err;
}
/*
 * Tear down the /proc/dma_buf tree created by dma_buf_init_procfs().
 * NOTE(review): relies on proc_remove() tolerating a NULL entry when
 * init failed — true in mainline; confirm for this tree.
 */
void dma_buf_uninit_procfs(void)
{
	proc_remove(dma_buf_procfs_root);
}
#endif /* defined(OPLUS_FEATURE_PERFORMANCE) && defined(CONFIG_PROC_FS) */
#else
static inline int dma_buf_init_debugfs(void)
{
@@ -1627,6 +1705,9 @@ static int __init dma_buf_init(void)
mutex_init(&db_list.lock);
INIT_LIST_HEAD(&db_list.head);
dma_buf_init_debugfs();
#if defined(OPLUS_FEATURE_PERFORMANCE) && defined(CONFIG_PROC_FS)
dma_buf_init_procfs();
#endif
return 0;
}
subsys_initcall(dma_buf_init);
@@ -1635,5 +1716,8 @@ static void __exit dma_buf_deinit(void)
{
dma_buf_uninit_debugfs();
kern_unmount(dma_buf_mnt);
#if defined(OPLUS_FEATURE_PERFORMANCE) && defined(CONFIG_PROC_FS)
dma_buf_uninit_procfs();
#endif
}
__exitcall(dma_buf_deinit);

View File

@@ -9,6 +9,16 @@
#include <linux/sched/clock.h>
#include <soc/qcom/sysmon.h>
#include "esoc-mdm.h"
#include <soc/oplus/system/boot_mode.h>
#ifdef OPLUS_BUG_STABILITY
/*Add for 5G modem dump*/
extern bool delay_panic;
#endif
#ifdef OPLUS_FEATURE_MODEM_MINIDUMP
#include <soc/qcom/subsystem_restart.h>
#endif /*OPLUS_FEATURE_MODEM_MINIDUMP*/
enum gpio_update_config {
GPIO_UPDATE_BOOTING_CONFIG = 1,
@@ -363,13 +373,42 @@ static void mdm_status_fn(struct work_struct *work)
mdm_update_gpio_configs(mdm, GPIO_UPDATE_RUNNING_CONFIG);
}
#ifdef OPLUS_FEATURE_MODEM_MINIDUMP
extern void mdmreason_set(char * buf);
#endif /*OPLUS_FEATURE_MODEM_MINIDUMP*/
#ifdef OPLUS_FEATURE_MODEM_MINIDUMP
/* Count how many times character @chr occurs in NUL-terminated string @p. */
static int strn(const char *p, const char chr)
{
	int occurrences = 0;

	for (; *p != '\0'; ++p) {
		if (*p == chr)
			occurrences++;
	}

	return occurrences;
}
#endif /*OPLUS_FEATURE_MODEM_MINIDUMP*/
bool modem_force_rst = false;
static void mdm_get_restart_reason(struct work_struct *work)
{
int ret, ntries = 0;
#ifdef OPLUS_FEATURE_MODEM_MINIDUMP
char mdmflag = ':';
int mdmnum,mdmret1,mdmret2,mdmret3=0;
char sfr_buf2[RD_BUF_SIZE];
#endif /*OPLUS_FEATURE_MODEM_MINIDUMP*/
char sfr_buf[RD_BUF_SIZE];
struct mdm_ctrl *mdm =
container_of(work, struct mdm_ctrl, restart_reason_work);
struct device *dev = mdm->dev;
#ifdef OPLUS_FEATURE_MODEM_MINIDUMP
const char *name = mdm->esoc->subsys.name;
#endif /*OPLUS_FEATURE_MODEM_MINIDUMP*/
do {
ret = sysmon_get_reason(&mdm->esoc->subsys, sfr_buf,
@@ -377,6 +416,35 @@ static void mdm_get_restart_reason(struct work_struct *work)
if (!ret) {
esoc_mdm_log("restart reason is %s\n", sfr_buf);
dev_err(dev, "mdm restart reason is %s\n", sfr_buf);
#ifdef OPLUS_FEATURE_MODEM_MINIDUMP
mdmnum = strn(sfr_buf,mdmflag);
mdmret1=0;
mdmret2=0;
mdmret3=0;
if(mdmnum >= 2)
{
mdmnum = 2;
}
while(mdmret3++ < mdmnum)
{
while(sfr_buf[mdmret1++] != mdmflag);
}
while(sfr_buf[mdmret1])
{
sfr_buf2[mdmret2++] = sfr_buf[mdmret1++];
}
sfr_buf2[mdmret2]='\0';
if (!strcmp(name , "esoc0")){
if (modem_force_rst) {
modem_force_rst = false;
mdmreason_set("Force modem reset");
__subsystem_send_uevent(dev, "Force modem reset");
} else {
mdmreason_set(sfr_buf2);
__subsystem_send_uevent(dev, sfr_buf2);
}
}
#endif /*OPLUS_FEATURE_MODEM_MINIDUMP*/
break;
}
msleep(SFR_RETRY_INTERVAL);
@@ -387,6 +455,17 @@ static void mdm_get_restart_reason(struct work_struct *work)
__func__, ret);
}
mdm->get_restart_reason = false;
#ifdef OPLUS_BUG_STABILITY
/*Add for 5G modem dump*/
if (delay_panic) {
snprintf(sfr_buf + strlen(sfr_buf), RD_BUF_SIZE - strlen(sfr_buf), " :SDX5x esoc0 modem crash");
dev_err(dev, "SDX5x trigger dump after 5s !\n");
msleep(5000);
mdm_power_down(mdm);
panic(sfr_buf);
}
#endif
}
void mdm_wait_for_status_low(struct mdm_ctrl *mdm, bool atomic)

View File

@@ -271,6 +271,23 @@ static void esoc_client_link_mdm_crash(struct esoc_clink *esoc_clink)
}
}
/*
 * Subsystem-restart hook: hard-reset the external modem by asserting its
 * soft-reset line (when the PON ops provide one).
 */
static void mdm_force_reset(const struct subsys_desc *mdm_subsys)
{
	struct esoc_clink *clink = container_of(mdm_subsys,
						struct esoc_clink, subsys);
	struct mdm_ctrl *ctrl = get_esoc_clink_data(clink);

	esoc_mdm_log("MDM force reset\n");

	if (ctrl->pon_ops->soft_reset)
		ctrl->pon_ops->soft_reset(ctrl, true);
}
static void mdm_crash_shutdown(const struct subsys_desc *mdm_subsys)
{
struct esoc_clink *esoc_clink =
@@ -557,6 +574,7 @@ static int mdm_register_ssr(struct esoc_clink *esoc_clink)
subsys->ramdump = mdm_subsys_ramdumps;
subsys->powerup = mdm_subsys_powerup;
subsys->crash_shutdown = mdm_crash_shutdown;
subsys->force_reset = mdm_force_reset;
return esoc_clink_register_ssr(esoc_clink);
}

View File

@@ -1,7 +1,9 @@
# SPDX-License-Identifier: GPL-2.0-only
config MSM_TZ_LOG
tristate "MSM Trust Zone (TZ) Log Driver"
depends on DEBUG_FS
#ifdef OPLUS_FEATURE_SECURITY_COMMON
#depends on DEBUG_FS
#endif
help
This option enables a driver with a debugfs interface for messages
produced by the Secure code (Trust zone). These messages provide

View File

@@ -23,6 +23,10 @@
#include <soc/qcom/qseecomi.h>
#include <soc/qcom/qtee_shmbridge.h>
//#ifdef OPLUS_FEATURE_SECURITY_COMMON
#include <linux/proc_fs.h>
#define TZDBG_DIR_NAME "tzdbg"
//#endif
/* QSEE_LOG_BUF_SIZE = 32K */
#define QSEE_LOG_BUF_SIZE 0x8000
@@ -1131,7 +1135,12 @@ static ssize_t tzdbgfs_read_unencrypted(struct file *file, char __user *buf,
size_t count, loff_t *offp)
{
int len = 0;
int tz_id = *(int *)(file->private_data);
//#ifdef OPLUS_FEATURE_SECURITY_COMMON
struct seq_file *seq = file->private_data;
int tz_id = *(int *)(seq->private);
//else
//int tz_id = *(int *)(file->private_data);
//#endif
if (tz_id == TZDBG_BOOT || tz_id == TZDBG_RESET ||
tz_id == TZDBG_INTERRUPT || tz_id == TZDBG_GENERAL ||
@@ -1253,10 +1262,27 @@ static ssize_t tzdbgfs_read(struct file *file, char __user *buf,
return tzdbgfs_read_encrypted(file, buf, count, offp);
}
//#ifdef OPLUS_FEATURE_SECURITY_COMMON
/*
 * procfs open: single_open() with no show callback; stashes the per-node
 * tz_id pointer (PDE_DATA) as seq->private, where
 * tzdbgfs_read_unencrypted() retrieves it.
 */
static int tzdbg_proc_open(struct inode *inode, struct file *file)
{
	return single_open(file, NULL, PDE_DATA(inode));
}
/* procfs release: undo the single_open() done in tzdbg_proc_open(). */
static int tzdbg_proc_release(struct inode *inode, struct file *file)
{
	return single_release(inode, file);
}
//#endif
static const struct file_operations tzdbg_fops = {
.owner = THIS_MODULE,
.read = tzdbgfs_read,
.open = simple_open,
//#ifdef OPLUS_FEATURE_SECURITY_COMMON
.open = tzdbg_proc_open,
.release = tzdbg_proc_release,
//else
//.open = simple_open,
//#endif
};
@@ -1410,6 +1436,63 @@ static void tzdbg_free_encrypted_log_buf(struct platform_device *pdev)
enc_qseelog_info.vaddr, enc_qseelog_info.paddr);
}
//#ifdef OPLUS_FEATURE_SECURITY_COMMON
//change tzdbg node to proc.
/*
 * Create /proc/tzdbg with one node per TZ debug stat, each node's
 * PDE data pointing at its index in tzdbg.debug_tz[], then allocate the
 * shared display buffer and stash the directory in platform drvdata for
 * tzdbg_procfs_exit().
 *
 * Fix: the disp_buf allocation failure path used to jump to the cleanup
 * label with rc still 0, so the probe reported success while leaving
 * tzdbg.disp_buf NULL. Set rc = -ENOMEM before bailing out.
 *
 * Returns 0 on success, -ENOMEM on any allocation/creation failure
 * (with the partially built /proc/tzdbg removed again).
 */
static int tzdbg_procfs_init(struct platform_device *pdev)
{
	int rc = 0;
	int i;
	struct proc_dir_entry *dent_dir;
	struct proc_dir_entry *dent;

	dent_dir = proc_mkdir(TZDBG_DIR_NAME, NULL);
	if (dent_dir == NULL) {
		dev_err(&pdev->dev, "tzdbg proc_mkdir failed\n");
		return -ENOMEM;
	}

	for (i = 0; i < TZDBG_STATS_MAX; i++) {
		tzdbg.debug_tz[i] = i;
		dent = proc_create_data(tzdbg.stat[i].name,
				0444, dent_dir,
				&tzdbg_fops, &tzdbg.debug_tz[i]);
		if (dent == NULL) {
			dev_err(&pdev->dev, "TZ proc_create_data failed\n");
			rc = -ENOMEM;
			goto err;
		}
	}

	tzdbg.disp_buf = kzalloc(max(debug_rw_buf_size,
			tzdbg.hyp_debug_rw_buf_size), GFP_KERNEL);
	if (tzdbg.disp_buf == NULL) {
		rc = -ENOMEM;	/* was: fell through with rc == 0 */
		goto err;
	}

	platform_set_drvdata(pdev, dent_dir);
	return 0;

err:
	/* dent_dir is always valid here; removes all children too */
	remove_proc_entry(TZDBG_DIR_NAME, NULL);
	return rc;
}
/*
 * Tear down tzdbg state on driver removal: deregister the shmbridge and
 * free the QSEE log buffer if it was set up, free the display buffer
 * allocated in tzdbg_procfs_init(), and remove the /proc/tzdbg tree
 * (retrieved from platform drvdata).
 * NOTE(review): the qsee-log cleanup lives here rather than in the
 * remove path proper, mirroring the original debugfs exit — verify
 * ordering against tz_log_remove().
 */
static void tzdbg_procfs_exit(struct platform_device *pdev)
{
	struct proc_dir_entry *dent_dir;

	if (g_qsee_log) {
		qtee_shmbridge_deregister(qseelog_shmbridge_handle);
		dma_free_coherent(&pdev->dev, QSEE_LOG_BUF_SIZE,
			(void *)g_qsee_log, coh_pmem);
	}
	kzfree(tzdbg.disp_buf);
	dent_dir = platform_get_drvdata(pdev);
	if(dent_dir){
		remove_proc_entry(TZDBG_DIR_NAME, NULL);
	}
}
//else
/*
static int tzdbgfs_init(struct platform_device *pdev)
{
int rc = 0;
@@ -1449,7 +1532,8 @@ static void tzdbgfs_exit(struct platform_device *pdev)
dent_dir = platform_get_drvdata(pdev);
debugfs_remove_recursive(dent_dir);
}
*/
//#endif
static int __update_hypdbg_base(struct platform_device *pdev,
void __iomem *virt_iobase)
{
@@ -1671,8 +1755,13 @@ static int tz_log_probe(struct platform_device *pdev)
goto exit_free_encr_log_buf;
}
if (tzdbgfs_init(pdev))
//#ifdef OPLUS_FEATURE_SECURITY_COMMON
if (tzdbg_procfs_init(pdev))
//else
//if (tzdbgfs_init(pdev))
//#endif
goto exit_free_disp_buf;
return 0;
exit_free_disp_buf:
@@ -1689,7 +1778,11 @@ exit_free_diag_buf:
static int tz_log_remove(struct platform_device *pdev)
{
tzdbgfs_exit(pdev);
//#ifdef OPLUS_FEATURE_SECURITY_COMMON
tzdbg_procfs_exit(pdev);
//else
//tzdbgfs_exit(pdev);
//#endif
dma_free_coherent(&pdev->dev, display_buf_size,
(void *)tzdbg.disp_buf, disp_buf_paddr);
tzdbg_free_encrypted_log_buf(pdev);

View File

@@ -33,6 +33,11 @@
#define CREATE_TRACE_POINTS
#include <trace/events/gpio.h>
#ifdef OPLUS_FEATURE_CHG_BASIC
extern bool oplus_vooc_adapter_update_is_tx_gpio(unsigned long gpio_num);
extern bool oplus_vooc_adapter_update_is_rx_gpio(unsigned long gpio_num);
#endif /* OPLUS_FEATURE_CHG_BASIC */
/* Implementation infrastructure for GPIO interfaces.
*
* The GPIO programming interface allows for inlining speed-critical
@@ -2839,9 +2844,24 @@ static int gpiod_get_raw_value_commit(const struct gpio_desc *desc)
chip = desc->gdev->chip;
offset = gpio_chip_hwgpio(desc);
#ifndef OPLUS_FEATURE_CHG_BASIC
value = chip->get ? chip->get(chip, offset) : -EIO;
value = value < 0 ? value : !!value;
trace_gpio_value(desc_to_gpio(desc), 1, value);
#else
if(oplus_vooc_adapter_update_is_rx_gpio(desc_to_gpio(desc))) {
if(chip->get_oplus_vooc) {
value = chip->get_oplus_vooc(chip, offset);
} else {
pr_err("%s get_oplus_vooc not exist\n", __func__);
value = chip->get ? chip->get(chip, offset) : 0;
}
} else {
value = chip->get ? chip->get(chip, offset) : -EIO;
value = !!value;
trace_gpio_value(desc_to_gpio(desc), 1, value);
}
#endif /* OPLUS_FEATURE_CHG_BASIC */
return value;
}
@@ -3076,8 +3096,22 @@ static void gpiod_set_raw_value_commit(struct gpio_desc *desc, bool value)
struct gpio_chip *chip;
chip = desc->gdev->chip;
#ifndef OPLUS_FEATURE_CHG_BASIC
trace_gpio_value(desc_to_gpio(desc), 0, value);
chip->set(chip, gpio_chip_hwgpio(desc), value);
#else
if(oplus_vooc_adapter_update_is_tx_gpio(desc_to_gpio(desc)) == false) {
trace_gpio_value(desc_to_gpio(desc), 0, value);
chip->set(chip, gpio_chip_hwgpio(desc), value);
} else {
if(chip->set_oplus_vooc) {
chip->set_oplus_vooc(chip, gpio_chip_hwgpio(desc), value);
} else {
pr_err("%s set_oplus_vooc not exist\n", __func__);
chip->set(chip, gpio_chip_hwgpio(desc), value);
}
}
#endif /* OPLUS_FEATURE_CHG_BASIC */
}
/*

View File

@@ -87,7 +87,9 @@ obj-y += tilcdc/
obj-$(CONFIG_DRM_QXL) += qxl/
obj-$(CONFIG_DRM_BOCHS) += bochs/
obj-$(CONFIG_DRM_VIRTIO_GPU) += virtio/
obj-$(CONFIG_DRM_MSM) += msm/
#ifdef OPLUS_BUG_STABILITY
#obj-$(CONFIG_DRM_MSM) += msm/
#endif
obj-$(CONFIG_DRM_TEGRA) += tegra/
obj-$(CONFIG_DRM_STM) += stm/
obj-$(CONFIG_DRM_STI) += sti/

View File

@@ -1059,7 +1059,11 @@ EXPORT_SYMBOL(mipi_dsi_dcs_set_tear_scanline);
int mipi_dsi_dcs_set_display_brightness(struct mipi_dsi_device *dsi,
u16 brightness)
{
#ifndef OPLUS_BUG_STABILITY
u8 payload[2] = { brightness & 0xff, brightness >> 8 };
#else /*OPLUS_BUG_STABILITY*/
u8 payload[2] = { brightness >> 8, brightness & 0xff};
#endif /*OPLUS_BUG_STABILITY*/
ssize_t err;
err = mipi_dsi_dcs_write(dsi, MIPI_DCS_SET_DISPLAY_BRIGHTNESS,

View File

@@ -958,6 +958,9 @@ static bool drm_mode_match_timings(const struct drm_display_mode *mode1,
mode1->vsync_start == mode2->vsync_start &&
mode1->vsync_end == mode2->vsync_end &&
mode1->vtotal == mode2->vtotal &&
#ifdef OPLUS_BUG_STABILITY
drm_mode_vrefresh(mode1) == drm_mode_vrefresh(mode2) &&
#endif /* OPLUS_BUG_STABILITY */
mode1->vscan == mode2->vscan;
}

View File

@@ -1,5 +1,8 @@
# SPDX-License-Identifier: GPL-2.0-only
ccflags-y += -I$(src)
#ifdef OPLUS_BUG_STABILITY
ccflags-y += -I$(srctree)/techpack/display/oplus
#endif /*OPLUS_BUG_STABILITY*/
msm_kgsl_core-y = \
kgsl.o \

View File

@@ -487,7 +487,7 @@ void a6xx_preemption_schedule(struct adreno_device *adreno_dev)
{
struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
if (!adreno_is_preemption_enabled(adreno_dev))
if (!ADRENO_FEATURE(adreno_dev, ADRENO_PREEMPTION))
return;
mutex_lock(&device->mutex);
@@ -602,7 +602,7 @@ void a6xx_preemption_start(struct adreno_device *adreno_dev)
struct adreno_ringbuffer *rb;
unsigned int i;
if (!adreno_is_preemption_enabled(adreno_dev))
if (!ADRENO_FEATURE(adreno_dev, ADRENO_PREEMPTION))
return;
/* Force the state to be clear */

View File

@@ -2211,6 +2211,10 @@ static int dispatcher_do_fault(struct adreno_device *adreno_dev)
adreno_readreg64(adreno_dev, ADRENO_REG_CP_RB_BASE,
ADRENO_REG_CP_RB_BASE_HI, &base);
#if defined(OPLUS_FEATURE_GPU_MINIDUMP)
device->snapshotfault = fault;
#endif /*OPLUS_FEATURE_GPU_MINIDUMP*/
/*
* Force the CP off for anything but a hard fault to make sure it is
* good and stopped

Some files were not shown because too many files have changed in this diff Show More