drivers: {cpufreq,mediatek}: Allow disabling MediaTek CPU policy

Change-Id: I19610741ae7e153d88164798050ede7158d09a12
Signed-off-by: bengris32 <bengris32@protonmail.ch>
This commit is contained in:
bengris32
2024-11-24 18:26:23 +00:00
committed by nisel
parent 7a84675fda
commit e8f6ddc79a
32 changed files with 643 additions and 86 deletions

View File

@@ -2365,6 +2365,37 @@ unlock:
}
EXPORT_SYMBOL(cpufreq_update_policy);
#if !IS_ENABLED(CONFIG_MTK_CPU_CTRL)
/*
 * cpufreq_set_policy_ppm - apply PPM-requested min/max limits to a CPU policy.
 * @cpu: CPU whose cpufreq policy is updated
 * @min: new minimum frequency limit
 * @max: new maximum frequency limit
 *
 * ppm will make min/max work through cpufreq_set_policy as well as
 * scaling_min_freq/scaling_max_freq, then ppm will also keep min/max in
 * policy->user_policy
 *
 * Silently returns if no policy exists for @cpu.
 */
void cpufreq_set_policy_ppm(unsigned int cpu, int min, int max)
{
	struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
	struct cpufreq_policy new_policy;
	int ret;
	if (!policy)
		return;
	down_write(&policy->rwsem);
	/* work on a scratch copy so a failed set leaves the policy untouched */
	memcpy(&new_policy, policy, sizeof(*policy));
	new_policy.min = min;
	new_policy.max = max;
	ret = cpufreq_set_policy(policy, &new_policy);
	if (!ret) {
		/* persist the limits so later policy re-evaluations keep them */
		policy->user_policy.min = min;
		policy->user_policy.max = max;
	}
	up_write(&policy->rwsem);
	cpufreq_cpu_put(policy);	/* drop ref taken by cpufreq_cpu_get() */
}
EXPORT_SYMBOL(cpufreq_set_policy_ppm);
#endif
/*********************************************************************
* BOOST *
*********************************************************************/

View File

@@ -451,6 +451,7 @@ config MTK_PERFORMANCE_MODULE
source "drivers/misc/mediatek/perf_common/Kconfig"
source "drivers/misc/mediatek/performance/Kconfig"
source "drivers/misc/mediatek/task_turbo/Kconfig"
source "drivers/misc/mediatek/usb_boost/Kconfig"
endmenu # PPT
menu "TinySys"

View File

@@ -162,7 +162,7 @@ obj-$(CONFIG_RT_REGMAP) += rt-regmap/
obj-$(CONFIG_MTK_VIDEOCODEC_DRIVER) += videocodec/
obj-$(CONFIG_MTK_FLASHLIGHT) += flashlight/
obj-$(CONFIG_RT_FLASHLIGHT) += flashlight/richtek/
obj-$(CONFIG_USB) += usb_boost/
obj-$(CONFIG_MTK_USB_BOOST) += usb_boost/
obj-$(CONFIG_USB_MTK_HDRC) += usb20/
ifeq ($(CONFIG_MACH_MT6781),y)
obj-$(CONFIG_MTK_MUSB_PHY) += usb20/mt6781

View File

@@ -47,6 +47,10 @@
#define MAX(a, b) ((a) >= (b) ? (a) : (b))
#define MIN(a, b) ((a) >= (b) ? (b) : (a))
#if !IS_ENABLED(CONFIG_MTK_CPU_CTRL)
extern void (*cpufreq_notifier_fp)(int cid, unsigned long freq);
#endif
/*
* LOCK
*/

View File

@@ -187,7 +187,11 @@ int Ripi_cpu_dvfs_thread(void *data)
struct mt_cpu_dvfs *p;
unsigned long flags;
uint32_t pwdata[4];
#if IS_ENABLED(CONFIG_MTK_CPU_CTRL)
struct cpufreq_freqs freqs;
#else
bool policy_update[NR_MT_CPU_DVFS] = { false };
#endif
int previous_limit = -1;
int previous_base = -1;
@@ -350,6 +354,7 @@ int Ripi_cpu_dvfs_thread(void *data)
if (j > p->idx_opp_ppm_base)
j = p->idx_opp_ppm_base;
#if IS_ENABLED(CONFIG_MTK_CPU_CTRL)
/* Update policy min/max */
p->mt_policy->min =
cpu_dvfs_get_freq_by_idx(p,
@@ -357,6 +362,16 @@ int Ripi_cpu_dvfs_thread(void *data)
p->mt_policy->max =
cpu_dvfs_get_freq_by_idx(p,
p->idx_opp_ppm_limit);
#else
/*
* since ppm will not use cpuhvfs_set_min_max,
* only sspm thermal will trigger this
*/
if (p->idx_opp_ppm_limit != previous_limit ||
p->idx_opp_ppm_base != previous_base) {
policy_update[i] = true;
}
#endif
#ifdef SINGLE_CLUSTER
cid = cpufreq_get_cluster_id(
@@ -387,6 +402,7 @@ int Ripi_cpu_dvfs_thread(void *data)
}
#endif
#if IS_ENABLED(CONFIG_MTK_CPU_CTRL)
#if defined(CONFIG_MACH_MT6893) || defined(CONFIG_MACH_MT6877) \
|| defined(CONFIG_MACH_MT6781)
if (p->mt_policy->cur > p->mt_policy->max) {
@@ -432,9 +448,21 @@ int Ripi_cpu_dvfs_thread(void *data)
p->mt_policy, &freqs, 0);
#endif
}
#endif
}
}
cpufreq_unlock(flags);
#if !IS_ENABLED(CONFIG_MTK_CPU_CTRL)
for_each_cpu_dvfs_only(i, p) {
if (policy_update[i] == false)
continue;
/* to make base/limit work */
cpufreq_update_policy(p->mt_policy->cpu);
policy_update[i] = false;
}
#endif
} while (!kthread_should_stop());
return 0;

View File

@@ -1090,6 +1090,29 @@ static unsigned int _calc_new_opp_idx(struct mt_cpu_dvfs *p, int new_opp_idx)
return new_opp_idx;
}
#if !IS_ENABLED(CONFIG_MTK_CPU_CTRL)
/*
 * ppm_limit_callback - PPM limit handler used when MTK_CPU_CTRL is disabled.
 * @req: per-cluster frequency-limit request from PPM (passed by value; the
 *       local pointer below is just an alias into the on-stack copy)
 *
 * Forwards the per-cluster min/max OPP-index limits requested by PPM into
 * the generic cpufreq policy (equivalent to writing scaling_min/max_freq)
 * instead of driving the MediaTek-private CPU control path.
 */
static void ppm_limit_callback(struct ppm_client_req req)
{
	struct ppm_client_req *ppm = (struct ppm_client_req *)&req;
	unsigned int i;
	int min, max;
	struct mt_cpu_dvfs *p;
	/* set ppm dvfs limit to policy, same as scaling_min/max_freq */
	for (i = 0; i < ppm->cluster_num; i++) {
		min = ppm->cpu_limit[i].min_cpufreq_idx;
		max = ppm->cpu_limit[i].max_cpufreq_idx;
		/* an advise frequency pins the cluster: min == max */
		if (ppm->cpu_limit[i].has_advise_freq)
			min = max = ppm->cpu_limit[i].advise_cpufreq_idx;
		p = id_to_cpu_dvfs(i);
		/* NOTE(review): assumes p->mt_policy is non-NULL here - confirm */
		if (p)
			cpufreq_set_policy_ppm(p->mt_policy->cpu,
				cpu_dvfs_get_freq_by_idx(p, min),
				cpu_dvfs_get_freq_by_idx(p, max));
	}
}
#else
static void ppm_limit_callback(struct ppm_client_req req)
{
struct ppm_client_req *ppm = (struct ppm_client_req *)&req;
@@ -1146,6 +1169,7 @@ static void ppm_limit_callback(struct ppm_client_req req)
_mt_cpufreq_dvfs_request_wrapper(NULL, 0, MT_CPU_DVFS_PPM, NULL);
#endif
}
#endif /* !IS_ENABLED(CONFIG_MTK_CPU_CTRL) */
/*
* cpufreq driver
@@ -1164,6 +1188,89 @@ static int _mt_cpufreq_verify(struct cpufreq_policy *policy)
return ret;
}
#if !IS_ENABLED(CONFIG_MTK_CPU_CTRL)
/*
 * _mt_cpufreq_target_index - cpufreq ->target_index hook (MTK_CPU_CTRL off).
 * @policy: cpufreq policy of the cluster being switched
 * @index:  index into policy->freq_table to transition to
 *
 * Performs the frequency transition under the driver's cpufreq lock.
 *
 * Return: 0 on success, -EINVAL if the cluster id has no DVFS data,
 * -EPERM while DVFS is disabled (global flag, suspend, or procfs).
 */
static int _mt_cpufreq_target_index(struct cpufreq_policy *policy, unsigned int index)
{
	struct mt_cpu_dvfs *p = NULL;
	unsigned int next_freq = policy->freq_table[index].frequency;
	int cid = _get_cpu_dvfs_id(policy->cpu);
	unsigned long flags;
	int j;
	int ret = 0;
	cpufreq_lock(flags);
	p = id_to_cpu_dvfs(cid);
	if (!p) {
		ret = -EINVAL;
		goto out;
	}
	if (dvfs_disable_flag || p->dvfs_disable_by_suspend ||
		p->dvfs_disable_by_procfs) {
		ret = -EPERM;
		goto out;
	}
	/* optional hook so other modules can observe the switch */
	if (cpufreq_notifier_fp)
		cpufreq_notifier_fp(cid, next_freq);
	/* program the new frequency for this cluster */
	mt_cpufreq_set_by_wfi_load_cluster(cid, next_freq);
	/* cache the matching OPP index for later lookups */
	j = _search_available_freq_idx(p, next_freq, CPUFREQ_RELATION_L);
	p->idx_opp_tbl = j;
	/* keep the scheduler's frequency-invariant load scale in sync */
	arch_set_freq_scale(policy->cpus, next_freq,
		policy->cpuinfo.max_freq);
out:
	cpufreq_unlock(flags);
	return ret;
}
/**
 * cpufreq_sspm_thermal_notifier - notifier callback for cpufreq policy change.
 * @nb: struct notifier_block * with callback info.
 * @event: value showing cpufreq event for which this function invoked.
 * @data: callback-specific data (struct cpufreq_policy * for policy events)
 *
 * Callback to hijack the notification on cpufreq policy transition.
 * Every time there is a change in policy, we will intercept and
 * update the cpufreq policy with sspm thermal.
 *
 * Return: NOTIFY_OK when the policy was clamped, NOTIFY_DONE otherwise.
 */
static int cpufreq_sspm_thermal_notifier(struct notifier_block *nb,
	unsigned long event, void *data)
{
	struct cpufreq_policy *policy = data;
	struct mt_cpu_dvfs *p = NULL;
	unsigned long flags;
	int max;
	/* only interested in the policy-adjust phase */
	if (event != CPUFREQ_ADJUST)
		return NOTIFY_DONE;
	cpufreq_lock(flags);
	p = id_to_cpu_dvfs(_get_cpu_dvfs_id(policy->cpu));
	if (!p) {
		cpufreq_unlock(flags);
		return NOTIFY_DONE;
	}
	/* clamp policy->max to the ceiling currently imposed via SSPM thermal */
	max = cpu_dvfs_get_freq_by_idx(p, p->idx_opp_ppm_limit);
	if (policy->max > max)
		cpufreq_verify_within_limits(policy, 0, max);
	cpufreq_unlock(flags);
	return NOTIFY_OK;
}
/* Notifier for cpufreq policy change */
static struct notifier_block _mt_cpufreq_notifier_block = {
	.notifier_call = cpufreq_sspm_thermal_notifier,
};
#else
static int _mt_cpufreq_target(struct cpufreq_policy *policy,
unsigned int target_freq, unsigned int relation)
{
@@ -1188,6 +1295,7 @@ static int _mt_cpufreq_target(struct cpufreq_policy *policy,
return 0;
}
#endif /* !IS_ENABLED(CONFIG_MTK_CPU_CTRL) */
#ifndef ONE_CLUSTER
int cci_is_inited;
@@ -1209,6 +1317,9 @@ static int _mt_cpufreq_init(struct cpufreq_policy *policy)
cpu_dev = get_cpu_device(policy->cpu);
policy->cpuinfo.transition_latency = 1000;
#if !IS_ENABLED(CONFIG_MTK_CPU_CTRL)
policy->dvfs_possible_from_any_cpu = true;
#endif
{
enum mt_cpu_dvfs_id id = _get_cpu_dvfs_id(policy->cpu);
@@ -1316,11 +1427,24 @@ static int _mt_cpufreq_exit(struct cpufreq_policy *policy)
static unsigned int _mt_cpufreq_get(unsigned int cpu)
{
struct mt_cpu_dvfs *p;
#if !IS_ENABLED(CONFIG_MTK_CPU_CTRL)
struct pll_ctrl_t *pll_p = NULL;
unsigned int cur_freq;
unsigned int j;
#endif
p = id_to_cpu_dvfs(_get_cpu_dvfs_id(cpu));
if (!p)
return 0;
#if !IS_ENABLED(CONFIG_MTK_CPU_CTRL)
if (p->mt_policy) {
pll_p = id_to_pll_ctrl(p->Pll_id);
cur_freq = pll_p->pll_ops->get_cur_freq(pll_p);
j = _search_available_freq_idx(p, cur_freq, CPUFREQ_RELATION_L);
return cpu_dvfs_get_freq_by_idx(p, j);
}
#endif
return cpu_dvfs_get_cur_freq(p);
}
@@ -1330,9 +1454,14 @@ static struct freq_attr *_mt_cpufreq_attr[] = {
};
static struct cpufreq_driver _mt_cpufreq_driver = {
#if IS_ENABLED(CONFIG_MTK_CPU_CTRL)
.flags = CPUFREQ_ASYNC_NOTIFICATION | CPUFREQ_HAVE_GOVERNOR_PER_POLICY,
.verify = _mt_cpufreq_verify,
.target = _mt_cpufreq_target,
#else /* IS_ENABLED(CONFIG_MTK_CPU_CTRL) */
.flags = CPUFREQ_HAVE_GOVERNOR_PER_POLICY,
.target_index = _mt_cpufreq_target_index,
#endif /* IS_ENABLED(CONFIG_MTK_CPU_CTRL) */
.verify = _mt_cpufreq_verify,
.init = _mt_cpufreq_init,
.exit = _mt_cpufreq_exit,
.get = _mt_cpufreq_get,
@@ -1644,6 +1773,10 @@ static int _mt_cpufreq_pdrv_probe(struct platform_device *pdev)
}
cpufreq_register_driver(&_mt_cpufreq_driver);
#if !IS_ENABLED(CONFIG_MTK_CPU_CTRL)
cpufreq_register_notifier(&_mt_cpufreq_notifier_block,
CPUFREQ_POLICY_NOTIFIER);
#endif
hp_online = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN,
"cpu_dvfs:online",
@@ -1676,6 +1809,10 @@ static int _mt_cpufreq_pdrv_remove(struct platform_device *pdev)
FUNC_ENTER(FUNC_LV_MODULE);
cpuhp_remove_state_nocalls(hp_online);
#if !IS_ENABLED(CONFIG_MTK_CPU_CTRL)
cpufreq_unregister_notifier(&_mt_cpufreq_notifier_block,
CPUFREQ_POLICY_NOTIFIER);
#endif
cpufreq_unregister_driver(&_mt_cpufreq_driver);
FUNC_EXIT(FUNC_LV_MODULE);

View File

@@ -178,7 +178,11 @@ int Ripi_cpu_dvfs_thread(void *data)
struct mt_cpu_dvfs *p;
unsigned long flags;
uint32_t pwdata[4];
#if IS_ENABLED(CONFIG_MTK_CPU_CTRL)
struct cpufreq_freqs freqs;
#else
bool policy_update[NR_MT_CPU_DVFS] = { false };
#endif
int previous_limit = -1;
int previous_base = -1;
@@ -346,6 +350,7 @@ int Ripi_cpu_dvfs_thread(void *data)
if (j > p->idx_opp_ppm_base)
j = p->idx_opp_ppm_base;
#if IS_ENABLED(CONFIG_MTK_CPU_CTRL)
/* Update policy min/max */
p->mt_policy->min =
@@ -357,7 +362,16 @@ int Ripi_cpu_dvfs_thread(void *data)
p->mt_policy->min =
(p->mt_policy->min > p->mt_policy->max) ?
p->mt_policy->max : p->mt_policy->min;
#else
/*
* since ppm will not use cpuhvfs_set_min_max,
* only sspm thermal will trigger this
*/
if (p->idx_opp_ppm_limit != previous_limit ||
p->idx_opp_ppm_base != previous_base) {
policy_update[i] = true;
}
#endif
#ifdef SINGLE_CLUSTER
cid =
cpufreq_get_cluster_id(p->mt_policy->cpu);
@@ -385,6 +399,7 @@ int Ripi_cpu_dvfs_thread(void *data)
}
#endif
#if IS_ENABLED(CONFIG_MTK_CPU_CTRL)
#if defined(CONFIG_MACH_MT6893) || defined(CONFIG_MACH_MT6877) \
|| defined(CONFIG_MACH_MT6781)
if (p->mt_policy->cur > p->mt_policy->max) {
@@ -430,10 +445,21 @@ int Ripi_cpu_dvfs_thread(void *data)
p->mt_policy, &freqs, 0);
#endif
}
#endif
}
}
cpufreq_unlock(flags);
#if !IS_ENABLED(CONFIG_MTK_CPU_CTRL)
for_each_cpu_dvfs_only(i, p) {
if (policy_update[i] == false)
continue;
/* to make base/limit work */
cpufreq_update_policy(p->mt_policy->cpu);
policy_update[i] = false;
}
#endif
} while (!kthread_should_stop());
return 0;
}

View File

@@ -1146,7 +1146,29 @@ static unsigned int _calc_new_opp_idx(struct mt_cpu_dvfs *p, int new_opp_idx)
return new_opp_idx;
}
#if !IS_ENABLED(CONFIG_MTK_CPU_CTRL)
/*
 * ppm_limit_callback - PPM limit handler used when MTK_CPU_CTRL is disabled.
 * @req: per-cluster frequency-limit request from PPM (passed by value; the
 *       local pointer below is just an alias into the on-stack copy)
 *
 * Forwards the per-cluster min/max OPP-index limits requested by PPM into
 * the generic cpufreq policy (equivalent to writing scaling_min/max_freq)
 * instead of driving the MediaTek-private CPU control path.
 */
static void ppm_limit_callback(struct ppm_client_req req)
{
	struct ppm_client_req *ppm = (struct ppm_client_req *)&req;
	unsigned int i;
	int min, max;
	struct mt_cpu_dvfs *p;
	/* set ppm dvfs limit to policy, same as scaling_min/max_freq */
	for (i = 0; i < ppm->cluster_num; i++) {
		min = ppm->cpu_limit[i].min_cpufreq_idx;
		max = ppm->cpu_limit[i].max_cpufreq_idx;
		/* an advise frequency pins the cluster: min == max */
		if (ppm->cpu_limit[i].has_advise_freq)
			min = max = ppm->cpu_limit[i].advise_cpufreq_idx;
		p = id_to_cpu_dvfs(i);
		/* NOTE(review): assumes p->mt_policy is non-NULL here - confirm */
		if (p)
			cpufreq_set_policy_ppm(p->mt_policy->cpu,
				cpu_dvfs_get_freq_by_idx(p, min),
				cpu_dvfs_get_freq_by_idx(p, max));
	}
}
#else
static void ppm_limit_callback(struct ppm_client_req req)
{
struct ppm_client_req *ppm = (struct ppm_client_req *)&req;
@@ -1204,6 +1226,7 @@ static void ppm_limit_callback(struct ppm_client_req req)
_mt_cpufreq_dvfs_request_wrapper(NULL, 0, MT_CPU_DVFS_PPM, NULL);
#endif
}
#endif /* !IS_ENABLED(CONFIG_MTK_CPU_CTRL) */
/*
* cpufreq driver
@@ -1222,6 +1245,89 @@ static int _mt_cpufreq_verify(struct cpufreq_policy *policy)
return ret;
}
#if !IS_ENABLED(CONFIG_MTK_CPU_CTRL)
/*
 * _mt_cpufreq_target_index - cpufreq ->target_index hook (MTK_CPU_CTRL off).
 * @policy: cpufreq policy of the cluster being switched
 * @index:  index into policy->freq_table to transition to
 *
 * Performs the frequency transition under the driver's cpufreq lock.
 *
 * Return: 0 on success, -EINVAL if the cluster id has no DVFS data,
 * -EPERM while DVFS is disabled (global flag, suspend, or procfs).
 */
static int _mt_cpufreq_target_index(struct cpufreq_policy *policy, unsigned int index)
{
	struct mt_cpu_dvfs *p = NULL;
	unsigned int next_freq = policy->freq_table[index].frequency;
	int cid = _get_cpu_dvfs_id(policy->cpu);
	unsigned long flags;
	int j;
	int ret = 0;
	cpufreq_lock(flags);
	p = id_to_cpu_dvfs(cid);
	if (!p) {
		ret = -EINVAL;
		goto out;
	}
	if (dvfs_disable_flag || p->dvfs_disable_by_suspend ||
		p->dvfs_disable_by_procfs) {
		ret = -EPERM;
		goto out;
	}
	/* optional hook so other modules can observe the switch */
	if (cpufreq_notifier_fp)
		cpufreq_notifier_fp(cid, next_freq);
	/* program the new frequency for this cluster */
	mt_cpufreq_set_by_wfi_load_cluster(cid, next_freq);
	/* cache the matching OPP index for later lookups */
	j = _search_available_freq_idx(p, next_freq, CPUFREQ_RELATION_L);
	p->idx_opp_tbl = j;
	/* keep the scheduler's frequency-invariant load scale in sync */
	arch_set_freq_scale(policy->cpus, next_freq,
		policy->cpuinfo.max_freq);
out:
	cpufreq_unlock(flags);
	return ret;
}
/**
 * cpufreq_sspm_thermal_notifier - notifier callback for cpufreq policy change.
 * @nb: struct notifier_block * with callback info.
 * @event: value showing cpufreq event for which this function invoked.
 * @data: callback-specific data (struct cpufreq_policy * for policy events)
 *
 * Callback to hijack the notification on cpufreq policy transition.
 * Every time there is a change in policy, we will intercept and
 * update the cpufreq policy with sspm thermal.
 *
 * Return: NOTIFY_OK when the policy was clamped, NOTIFY_DONE otherwise.
 */
static int cpufreq_sspm_thermal_notifier(struct notifier_block *nb,
	unsigned long event, void *data)
{
	struct cpufreq_policy *policy = data;
	struct mt_cpu_dvfs *p = NULL;
	unsigned long flags;
	int max;
	/* only interested in the policy-adjust phase */
	if (event != CPUFREQ_ADJUST)
		return NOTIFY_DONE;
	cpufreq_lock(flags);
	p = id_to_cpu_dvfs(_get_cpu_dvfs_id(policy->cpu));
	if (!p) {
		cpufreq_unlock(flags);
		return NOTIFY_DONE;
	}
	/* clamp policy->max to the ceiling currently imposed via SSPM thermal */
	max = cpu_dvfs_get_freq_by_idx(p, p->idx_opp_ppm_limit);
	if (policy->max > max)
		cpufreq_verify_within_limits(policy, 0, max);
	cpufreq_unlock(flags);
	return NOTIFY_OK;
}
/* Notifier for cpufreq policy change */
static struct notifier_block _mt_cpufreq_notifier_block = {
	.notifier_call = cpufreq_sspm_thermal_notifier,
};
#else
static int _mt_cpufreq_target(struct cpufreq_policy *policy,
unsigned int target_freq, unsigned int relation)
{
@@ -1246,6 +1352,7 @@ static int _mt_cpufreq_target(struct cpufreq_policy *policy,
return 0;
}
#endif /* !IS_ENABLED(CONFIG_MTK_CPU_CTRL) */
#ifndef ONE_CLUSTER
int cci_is_inited;
@@ -1267,6 +1374,9 @@ static int _mt_cpufreq_init(struct cpufreq_policy *policy)
cpu_dev = get_cpu_device(policy->cpu);
policy->cpuinfo.transition_latency = 1000;
#if !IS_ENABLED(CONFIG_MTK_CPU_CTRL)
policy->dvfs_possible_from_any_cpu = true;
#endif
#ifdef CPU_DVFS_NOT_READY
return 0;
@@ -1386,11 +1496,24 @@ static int _mt_cpufreq_exit(struct cpufreq_policy *policy)
static unsigned int _mt_cpufreq_get(unsigned int cpu)
{
struct mt_cpu_dvfs *p;
#if !IS_ENABLED(CONFIG_MTK_CPU_CTRL)
struct pll_ctrl_t *pll_p = NULL;
unsigned int cur_freq;
unsigned int j;
#endif
p = id_to_cpu_dvfs(_get_cpu_dvfs_id(cpu));
if (!p)
return 0;
#if !IS_ENABLED(CONFIG_MTK_CPU_CTRL)
if (p->mt_policy) {
pll_p = id_to_pll_ctrl(p->Pll_id);
cur_freq = pll_p->pll_ops->get_cur_freq(pll_p);
j = _search_available_freq_idx(p, cur_freq, CPUFREQ_RELATION_L);
return cpu_dvfs_get_freq_by_idx(p, j);
}
#endif
return cpu_dvfs_get_cur_freq(p);
}
@@ -1400,9 +1523,14 @@ static struct freq_attr *_mt_cpufreq_attr[] = {
};
static struct cpufreq_driver _mt_cpufreq_driver = {
#if IS_ENABLED(CONFIG_MTK_CPU_CTRL)
.flags = CPUFREQ_ASYNC_NOTIFICATION | CPUFREQ_HAVE_GOVERNOR_PER_POLICY,
.verify = _mt_cpufreq_verify,
.target = _mt_cpufreq_target,
#else /* IS_ENABLED(CONFIG_MTK_CPU_CTRL) */
.flags = CPUFREQ_HAVE_GOVERNOR_PER_POLICY,
.target_index = _mt_cpufreq_target_index,
#endif /* IS_ENABLED(CONFIG_MTK_CPU_CTRL) */
.verify = _mt_cpufreq_verify,
.init = _mt_cpufreq_init,
.exit = _mt_cpufreq_exit,
.get = _mt_cpufreq_get,
@@ -1722,6 +1850,10 @@ static int _mt_cpufreq_pdrv_probe(struct platform_device *pdev)
#endif
}
cpufreq_register_driver(&_mt_cpufreq_driver);
#if !IS_ENABLED(CONFIG_MTK_CPU_CTRL)
cpufreq_register_notifier(&_mt_cpufreq_notifier_block,
CPUFREQ_POLICY_NOTIFIER);
#endif
hp_online = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN,
"cpu_dvfs:online",
cpuhp_cpufreq_online,
@@ -1748,6 +1880,10 @@ static int _mt_cpufreq_pdrv_remove(struct platform_device *pdev)
FUNC_ENTER(FUNC_LV_MODULE);
cpuhp_remove_state_nocalls(hp_online);
#if !IS_ENABLED(CONFIG_MTK_CPU_CTRL)
cpufreq_unregister_notifier(&_mt_cpufreq_notifier_block,
CPUFREQ_POLICY_NOTIFIER);
#endif
cpufreq_unregister_driver(&_mt_cpufreq_driver);
FUNC_EXIT(FUNC_LV_MODULE);

View File

@@ -47,6 +47,10 @@
#define MAX(a, b) ((a) >= (b) ? (a) : (b))
#define MIN(a, b) ((a) >= (b) ? (b) : (a))
#if !IS_ENABLED(CONFIG_MTK_CPU_CTRL)
extern void (*cpufreq_notifier_fp)(int cid, unsigned long freq);
#endif
/*
* LOCK
*/

View File

@@ -108,7 +108,11 @@ int Ripi_cpu_dvfs_thread(void *data)
struct mt_cpu_dvfs *p;
unsigned long flags;
uint32_t pwdata[4];
#if IS_ENABLED(CONFIG_MTK_CPU_CTRL)
struct cpufreq_freqs freqs;
#else
bool policy_update[NR_MT_CPU_DVFS] = { false };
#endif
int previous_limit = -1;
int num_log;
@@ -233,6 +237,7 @@ int Ripi_cpu_dvfs_thread(void *data)
if (j > p->idx_opp_ppm_base)
j = p->idx_opp_ppm_base;
#if IS_ENABLED(CONFIG_MTK_CPU_CTRL)
/* Update policy min/max */
p->mt_policy->min =
cpu_dvfs_get_freq_by_idx(p,
@@ -240,6 +245,16 @@ int Ripi_cpu_dvfs_thread(void *data)
p->mt_policy->max =
cpu_dvfs_get_freq_by_idx(p,
p->idx_opp_ppm_limit);
#else
/*
* since ppm will not use cpuhvfs_set_min_max,
* only sspm thermal will trigger this
*/
if (p->idx_opp_ppm_limit != previous_limit ||
p->idx_opp_ppm_base != previous_base) {
policy_update[i] = true;
}
#endif
cid = arch_get_cluster_id(p->mt_policy->cpu);
if (cid == 0)
@@ -252,6 +267,7 @@ int Ripi_cpu_dvfs_thread(void *data)
met_tag_oneshot(0, "sched_dvfs_max_c2",
p->mt_policy->max);
#if IS_ENABLED(CONFIG_MTK_CPU_CTRL)
/* Policy notification */
if (p->idx_opp_tbl != j ||
(p->idx_opp_ppm_limit
@@ -266,10 +282,21 @@ int Ripi_cpu_dvfs_thread(void *data)
cpufreq_freq_transition_end(
p->mt_policy, &freqs, 0);
}
#endif
}
}
cpufreq_unlock(flags);
#if !IS_ENABLED(CONFIG_MTK_CPU_CTRL)
for_each_cpu_dvfs_only(i, p) {
if (policy_update[i] == false)
continue;
/* to make base/limit work */
cpufreq_update_policy(p->mt_policy->cpu);
policy_update[i] = false;
}
#endif
} while (!kthread_should_stop());
return 0;
}

View File

@@ -1001,6 +1001,29 @@ static unsigned int _calc_new_opp_idx(struct mt_cpu_dvfs *p, int new_opp_idx)
return new_opp_idx;
}
#if !IS_ENABLED(CONFIG_MTK_CPU_CTRL)
/*
 * ppm_limit_callback - PPM limit handler used when MTK_CPU_CTRL is disabled.
 * @req: per-cluster frequency-limit request from PPM (passed by value; the
 *       local pointer below is just an alias into the on-stack copy)
 *
 * Forwards the per-cluster min/max OPP-index limits requested by PPM into
 * the generic cpufreq policy (equivalent to writing scaling_min/max_freq)
 * instead of driving the MediaTek-private CPU control path.
 */
static void ppm_limit_callback(struct ppm_client_req req)
{
	struct ppm_client_req *ppm = (struct ppm_client_req *)&req;
	unsigned int i;
	int min, max;
	struct mt_cpu_dvfs *p;
	/* set ppm dvfs limit to policy, same as scaling_min/max_freq */
	for (i = 0; i < ppm->cluster_num; i++) {
		min = ppm->cpu_limit[i].min_cpufreq_idx;
		max = ppm->cpu_limit[i].max_cpufreq_idx;
		/* an advise frequency pins the cluster: min == max */
		if (ppm->cpu_limit[i].has_advise_freq)
			min = max = ppm->cpu_limit[i].advise_cpufreq_idx;
		p = id_to_cpu_dvfs(i);
		/* NOTE(review): assumes p->mt_policy is non-NULL here - confirm */
		if (p)
			cpufreq_set_policy_ppm(p->mt_policy->cpu,
				cpu_dvfs_get_freq_by_idx(p, min),
				cpu_dvfs_get_freq_by_idx(p, max));
	}
}
#else
static void ppm_limit_callback(struct ppm_client_req req)
{
struct ppm_client_req *ppm = (struct ppm_client_req *)&req;
@@ -1057,6 +1080,7 @@ static void ppm_limit_callback(struct ppm_client_req req)
_mt_cpufreq_dvfs_request_wrapper(NULL, 0, MT_CPU_DVFS_PPM, NULL);
#endif
}
#endif /* CONFIG_MTK_CPU_FREQ_STANDARDIZE */
#ifdef CONFIG_CPU_FREQ
/*
@@ -1076,6 +1100,89 @@ static int _mt_cpufreq_ver_dbgify(struct cpufreq_policy *policy)
return ret;
}
#if !IS_ENABLED(CONFIG_MTK_CPU_CTRL)
/*
 * _mt_cpufreq_target_index - cpufreq ->target_index hook (MTK_CPU_CTRL off).
 * @policy: cpufreq policy of the cluster being switched
 * @index:  index into policy->freq_table to transition to
 *
 * Performs the frequency transition under the driver's cpufreq lock.
 *
 * Return: 0 on success, -EINVAL if the cluster id has no DVFS data,
 * -EPERM while DVFS is disabled (global flag, suspend, or procfs).
 */
static int _mt_cpufreq_target_index(struct cpufreq_policy *policy, unsigned int index)
{
	struct mt_cpu_dvfs *p = NULL;
	unsigned int next_freq = policy->freq_table[index].frequency;
	int cid = _get_cpu_dvfs_id(policy->cpu);
	unsigned long flags;
	int j;
	int ret = 0;
	cpufreq_lock(flags);
	p = id_to_cpu_dvfs(cid);
	if (!p) {
		ret = -EINVAL;
		goto out;
	}
	if (dvfs_disable_flag || p->dvfs_disable_by_suspend ||
		p->dvfs_disable_by_procfs) {
		ret = -EPERM;
		goto out;
	}
	/* optional hook so other modules can observe the switch */
	if (cpufreq_notifier_fp)
		cpufreq_notifier_fp(cid, next_freq);
	/* program the new frequency for this cluster */
	mt_cpufreq_set_by_wfi_load_cluster(cid, next_freq);
	/* cache the matching OPP index for later lookups */
	j = _search_available_freq_idx(p, next_freq, CPUFREQ_RELATION_L);
	p->idx_opp_tbl = j;
	/* keep the scheduler's frequency-invariant load scale in sync */
	arch_set_freq_scale(policy->cpus, next_freq,
		policy->cpuinfo.max_freq);
out:
	cpufreq_unlock(flags);
	return ret;
}
/**
 * cpufreq_sspm_thermal_notifier - notifier callback for cpufreq policy change.
 * @nb: struct notifier_block * with callback info.
 * @event: value showing cpufreq event for which this function invoked.
 * @data: callback-specific data (struct cpufreq_policy * for policy events)
 *
 * Callback to hijack the notification on cpufreq policy transition.
 * Every time there is a change in policy, we will intercept and
 * update the cpufreq policy with sspm thermal.
 *
 * Return: NOTIFY_OK when the policy was clamped, NOTIFY_DONE otherwise.
 */
static int cpufreq_sspm_thermal_notifier(struct notifier_block *nb,
	unsigned long event, void *data)
{
	struct cpufreq_policy *policy = data;
	struct mt_cpu_dvfs *p = NULL;
	unsigned long flags;
	int max;
	/* only interested in the policy-adjust phase */
	if (event != CPUFREQ_ADJUST)
		return NOTIFY_DONE;
	cpufreq_lock(flags);
	p = id_to_cpu_dvfs(_get_cpu_dvfs_id(policy->cpu));
	if (!p) {
		cpufreq_unlock(flags);
		return NOTIFY_DONE;
	}
	/* clamp policy->max to the ceiling currently imposed via SSPM thermal */
	max = cpu_dvfs_get_freq_by_idx(p, p->idx_opp_ppm_limit);
	if (policy->max > max)
		cpufreq_verify_within_limits(policy, 0, max);
	cpufreq_unlock(flags);
	return NOTIFY_OK;
}
/* Notifier for cpufreq policy change */
static struct notifier_block _mt_cpufreq_notifier_block = {
	.notifier_call = cpufreq_sspm_thermal_notifier,
};
#else
static int _mt_cpufreq_target(struct cpufreq_policy *policy,
unsigned int target_freq,
unsigned int relation)
@@ -1101,6 +1208,7 @@ static int _mt_cpufreq_target(struct cpufreq_policy *policy,
return 0;
}
#endif /* CONFIG_MTK_CPU_FREQ_STANDARDIZE */
static int _mt_cpufreq_init(struct cpufreq_policy *policy)
{
@@ -1116,6 +1224,9 @@ static int _mt_cpufreq_init(struct cpufreq_policy *policy)
cpu_dev = get_cpu_device(policy->cpu);
policy->cpuinfo.transition_latency = 1000;
#if !IS_ENABLED(CONFIG_MTK_CPU_CTRL)
policy->dvfs_possible_from_any_cpu = true;
#endif
{
enum mt_cpu_dvfs_id id = _get_cpu_dvfs_id(policy->cpu);
@@ -1188,11 +1299,24 @@ static int _mt_cpufreq_exit(struct cpufreq_policy *policy)
static unsigned int _mt_cpufreq_get(unsigned int cpu)
{
struct mt_cpu_dvfs *p;
#if !IS_ENABLED(CONFIG_MTK_CPU_CTRL)
struct pll_ctrl_t *pll_p = NULL;
unsigned int cur_freq;
unsigned int j;
#endif
p = id_to_cpu_dvfs(_get_cpu_dvfs_id(cpu));
if (!p)
return 0;
#if !IS_ENABLED(CONFIG_MTK_CPU_CTRL)
if (p->mt_policy) {
pll_p = id_to_pll_ctrl(p->Pll_id);
cur_freq = pll_p->pll_ops->get_cur_freq(pll_p);
j = _search_available_freq_idx(p, cur_freq, CPUFREQ_RELATION_L);
return cpu_dvfs_get_freq_by_idx(p, j);
}
#endif
return cpu_dvfs_get_cur_freq(p);
}
@@ -1202,9 +1326,14 @@ static struct freq_attr *_mt_cpufreq_attr[] = {
};
static struct cpufreq_driver _mt_cpufreq_driver = {
#if IS_ENABLED(CONFIG_MTK_CPU_CTRL)
.flags = CPUFREQ_ASYNC_NOTIFICATION,
.verify = _mt_cpufreq_ver_dbgify,
.target = _mt_cpufreq_target,
#else
.flags = CPUFREQ_HAVE_GOVERNOR_PER_POLICY,
.target_index = _mt_cpufreq_target_index,
#endif
.verify = _mt_cpufreq_ver_dbgify,
.init = _mt_cpufreq_init,
.exit = _mt_cpufreq_exit,
.get = _mt_cpufreq_get,
@@ -1319,6 +1448,10 @@ static int _mt_cpufreq_pdrv_probe(struct platform_device *pdev)
}
#ifdef CONFIG_CPU_FREQ
cpufreq_register_driver(&_mt_cpufreq_driver);
#endif
#if !IS_ENABLED(CONFIG_MTK_CPU_CTRL)
cpufreq_register_notifier(&_mt_cpufreq_notifier_block,
CPUFREQ_POLICY_NOTIFIER);
#endif
hp_online = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN,
"cpu_dvfs:online",
@@ -1344,6 +1477,10 @@ static int _mt_cpufreq_pdrv_remove(struct platform_device *pdev)
{
FUNC_ENTER(FUNC_LV_MODULE);
cpuhp_remove_state_nocalls(hp_online);
#if !IS_ENABLED(CONFIG_MTK_CPU_CTRL)
cpufreq_unregister_notifier(&_mt_cpufreq_notifier_block,
CPUFREQ_POLICY_NOTIFIER);
#endif
#ifdef CONFIG_CPU_FREQ
cpufreq_unregister_driver(&_mt_cpufreq_driver);
#endif

View File

@@ -8,6 +8,13 @@ config MTK_ECCCI_DRIVER
eccci driver is disabled.
The option default disabled.
config MTK_ECCCI_NET_SPEED_MONITOR
bool "Enable ECCCI net speed monitor"
default y
depends on MTK_CPU_CTRL
help
Say Y here to enable ECCCI net speed monitor.
config MTK_ECCCI_CLDMA
tristate "ECCCI driver using CLDMA HW to communicate with LTE modem"
depends on MTK_ECCCI_DRIVER

View File

@@ -31,7 +31,7 @@ ccci_hif_all-y := ccci_hif.o \
ccci_hif_cldma.o \
ccci_hif_dpmaif.o dpmaif_drv.o ccci_debug_info.o
ccci_hif_all-y += net_speed_monitor.o
ccci_hif_all-$(CONFIG_MTK_ECCCI_NET_SPEED_MONITOR) += net_speed_monitor.o
ccci_hif_all-y += ccci_ringbuf.o ccci_hif_ccif.o
endif

View File

@@ -922,7 +922,7 @@ static int dpmaif_net_rx_push_thread(void *arg)
skb = ccci_skb_dequeue(&queue->skb_list);
if (!skb)
continue;
#ifdef MT6297
#if defined(MT6297) && defined(CONFIG_MTK_ECCCI_NET_SPEED_MONITOR)
mtk_ccci_add_dl_pkt_size(skb->len);
#endif
#ifndef CCCI_KMODULE_ENABLE
@@ -3101,7 +3101,7 @@ retry:
tx_force_md_assert("HW_REG_CHK_FAIL");
ret = 0;
}
#ifdef MT6297
#if defined(MT6297) && defined(CONFIG_MTK_ECCCI_NET_SPEED_MONITOR)
if (ret == 0)
mtk_ccci_add_ul_pkt_size(total_size);
#endif
@@ -4786,7 +4786,7 @@ int ccci_dpmaif_hif_init(struct device *dev)
&ccci_hif_dpmaif_ops);
register_syscore_ops(&dpmaif_sysops);
#ifdef MT6297
#if defined(MT6297) && defined(CONFIG_MTK_ECCCI_NET_SPEED_MONITOR)
mtk_ccci_speed_monitor_init();
#endif
atomic_set(&dpmaif_ctrl->suspend_flag, 0);

View File

@@ -19,6 +19,7 @@ config MTK_PERF_TRACKER
config MTK_CORE_CTL
bool "MTK Core Control with CPU Isolation"
depends on MTK_SCHED_EXTENSION
depends on MTK_CPU_CTRL
help
This config is for the core control policy with CPU isolation.
The core control online and offline CPU cores based on task

View File

@@ -1,15 +1,25 @@
config MTK_PERFMGR_TOUCH_BOOST
tristate "touch boost in performance manager"
config MTK_TOPO_CTRL
bool "MediaTek Topology information driver"
help
MTK_PERFMGR_TOUCH_BOOST is the kernel config of touch boost
feature designed by MTK, which is a performance improving
feature for user experience. When the device receives touch
event, it will raise system resources(CPU frequency / CPU cores)
for better user experience.
Say Y here to enable MediaTek Topology information driver.
This driver provides the topology information of the system
to userspace and kernel modules.
config MTK_CPU_CTRL
bool "CPU CTRL support"
depends on MTK_CPU_FREQ
depends on MTK_TOPO_CTRL
depends on !MTK_GKI_COMPAT
help
Say Y here to enable MediaTek's custom CPU controller.
This will prevent the standard cpufreq target interface
from working, so it should only be used on devices that
are using MediaTek's Power HAL.
config MTK_LOAD_TRACKER
tristate "CPU Loading Tracking Service"
depends on CPU_FREQ
depends on CPU_FREQ && MTK_CPU_CTRL
help
MTK_LOAD_TRACKER is the kernel config of CPU Loading Tracking
Service designed by MTK, which is a service to get CPU Loading
@@ -18,6 +28,7 @@ config MTK_LOAD_TRACKER
config MTK_CPU_CTRL_CFP
tristate "CPU CTRL Ceiling-Fool-Proof"
depends on MTK_CPU_CTRL
depends on MTK_LOAD_TRACKER
help
MTK_CPU_CTRL_CFP is the kernel config of CPU controller
@@ -25,6 +36,29 @@ config MTK_CPU_CTRL_CFP
will free CPU frequency ceiling during heavy CPU loading.
This function depends on MTK_LOAD_TRACKER.
config MTK_EAS_CTRL
bool "MediaTek EAS control"
depends on MTK_CPU_CTRL
help
Say Y here to enable MediaTek's EAS control driver.
This driver provides the EAS control interface to userspace.
config MTK_SYSLIMITER
bool "MTK system limiter support"
depends on MTK_CPU_CTRL
help
Say Y here to enable MediaTek's syslimiter driver.
config MTK_PERFMGR_TOUCH_BOOST
tristate "touch boost in performance manager"
depends on MTK_CPU_CTRL
help
MTK_PERFMGR_TOUCH_BOOST is the kernel config of touch boost
feature designed by MTK, which is a performance improving
feature for user experience. When the device receives touch
event, it will raise system resources(CPU frequency / CPU cores)
for better user experience.
config MTK_PERF_OBSERVER
bool "MTK system performance observer support"
help
@@ -43,6 +77,8 @@ config MTK_RESYM
config MTK_FPSGO_V3
bool "Support FPSGO_V3 framework"
depends on MTK_CPU_CTRL
depends on MTK_PERFMGR_TOUCH_BOOST
help
Support FPSGO_V3 framework for most FPS performance and
low power balance. As a prerequisite to ensure FPS performance,
@@ -76,4 +112,3 @@ config MTK_FRS
Support Frame Rate Smoother kernel interface. It will adjust the
target fps according to thermal.
If you are not sure about this, set n.

View File

@@ -25,7 +25,7 @@ obj-y += base/
obj-$(CONFIG_MTK_PERF_OBSERVER) += observer/
obj-$(CONFIG_MTK_RESYM) += resym/
obj-$(CONFIG_MTK_GBE) += gbe/
obj-y += syslimiter/
obj-$(CONFIG_MTK_SYSLIMITER) += syslimiter/
#ifeq (,$(findstring mt8,$(CONFIG_MTK_PLATFORM)))
obj-y += boost_ctrl/

View File

@@ -10,6 +10,6 @@
# GNU General Public License for more details.
#
obj-y += utility.o
ifeq ($(or $(CONFIG_MTK_CPU_CTRL),$(CONFIG_MTK_TOPO_CTRL),$(CONFIG_MTK_EAS_CTRL)),y)
obj-y += utility.o
endif

View File

@@ -10,24 +10,32 @@
int init_boostctrl(struct proc_dir_entry *parent)
{
struct proc_dir_entry *bstctrl_root = NULL;
#ifdef CONFIG_MTK_EAS_CTRL
struct proc_dir_entry *easctrl_root = NULL;
#endif
pr_debug("__init %s\n", __func__);
bstctrl_root = proc_mkdir("boost_ctrl", parent);
#ifdef CONFIG_MTK_TOPO_CTRL
/* init topology info first */
topo_ctrl_init(bstctrl_root);
#endif
#ifdef CONFIG_MTK_CPU_CTRL
cpu_ctrl_init(bstctrl_root);
#endif
dram_ctrl_init(bstctrl_root);
#ifdef CONFIG_MTK_EAS_CTRL
/* EAS */
easctrl_root = proc_mkdir("eas_ctrl", bstctrl_root);
uclamp_ctrl_init(easctrl_root);
eas_ctrl_init(easctrl_root);
#endif
return 0;
}

View File

@@ -11,23 +11,9 @@
# GNU General Public License for more details.
#
ifeq ($(CONFIG_MTK_PPM),y)
ifeq ($(CONFIG_MTK_CPU_CTRL),y)
obj-y += cpu_ctrl.o
else
# no CONFIG_MTK_PPM but specific legacy platform
ifneq (,$(filter $(CONFIG_MTK_PLATFORM), "mt6761" "mt6765" "mt6768" "mt6873" "mt6885" "mt6893" "mt6771" "mt6785" "mt6853" "mt6739"))
ifeq ($(CONFIG_MTK_GKI_COMPAT),y)
obj-y += cpu_ctrl_dummy.o
else
obj-y += cpu_ctrl.o
endif # CONFIG_MTK_GKI_COMPAT
else
obj-y += cpu_ctrl_dummy.o
endif
endif # CONFIG_MTK_PPM
ifeq ($(CONFIG_MACH_MT6893),y)
ccflags-y += \

View File

@@ -1,30 +0,0 @@
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (c) 2019 MediaTek Inc.
*/
#include <linux/module.h>
#include <mt-plat/cpu_ctrl.h>
#include <linux/proc_fs.h>
int powerhal_tid;
/*******************************************/
int update_userlimit_cpu_freq(int kicker, int num_cluster
, struct cpu_ctrl_data *freq_limit)
{
return 0;
}
EXPORT_SYMBOL(update_userlimit_cpu_freq);
int cpu_ctrl_init(struct proc_dir_entry *parent)
{
struct proc_dir_entry *boost_dir = NULL;
boost_dir = proc_mkdir("cpu_ctrl_dummy", parent);
if (!boost_dir)
pr_debug("boost_dir null\n ");
return 0;
}

View File

@@ -55,4 +55,4 @@ ccflags-y += \
-DMTK_K14_EAS_BOOST
endif
obj-y += eas_ctrl.o uclamp_ctrl.o
obj-$(CONFIG_MTK_EAS_CTRL) += eas_ctrl.o uclamp_ctrl.o

View File

@@ -14,4 +14,4 @@
ccflags-y += \
-I$(srctree)/kernel/
obj-y += topo_ctrl.o
obj-$(CONFIG_MTK_TOPO_CTRL) += topo_ctrl.o

View File

@@ -30,7 +30,7 @@ struct platform_device perfmgr_device = {
static int perfmgr_suspend(struct device *dev)
{
#ifdef CONFIG_MTK_PERFMGR_TOUCH_BOOST
#if defined(CONFIG_MTK_PERFMGR_TOUCH_BOOST) && !(defined(CONFIG_MTK_FPSGO) || defined(CONFIG_MTK_FPSGO_V3))
ktch_suspend();
#endif
return 0;
@@ -80,10 +80,13 @@ static int __init init_perfmgr(void)
pr_debug("MTK_TOUCH_BOOST function init_perfmgr_touch\n");
init_boostctrl(perfmgr_root);
#ifdef CONFIG_MTK_PERFMGR_TOUCH_BOOST
init_tchbst(perfmgr_root);
#endif
init_perfctl(perfmgr_root);
#ifdef CONFIG_MTK_SYSLIMITER
syslimiter_init(perfmgr_root);
#endif
#ifdef CONFIG_MTK_LOAD_TRACKER
init_uload_ind(NULL);
#endif

View File

@@ -10,12 +10,14 @@
# GNU General Public License for more details.
#
obj-y += tchbst_main.o
ifeq ($(CONFIG_MTK_PERFMGR_TOUCH_BOOST),y)
obj-$(CONFIG_MTK_FPSGO) += user/
obj-$(CONFIG_MTK_FPSGO_V3) += user/
obj-y += tchbst_main.o
#ifeq ($(CONFIG_MTK_PERFMGR_TOUCH_BOOST),y)
obj-$(CONFIG_MTK_PERFMGR_TOUCH_BOOST) += kernel/
#endif # CONFIG_MTK_PERFMGR_TOUCH_BOOST
ifneq ($(CONFIG_MTK_FPSGO_V3),y)
ifneq ($(CONFIG_MTK_FPSGO),y)
obj-y += kernel/
endif
endif
endif

View File

@@ -16,13 +16,14 @@ int init_tchbst(struct proc_dir_entry *parent)
/*create touch root procfs*/
tchbst_root = proc_mkdir("tchbst", parent);
#ifdef CONFIG_MTK_PERFMGR_TOUCH_BOOST
/*initial kernel touch parameter*/
init_ktch(tchbst_root);
#endif
#if defined(CONFIG_MTK_FPSGO) || defined(CONFIG_MTK_FPSGO_V3)
/*initial user touch parameter*/
init_utch(tchbst_root);
#endif
#else /* defined(CONFIG_MTK_FPSGO) || defined(CONFIG_MTK_FPSGO_V3) */
#ifdef CONFIG_MTK_PERFMGR_TOUCH_BOOST
/*initial kernel touch parameter*/
init_ktch(tchbst_root);
#endif /* CONFIG_MTK_PERFMGR_TOUCH_BOOST */
#endif /* defined(CONFIG_MTK_FPSGO) || defined(CONFIG_MTK_FPSGO_V3) */
return 0;
}

View File

@@ -0,0 +1,7 @@
config MTK_USB_BOOST
bool "MediaTek USB boost driver"
depends on USB && MTK_CPU_CTRL
help
Say Y here if you want to enable USB boost driver.
This driver is used to boost CPU frequency and DRAM voltage
when USB is plugged in.

View File

@@ -1,4 +1,4 @@
obj-y := usb_boost.o
obj-$(CONFIG_MTK_USB_BOOST) := usb_boost.o
obj-$(CONFIG_MACH_MT6739) += v1/
obj-$(CONFIG_MACH_MT6761) += v1/
obj-$(CONFIG_MACH_MT6779) += v1/

View File

@@ -5,6 +5,7 @@
#ifndef _MTK_USB_BOOST_H
#define _MTK_USB_BOOST_H
#ifdef CONFIG_MTK_USB_BOOST
enum{
TYPE_CPU_FREQ,
TYPE_CPU_CORE,
@@ -44,5 +45,7 @@ void register_usb_boost_act(int type_id, int action_id,
#else
#define USB_BOOST_DBG(fmt, args...) do {} while (0)
#endif
#else
static inline void usb_boost(void) {}
#endif
#endif

View File

@@ -1,7 +1,7 @@
ccflags-y += -I$(srctree)/drivers/misc/mediatek/usb_boost
ccflags-y += -I$(srctree)/drivers/misc/mediatek/include/mt-plat
ccflags-y += -I$(srctree)/drivers/devfreq
obj-y := usb_boost_plat.o
obj-$(CONFIG_MTK_USB_BOOST) := usb_boost_plat.o
ifeq (y, $(filter y, $(CONFIG_MACH_MT6739) $(CONFIG_MACH_MT6768) $(CONFIG_MACH_MT6781) \
$(CONFIG_MACH_MT6833) $(CONFIG_MACH_MT6877) $(CONFIG_MACH_MT6893)))

View File

@@ -192,6 +192,9 @@ void disable_cpufreq(void);
u64 get_cpu_idle_time(unsigned int cpu, u64 *wall, int io_busy);
int cpufreq_get_policy(struct cpufreq_policy *policy, unsigned int cpu);
void cpufreq_update_policy(unsigned int cpu);
#if !IS_ENABLED(CONFIG_MTK_CPU_CTRL)
void cpufreq_set_policy_ppm(unsigned int cpu, int min, int max);
#endif
bool have_governor_per_policy(void);
struct kobject *get_governor_parent_kobj(struct cpufreq_policy *policy);
void cpufreq_enable_fast_switch(struct cpufreq_policy *policy);

View File

@@ -59,7 +59,7 @@ config MTK_SCHED_CPU_PREFER
config MTK_SCHED_BIG_TASK_MIGRATE
bool "mtk scheduling big task migrate"
depends on MTK_SCHED_EXTENSION && SMP
depends on MTK_SCHED_EXTENSION && MTK_EAS_CTRL && SMP
help
Migrate misfit task more efficient in scheduler tick.
1. Big task migration: migrate misfit task in scheduler tick