diff --git a/arch/arm64/configs/raphael_defconfig b/arch/arm64/configs/raphael_defconfig index 5a06bbe21533..5548e5273587 100644 --- a/arch/arm64/configs/raphael_defconfig +++ b/arch/arm64/configs/raphael_defconfig @@ -1,6 +1,6 @@ # # Automatically generated file; DO NOT EDIT. -# Linux/arm64 4.14.190 Kernel Configuration +# Linux/arm64 4.14.189 Kernel Configuration # CONFIG_ARM64=y CONFIG_64BIT=y @@ -168,7 +168,6 @@ CONFIG_NET_NS=y # CONFIG_SCHED_AUTOGROUP is not set CONFIG_SCHED_TUNE=y # CONFIG_CPUSETS_ASSIST is not set -CONFIG_DYNAMIC_STUNE_BOOST=y CONFIG_DEFAULT_USE_ENERGY_AWARE=y # CONFIG_SYSFS_DEPRECATED is not set # CONFIG_RELAY is not set @@ -733,10 +732,11 @@ CONFIG_CPU_FREQ_GOV_POWERSAVE=y # CONFIG_CPU_FREQ_GOV_USERSPACE is not set # CONFIG_CPU_FREQ_GOV_ONDEMAND is not set # CONFIG_CPU_FREQ_GOV_CONSERVATIVE is not set -CONFIG_CPU_BOOST=y +# CONFIG_CPU_BOOST is not set CONFIG_CPU_FREQ_GOV_SCHEDUTIL=y +CONFIG_SCHEDUTIL_UP_RATE_LIMIT=500 +CONFIG_SCHEDUTIL_DOWN_RATE_LIMIT=20000 # CONFIG_CPU_FREQ_GOV_INTERACTIVE is not set -# CONFIG_CPU_INPUT_BOOST is not set # # CPU frequency scaling drivers diff --git a/drivers/cpufreq/cpu-boost.c b/drivers/cpufreq/cpu-boost.c index da4884795cdf..b6ceb2133b9b 100644 --- a/drivers/cpufreq/cpu-boost.c +++ b/drivers/cpufreq/cpu-boost.c @@ -1,5 +1,6 @@ /* * Copyright (c) 2013-2015,2017, The Linux Foundation. All rights reserved. + * Copyright (C) 2019 XiaoMi, Inc. 
* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -17,46 +18,60 @@ #include #include #include +#include #include #include #include #include #include +#include + +#include struct cpu_sync { int cpu; unsigned int input_boost_min; unsigned int input_boost_freq; + unsigned int powerkey_input_boost_freq; +}; + +enum input_boost_type { + default_input_boost, + powerkey_input_boost }; static DEFINE_PER_CPU(struct cpu_sync, sync_info); -static struct workqueue_struct *cpu_boost_wq; -static struct work_struct input_boost_work; +static struct kthread_work input_boost_work; + +static struct kthread_work powerkey_input_boost_work; static bool input_boost_enabled; static unsigned int input_boost_ms = 40; module_param(input_boost_ms, uint, 0644); +static unsigned int powerkey_input_boost_ms = 400; +module_param(powerkey_input_boost_ms, uint, 0644); + static unsigned int sched_boost_on_input; module_param(sched_boost_on_input, uint, 0644); -static bool sched_boost_active; +static bool sched_boost_on_powerkey_input = true; +module_param(sched_boost_on_powerkey_input, bool, 0644); -#ifdef CONFIG_DYNAMIC_STUNE_BOOST -static int dynamic_stune_boost = 1; -module_param(dynamic_stune_boost, uint, 0644); -static bool stune_boost_active; -static int boost_slot; -static unsigned int dynamic_stune_boost_ms = 40; -module_param(dynamic_stune_boost_ms, uint, 0644); -static struct delayed_work dynamic_stune_boost_rem; -#endif /* CONFIG_DYNAMIC_STUNE_BOOST */ +static bool sched_boost_active; static struct delayed_work input_boost_rem; static u64 last_input_time; -#define MIN_INPUT_INTERVAL (150 * USEC_PER_MSEC) + +static struct kthread_worker cpu_boost_worker; +static struct task_struct *cpu_boost_worker_thread; + +static struct kthread_worker powerkey_cpu_boost_worker; +static struct task_struct *powerkey_cpu_boost_worker_thread; + +#define MIN_INPUT_INTERVAL (100 * USEC_PER_MSEC) static int 
set_input_boost_freq(const char *buf, const struct kernel_param *kp) { @@ -64,6 +79,12 @@ static int set_input_boost_freq(const char *buf, const struct kernel_param *kp) unsigned int val, cpu; const char *cp = buf; bool enabled = false; + enum input_boost_type type; + + if (strstr(kp->name, "input_boost_freq")) + type = default_input_boost; + if (strstr(kp->name, "powerkey_input_boost_freq")) + type = powerkey_input_boost; while ((cp = strpbrk(cp + 1, " :"))) ntokens++; @@ -72,8 +93,12 @@ static int set_input_boost_freq(const char *buf, const struct kernel_param *kp) if (!ntokens) { if (sscanf(buf, "%u\n", &val) != 1) return -EINVAL; - for_each_possible_cpu(i) - per_cpu(sync_info, i).input_boost_freq = val; + for_each_possible_cpu(i) { + if (type == default_input_boost) + per_cpu(sync_info, i).input_boost_freq = val; + else if (type == powerkey_input_boost) + per_cpu(sync_info, i).powerkey_input_boost_freq = val; + } goto check_enable; } @@ -88,14 +113,18 @@ static int set_input_boost_freq(const char *buf, const struct kernel_param *kp) if (cpu >= num_possible_cpus()) return -EINVAL; - per_cpu(sync_info, cpu).input_boost_freq = val; + if (type == default_input_boost) + per_cpu(sync_info, cpu).input_boost_freq = val; + else if (type == powerkey_input_boost) + per_cpu(sync_info, cpu).powerkey_input_boost_freq = val; cp = strnchr(cp, PAGE_SIZE - (cp - buf), ' '); cp++; } check_enable: for_each_possible_cpu(i) { - if (per_cpu(sync_info, i).input_boost_freq) { + if (per_cpu(sync_info, i).input_boost_freq + || per_cpu(sync_info, i).powerkey_input_boost_freq) { enabled = true; break; } @@ -109,11 +138,22 @@ static int get_input_boost_freq(char *buf, const struct kernel_param *kp) { int cnt = 0, cpu; struct cpu_sync *s; + unsigned int boost_freq = 0; + enum input_boost_type type; + + if (strstr(kp->name, "input_boost_freq")) + type = default_input_boost; + if (strstr(kp->name, "powerkey_input_boost_freq")) + type = powerkey_input_boost; for_each_possible_cpu(cpu) { s = 
&per_cpu(sync_info, cpu); + if (type == default_input_boost) + boost_freq = s->input_boost_freq; + else if(type == powerkey_input_boost) + boost_freq = s->powerkey_input_boost_freq; cnt += snprintf(buf + cnt, PAGE_SIZE - cnt, - "%d:%u ", cpu, s->input_boost_freq); + "%d:%u ", cpu, boost_freq); } cnt += snprintf(buf + cnt, PAGE_SIZE - cnt, "\n"); return cnt; @@ -125,6 +165,8 @@ static const struct kernel_param_ops param_ops_input_boost_freq = { }; module_param_cb(input_boost_freq, ¶m_ops_input_boost_freq, NULL, 0644); +module_param_cb(powerkey_input_boost_freq, ¶m_ops_input_boost_freq, NULL, 0644); + /* * The CPUFREQ_ADJUST notifier is used to override the current policy min to * make sure policy min >= boost_min. The cpufreq framework then does the job @@ -197,38 +239,17 @@ static void do_input_boost_rem(struct work_struct *work) } } -#ifdef CONFIG_DYNAMIC_STUNE_BOOST -static void do_dynamic_stune_boost_rem(struct work_struct *work) -{ - /* Reset dynamic stune boost value to the default value */ - if (stune_boost_active) { - reset_stune_boost("top-app", boost_slot); - stune_boost_active = false; - } -} -#endif /* CONFIG_DYNAMIC_STUNE_BOOST */ - -static void do_input_boost(struct work_struct *work) +static void do_input_boost(struct kthread_work *work) { unsigned int i, ret; struct cpu_sync *i_sync_info; -#ifdef CONFIG_DYNAMIC_STUNE_BOOST - cancel_delayed_work_sync(&dynamic_stune_boost_rem); -#endif /* CONFIG_DYNAMIC_STUNE_BOOST */ cancel_delayed_work_sync(&input_boost_rem); if (sched_boost_active) { sched_set_boost(0); sched_boost_active = false; } -#ifdef CONFIG_DYNAMIC_STUNE_BOOST - if (stune_boost_active) { - reset_stune_boost("top-app", boost_slot); - stune_boost_active = false; - } -#endif /* CONFIG_DYNAMIC_STUNE_BOOST */ - /* Set the input_boost_min for all CPUs in the system */ pr_debug("Setting input boost min for all CPUs\n"); for_each_possible_cpu(i) { @@ -248,18 +269,41 @@ static void do_input_boost(struct work_struct *work) sched_boost_active = true; } 
-#ifdef CONFIG_DYNAMIC_STUNE_BOOST - /* Set dynamic stune boost value */ - ret = do_stune_boost("top-app", dynamic_stune_boost, &boost_slot); - if (!ret) - stune_boost_active = true; + schedule_delayed_work(&input_boost_rem, msecs_to_jiffies(input_boost_ms)); +} - queue_delayed_work(cpu_boost_wq, &dynamic_stune_boost_rem, - msecs_to_jiffies(dynamic_stune_boost_ms)); -#endif /* CONFIG_DYNAMIC_STUNE_BOOST */ +static void do_powerkey_input_boost(struct kthread_work *work) +{ - queue_delayed_work(cpu_boost_wq, &input_boost_rem, - msecs_to_jiffies(input_boost_ms)); + unsigned int i, ret; + struct cpu_sync *i_sync_info; + cancel_delayed_work_sync(&input_boost_rem); + if (sched_boost_active) { + sched_set_boost(0); + sched_boost_active = false; + } + + /* Set the powerkey_input_boost_min for all CPUs in the system */ + pr_debug("Setting powerkey input boost min for all CPUs\n"); + for_each_possible_cpu(i) { + i_sync_info = &per_cpu(sync_info, i); + i_sync_info->input_boost_min = i_sync_info->powerkey_input_boost_freq; + } + + /* Update policies for all online CPUs */ + update_policy_online(); + + /* Enable scheduler boost to migrate tasks to big cluster */ + if (sched_boost_on_powerkey_input) { + ret = sched_set_boost(1); + if (ret) + pr_err("cpu-boost: HMP boost enable failed\n"); + else + sched_boost_active = true; + } + + schedule_delayed_work(&input_boost_rem, + msecs_to_jiffies(powerkey_input_boost_ms)); } static void cpuboost_input_event(struct input_handle *handle, @@ -274,13 +318,40 @@ static void cpuboost_input_event(struct input_handle *handle, if (now - last_input_time < MIN_INPUT_INTERVAL) return; - if (work_pending(&input_boost_work)) + if (queuing_blocked(&cpu_boost_worker, &input_boost_work)) return; - queue_work(cpu_boost_wq, &input_boost_work); + kthread_queue_work(&cpu_boost_worker, &input_boost_work); + + if ((type == EV_KEY && code == KEY_POWER) || + (type == EV_KEY && code == KEY_WAKEUP)) { + kthread_queue_work(&powerkey_cpu_boost_worker, 
&powerkey_input_boost_work); + } else { + kthread_queue_work(&cpu_boost_worker, &input_boost_work); + } last_input_time = ktime_to_us(ktime_get()); } +void touch_irq_boost(void) +{ + u64 now; + + if (!input_boost_enabled) + return; + + now = ktime_to_us(ktime_get()); + if (now - last_input_time < MIN_INPUT_INTERVAL) + return; + + if (queuing_blocked(&cpu_boost_worker, &input_boost_work)) + return; + + kthread_queue_work(&cpu_boost_worker, &input_boost_work); + + last_input_time = ktime_to_us(ktime_get()); +} +EXPORT_SYMBOL(touch_irq_boost); + static int cpuboost_input_connect(struct input_handler *handler, struct input_dev *dev, const struct input_device_id *id) { @@ -313,11 +384,6 @@ err2: static void cpuboost_input_disconnect(struct input_handle *handle) { -#ifdef CONFIG_DYNAMIC_STUNE_BOOST - /* Reset dynamic stune boost value to the default value */ - reset_stune_boost("top-app", boost_slot); -#endif /* CONFIG_DYNAMIC_STUNE_BOOST */ - input_close_device(handle); input_unregister_handle(handle); kfree(handle); @@ -359,18 +425,52 @@ static struct input_handler cpuboost_input_handler = { static int cpu_boost_init(void) { - int cpu, ret; + int cpu, ret, i; struct cpu_sync *s; + struct sched_param param = { .sched_priority = 2 }; + cpumask_t sys_bg_mask; - cpu_boost_wq = alloc_workqueue("cpuboost_wq", WQ_HIGHPRI, 0); - if (!cpu_boost_wq) + /* Hardcode the cpumask to bind the kthread to it */ + cpumask_clear(&sys_bg_mask); + for (i = 0; i <= 3; i++) { + cpumask_set_cpu(i, &sys_bg_mask); + } + + kthread_init_worker(&cpu_boost_worker); + cpu_boost_worker_thread = kthread_create(kthread_worker_fn, + &cpu_boost_worker, "cpu_boost_worker_thread"); + if (IS_ERR(cpu_boost_worker_thread)) { + pr_err("cpu-boost: Failed to init kworker!\n"); return -EFAULT; + } - INIT_WORK(&input_boost_work, do_input_boost); + ret = sched_setscheduler(cpu_boost_worker_thread, SCHED_FIFO, ¶m); + if (ret) + pr_err("cpu-boost: Failed to set SCHED_FIFO!\n"); + + 
kthread_init_worker(&powerkey_cpu_boost_worker); + powerkey_cpu_boost_worker_thread = kthread_create(kthread_worker_fn, + &powerkey_cpu_boost_worker, "powerkey_cpu_boost_worker_thread"); + if (IS_ERR(powerkey_cpu_boost_worker_thread)) { + pr_err("powerkey_cpu-boost: Failed to init kworker!\n"); + return -EFAULT; + } + + ret = sched_setscheduler(powerkey_cpu_boost_worker_thread, SCHED_FIFO, ¶m); + if (ret) + pr_err("powerkey_cpu-boost: Failed to set SCHED_FIFO!\n"); + + /* Now bind it to the cpumask */ + kthread_bind_mask(cpu_boost_worker_thread, &sys_bg_mask); + kthread_bind_mask(powerkey_cpu_boost_worker_thread, &sys_bg_mask); + + /* Wake it up! */ + wake_up_process(cpu_boost_worker_thread); + wake_up_process(powerkey_cpu_boost_worker_thread); + + kthread_init_work(&input_boost_work, do_input_boost); + kthread_init_work(&powerkey_input_boost_work, do_powerkey_input_boost); INIT_DELAYED_WORK(&input_boost_rem, do_input_boost_rem); -#ifdef CONFIG_DYNAMIC_STUNE_BOOST - INIT_DELAYED_WORK(&dynamic_stune_boost_rem, do_dynamic_stune_boost_rem); -#endif /* CONFIG_DYNAMIC_STUNE_BOOST */ for_each_possible_cpu(cpu) { s = &per_cpu(sync_info, cpu); diff --git a/include/linux/kthread.h b/include/linux/kthread.h index 144006195c5c..413b05795a65 100644 --- a/include/linux/kthread.h +++ b/include/linux/kthread.h @@ -187,6 +187,19 @@ extern void __kthread_init_worker(struct kthread_worker *worker, TIMER_IRQSAFE); \ } while (0) +/* + * Returns true when the work could not be queued at the moment. + * It happens when it is already pending in a worker list + * or when it is being cancelled. 
+ */ +static inline bool queuing_blocked(struct kthread_worker *worker, + struct kthread_work *work) +{ + lockdep_assert_held(&worker->lock); + + return !list_empty(&work->node) || work->canceling; +} + int kthread_worker_fn(void *worker_ptr); __printf(2, 3) diff --git a/include/linux/sched.h b/include/linux/sched.h index d7d9191133ab..bff0ed6de4c7 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -2012,10 +2012,4 @@ static inline void set_wake_up_idle(bool enabled) current->flags &= ~PF_WAKE_UP_IDLE; } -#ifdef CONFIG_DYNAMIC_STUNE_BOOST -int do_stune_boost(char *st_name, int boost, int *slot); -int do_stune_sched_boost(char *st_name, int *slot); -int reset_stune_boost(char *st_name, int slot); -#endif /* CONFIG_DYNAMIC_STUNE_BOOST */ - #endif diff --git a/init/Kconfig b/init/Kconfig index 739dc7ddac5d..6c14e5499fb9 100644 --- a/init/Kconfig +++ b/init/Kconfig @@ -1170,15 +1170,6 @@ config CPUSET_TOP_APP endif -config DYNAMIC_STUNE_BOOST - bool "Dynamic SchedTune boosting support" - depends on SCHED_TUNE - help - This option extends the SchedTune framework and provides APIs to - activate and reset SchedTune boosting from anywhere in the kernel. - - If unsure, say N. - config DEFAULT_USE_ENERGY_AWARE bool "Default to enabling the Energy Aware Scheduler feature" default n diff --git a/kernel/kthread.c b/kernel/kthread.c index 993384401c9a..a6d8ba28f5ab 100644 --- a/kernel/kthread.c +++ b/kernel/kthread.c @@ -746,19 +746,6 @@ kthread_create_worker_on_cpu(int cpu, unsigned int flags, } EXPORT_SYMBOL(kthread_create_worker_on_cpu); -/* - * Returns true when the work could not be queued at the moment. - * It happens when it is already pending in a worker list - * or when it is being cancelled. 
- */ -static inline bool queuing_blocked(struct kthread_worker *worker, - struct kthread_work *work) -{ - lockdep_assert_held(&worker->lock); - - return !list_empty(&work->node) || work->canceling; -} - static void kthread_insert_work_sanity_check(struct kthread_worker *worker, struct kthread_work *work) { diff --git a/kernel/sched/boost.c b/kernel/sched/boost.c index 5c08acaf30f1..c3c29e1796b8 100644 --- a/kernel/sched/boost.c +++ b/kernel/sched/boost.c @@ -16,10 +16,6 @@ #include #include -#ifdef CONFIG_DYNAMIC_STUNE_BOOST -static int boost_slot; -#endif // CONFIG_DYNAMIC_STUNE_BOOST - /* * Scheduler boost is a mechanism to temporarily place tasks on CPUs * with higher capacity than those where a task would have normally @@ -218,14 +214,6 @@ static void sched_boost_disable_all(void) static void _sched_set_boost(int type) { - -#ifdef CONFIG_DYNAMIC_STUNE_BOOST - if (type > 0) - do_stune_sched_boost("top-app", &boost_slot); - else - reset_stune_boost("top-app", boost_slot); -#endif // CONFIG_DYNAMIC_STUNE_BOOST - if (type == 0) sched_boost_disable_all(); else if (type > 0) diff --git a/kernel/sched/tune.c b/kernel/sched/tune.c index 4a278bc12587..fef0a5d6b8ff 100644 --- a/kernel/sched/tune.c +++ b/kernel/sched/tune.c @@ -7,7 +7,6 @@ #include #include -#include #include "sched.h" #include "tune.h" @@ -18,18 +17,6 @@ extern struct reciprocal_value schedtune_spc_rdiv; /* We hold schedtune boost in effect for at least this long */ #define SCHEDTUNE_BOOST_HOLD_NS 50000000ULL -#ifdef CONFIG_DYNAMIC_STUNE_BOOST -#define DYNAMIC_BOOST_SLOTS_COUNT 5 -static DEFINE_MUTEX(boost_slot_mutex); -static DEFINE_MUTEX(stune_boost_mutex); -static struct schedtune *getSchedtune(char *st_name); -static int dynamic_boost(struct schedtune *st, int boost); -struct boost_slot { - struct list_head list; - int idx; -}; -#endif /* CONFIG_DYNAMIC_STUNE_BOOST */ - /* * EAS scheduler tunables for task groups. 
*/ @@ -69,27 +56,6 @@ struct schedtune { /* Hint to bias scheduling of tasks on that SchedTune CGroup * towards idle CPUs */ int prefer_idle; - -#ifdef CONFIG_DYNAMIC_STUNE_BOOST - /* - * This tracks the default boost value and is used to restore - * the value when Dynamic SchedTune Boost is reset. - */ - int boost_default; - - /* Sched Boost value for tasks on that SchedTune CGroup */ - int sched_boost; - - /* Number of ongoing boosts for this SchedTune CGroup */ - int boost_count; - - /* Lists of active and available boost slots */ - struct boost_slot active_boost_slots; - struct boost_slot available_boost_slots; - - /* Array of tracked boost values of each slot */ - int slot_boost[DYNAMIC_BOOST_SLOTS_COUNT]; -#endif /* CONFIG_DYNAMIC_STUNE_BOOST */ }; static inline struct schedtune *css_st(struct cgroup_subsys_state *css) @@ -126,20 +92,6 @@ root_schedtune = { .colocate_update_disabled = false, #endif .prefer_idle = 0, -#ifdef CONFIG_DYNAMIC_STUNE_BOOST - .boost_default = 0, - .sched_boost = 0, - .boost_count = 0, - .active_boost_slots = { - .list = LIST_HEAD_INIT(root_schedtune.active_boost_slots.list), - .idx = 0, - }, - .available_boost_slots = { - .list = LIST_HEAD_INIT(root_schedtune.available_boost_slots.list), - .idx = 0, - }, - .slot_boost = {0}, -#endif /* CONFIG_DYNAMIC_STUNE_BOOST */ }; /* @@ -685,9 +637,6 @@ boost_write(struct cgroup_subsys_state *css, struct cftype *cft, return -EINVAL; st->boost = boost; -#ifdef CONFIG_DYNAMIC_STUNE_BOOST - st->boost_default = boost; -#endif /* CONFIG_DYNAMIC_STUNE_BOOST */ /* Update CPU boost */ schedtune_boostgroup_update(st->idx, st->boost); @@ -695,63 +644,6 @@ boost_write(struct cgroup_subsys_state *css, struct cftype *cft, return 0; } -#ifdef CONFIG_DYNAMIC_STUNE_BOOST -static s64 -sched_boost_read(struct cgroup_subsys_state *css, struct cftype *cft) -{ - struct schedtune *st = css_st(css); - - return st->sched_boost; -} - -static int -sched_boost_write(struct cgroup_subsys_state *css, struct cftype *cft, - 
s64 sched_boost) -{ - struct schedtune *st = css_st(css); - st->sched_boost = sched_boost; - - return 0; -} - -static void -boost_slots_init(struct schedtune *st) -{ - int i; - struct boost_slot *slot; - - /* Initialize boost slots */ - INIT_LIST_HEAD(&(st->active_boost_slots.list)); - INIT_LIST_HEAD(&(st->available_boost_slots.list)); - - /* Populate available_boost_slots */ - for (i = 0; i < DYNAMIC_BOOST_SLOTS_COUNT; ++i) { - slot = kmalloc(sizeof(*slot), GFP_KERNEL); - slot->idx = i; - list_add_tail(&(slot->list), &(st->available_boost_slots.list)); - } -} - -static void -boost_slots_release(struct schedtune *st) -{ - struct boost_slot *slot, *next_slot; - - list_for_each_entry_safe(slot, next_slot, - &(st->available_boost_slots.list), list) { - list_del(&slot->list); - pr_info("STUNE: freed!\n"); - kfree(slot); - } - list_for_each_entry_safe(slot, next_slot, - &(st->active_boost_slots.list), list) { - list_del(&slot->list); - pr_info("STUNE: freed!\n"); - kfree(slot); - } -} -#endif // CONFIG_DYNAMIC_STUNE_BOOST - static struct cftype files[] = { #ifdef CONFIG_SCHED_WALT { @@ -775,13 +667,6 @@ static struct cftype files[] = { .read_u64 = prefer_idle_read, .write_u64 = prefer_idle_write, }, -#ifdef CONFIG_DYNAMIC_STUNE_BOOST - { - .name = "sched_boost", - .read_s64 = sched_boost_read, - .write_s64 = sched_boost_write, - }, -#endif // CONFIG_DYNAMIC_STUNE_BOOST { } /* terminate */ }; @@ -802,10 +687,6 @@ schedtune_boostgroup_init(struct schedtune *st) bg->group[st->idx].ts = 0; } -#ifdef CONFIG_DYNAMIC_STUNE_BOOST - boost_slots_init(st); -#endif // CONFIG_DYNAMIC_STUNE_BOOST - return 0; } @@ -855,10 +736,6 @@ out: static void schedtune_boostgroup_release(struct schedtune *st) { -#ifdef CONFIG_DYNAMIC_STUNE_BOOST - /* Free dynamic boost slots */ - boost_slots_release(st); -#endif // CONFIG_DYNAMIC_STUNE_BOOST /* Reset this boost group */ schedtune_boostgroup_update(st->idx, 0); @@ -904,224 +781,6 @@ schedtune_init_cgroups(void) schedtune_initialized = true; } 
-#ifdef CONFIG_DYNAMIC_STUNE_BOOST -static struct schedtune *getSchedtune(char *st_name) -{ - int idx; - - for (idx = 1; idx < BOOSTGROUPS_COUNT; ++idx) { - char name_buf[NAME_MAX + 1]; - struct schedtune *st = allocated_group[idx]; - - if (!st) { - pr_warn("SCHEDTUNE: Could not find %s\n", st_name); - break; - } - - cgroup_name(st->css.cgroup, name_buf, sizeof(name_buf)); - if (strncmp(name_buf, st_name, strlen(st_name)) == 0) - return st; - } - - return NULL; -} - -static int dynamic_boost(struct schedtune *st, int boost) -{ - int ret; - /* Backup boost_default */ - int boost_default_backup = st->boost_default; - - ret = boost_write(&st->css, NULL, boost); - - /* Restore boost_default */ - st->boost_default = boost_default_backup; - - return ret; -} - -static inline bool is_valid_boost_slot(int slot) -{ - return slot >= 0 && slot < DYNAMIC_BOOST_SLOTS_COUNT; -} - -static int activate_boost_slot(struct schedtune *st, int boost, int *slot) -{ - int ret = 0; - struct boost_slot *curr_slot; - struct list_head *head; - *slot = -1; - - mutex_lock(&boost_slot_mutex); - - /* Check for slots in available_boost_slots */ - if (list_empty(&(st->available_boost_slots.list))) { - ret = -EINVAL; - goto exit; - } - - /* - * Move one slot from available_boost_slots to active_boost_slots - */ - - /* Get first slot from available_boost_slots */ - head = &(st->available_boost_slots.list); - curr_slot = list_first_entry(head, struct boost_slot, list); - - /* Store slot value and boost value*/ - *slot = curr_slot->idx; - st->slot_boost[*slot] = boost; - - /* Delete slot from available_boost_slots */ - list_del(&curr_slot->list); - kfree(curr_slot); - - /* Create new slot with same value at tail of active_boost_slots */ - curr_slot = kmalloc(sizeof(*curr_slot), GFP_KERNEL); - curr_slot->idx = *slot; - list_add_tail(&(curr_slot->list), - &(st->active_boost_slots.list)); - -exit: - mutex_unlock(&boost_slot_mutex); - return ret; -} - -static int deactivate_boost_slot(struct schedtune *st, 
int slot) -{ - int ret = 0; - struct boost_slot *curr_slot, *next_slot; - - mutex_lock(&boost_slot_mutex); - - if (!is_valid_boost_slot(slot)) { - ret = -EINVAL; - goto exit; - } - - /* Delete slot from active_boost_slots */ - list_for_each_entry_safe(curr_slot, next_slot, - &(st->active_boost_slots.list), list) { - if (curr_slot->idx == slot) { - st->slot_boost[slot] = 0; - list_del(&curr_slot->list); - kfree(curr_slot); - - /* Create same slot at tail of available_boost_slots */ - curr_slot = kmalloc(sizeof(*curr_slot), GFP_KERNEL); - curr_slot->idx = slot; - list_add_tail(&(curr_slot->list), - &(st->available_boost_slots.list)); - - goto exit; - } - } - - /* Reaching here means that we did not find the slot to delete */ - ret = -EINVAL; - -exit: - mutex_unlock(&boost_slot_mutex); - return ret; -} - -static int max_active_boost(struct schedtune *st) -{ - struct boost_slot *slot; - int max_boost; - - mutex_lock(&boost_slot_mutex); - mutex_lock(&stune_boost_mutex); - - /* Set initial value to default boost */ - max_boost = st->boost_default; - - /* Check for active boosts */ - if (list_empty(&(st->active_boost_slots.list))) { - goto exit; - } - - /* Get largest boost value */ - list_for_each_entry(slot, &(st->active_boost_slots.list), list) { - int boost = st->slot_boost[slot->idx]; - if (boost > max_boost) - max_boost = boost; - } - -exit: - mutex_unlock(&stune_boost_mutex); - mutex_unlock(&boost_slot_mutex); - - return max_boost; -} - -static int _do_stune_boost(struct schedtune *st, int boost, int *slot) -{ - int ret = 0; - - /* Try to obtain boost slot */ - ret = activate_boost_slot(st, boost, slot); - - /* Check if boost slot obtained successfully */ - if (ret) - return -EINVAL; - - /* Boost if new value is greater than current */ - mutex_lock(&stune_boost_mutex); - if (boost > st->boost) - ret = dynamic_boost(st, boost); - mutex_unlock(&stune_boost_mutex); - - return ret; -} - -int reset_stune_boost(char *st_name, int slot) -{ - int ret = 0; - int boost = 0; 
- struct schedtune *st = getSchedtune(st_name); - - if (!st) - return -EINVAL; - - ret = deactivate_boost_slot(st, slot); - if (ret) { - return -EINVAL; - } - /* Find next largest active boost or reset to default */ - boost = max_active_boost(st); - - mutex_lock(&stune_boost_mutex); - /* Boost only if value changed */ - if (boost != st->boost) - ret = dynamic_boost(st, boost); - mutex_unlock(&stune_boost_mutex); - - return ret; -} - -int do_stune_sched_boost(char *st_name, int *slot) -{ - struct schedtune *st = getSchedtune(st_name); - - if (!st) - return -EINVAL; - - return _do_stune_boost(st, st->sched_boost, slot); -} - -int do_stune_boost(char *st_name, int boost, int *slot) -{ - struct schedtune *st = getSchedtune(st_name); - - if (!st) - return -EINVAL; - - return _do_stune_boost(st, boost, slot); -} - -#endif /* CONFIG_DYNAMIC_STUNE_BOOST */ - /* * Initialize the cgroup structures */