From 7764b501b90efb2219dd73fd528274a3a5a380a6 Mon Sep 17 00:00:00 2001 From: Pavankumar Kondeti Date: Thu, 16 Jul 2020 00:49:59 +0530 Subject: [PATCH] sched/walt: Improve the scheduler This change is for general scheduler improvements. Change-Id: I84f84973b815fb0bb95d1d950a4ac90be11f9470 Signed-off-by: Pavankumar Kondeti --- kernel/sched/sched.h | 7 +++---- kernel/sched/tune.c | 22 +++++++++++++++++----- kernel/sched/walt.c | 32 +++++++++++--------------------- 3 files changed, 31 insertions(+), 30 deletions(-) diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h index 38aae2631f1d..98aa8c1a6eb8 100644 --- a/kernel/sched/sched.h +++ b/kernel/sched/sched.h @@ -2867,15 +2867,14 @@ extern void clear_top_tasks_bitmap(unsigned long *bitmap); #if defined(CONFIG_SCHED_TUNE) extern bool task_sched_boost(struct task_struct *p); extern int sync_cgroup_colocation(struct task_struct *p, bool insert); -extern bool same_schedtune(struct task_struct *tsk1, struct task_struct *tsk2); +extern bool schedtune_task_colocated(struct task_struct *p); extern void update_cgroup_boost_settings(void); extern void restore_cgroup_boost_settings(void); #else -static inline bool -same_schedtune(struct task_struct *tsk1, struct task_struct *tsk2) +static inline bool schedtune_task_colocated(struct task_struct *p) { - return true; + return false; } static inline bool task_sched_boost(struct task_struct *p) diff --git a/kernel/sched/tune.c b/kernel/sched/tune.c index 51862ccdb719..5670542c774e 100644 --- a/kernel/sched/tune.c +++ b/kernel/sched/tune.c @@ -150,11 +150,6 @@ static inline void init_sched_boost(struct schedtune *st) st->colocate_update_disabled = false; } -bool same_schedtune(struct task_struct *tsk1, struct task_struct *tsk2) -{ - return task_schedtune(tsk1) == task_schedtune(tsk2); -} - void update_cgroup_boost_settings(void) { int i; @@ -482,6 +477,23 @@ static int sched_colocate_write(struct cgroup_subsys_state *css, return 0; } +bool schedtune_task_colocated(struct 
task_struct *p) +{ + struct schedtune *st; + bool colocated; + + if (unlikely(!schedtune_initialized)) + return false; + + /* Read the colocate flag of the task's schedtune cgroup */ + rcu_read_lock(); + st = task_schedtune(p); + colocated = st->colocate; + rcu_read_unlock(); + + return colocated; +} + #else /* CONFIG_SCHED_WALT */ static inline void init_sched_boost(struct schedtune *st) { } diff --git a/kernel/sched/walt.c b/kernel/sched/walt.c index fbf12f07b4dc..be19e3b0c25d 100644 --- a/kernel/sched/walt.c +++ b/kernel/sched/walt.c @@ -2514,7 +2514,6 @@ static void transfer_busy_time(struct rq *rq, struct related_thread_group *grp, * Enable colocation and frequency aggregation for all threads in a process. * The children inherits the group id from the parent. */ -unsigned int __read_mostly sysctl_sched_enable_thread_grouping; struct related_thread_group *related_thread_groups[MAX_NUM_CGROUP_COLOC_ID]; static LIST_HEAD(active_related_thread_groups); @@ -2780,34 +2779,25 @@ void add_new_task_to_grp(struct task_struct *new) { unsigned long flags; struct related_thread_group *grp; - struct task_struct *leader = new->group_leader; - unsigned int leader_grp_id = sched_get_group_id(leader); - if (!sysctl_sched_enable_thread_grouping && - leader_grp_id != DEFAULT_CGROUP_COLOC_ID) + /* + * If the task does not belong to a colocated schedtune + * cgroup, there is nothing to do. We are checking this without + * the lock. Even if there is a race, it will be added + * to the co-located cgroup via cgroup attach. + */ + if (!schedtune_task_colocated(new)) return; - if (thread_group_leader(new)) - return; - - if (leader_grp_id == DEFAULT_CGROUP_COLOC_ID) { - if (!same_schedtune(new, leader)) - return; - } - + grp = lookup_related_thread_group(DEFAULT_CGROUP_COLOC_ID); write_lock_irqsave(&related_thread_group_lock, flags); - rcu_read_lock(); - grp = task_related_thread_group(leader); - rcu_read_unlock(); - /* * It's possible that someone already added the new task to the - * group. 
A leader's thread group is updated prior to calling - this function. It's also possible that the leader has exited - the group. In either case, there is nothing else to do. + group. Or it might have been taken out of the colocated schedtune + cgroup. Check these conditions under lock. */ - if (!grp || new->grp) { + if (!schedtune_task_colocated(new) || new->grp) { write_unlock_irqrestore(&related_thread_group_lock, flags); return; }