Revert "sched/walt: Improve the scheduler"

This reverts commit 7764b501b9.

Signed-off-by: UtsavBalar1231 <utsavbalar1231@gmail.com>
This commit is contained in:
UtsavBalar1231
2020-08-30 00:03:02 +05:30
parent 62ff5031f7
commit c7a1370d89
3 changed files with 30 additions and 31 deletions

View File

@@ -2647,14 +2647,15 @@ extern void clear_top_tasks_bitmap(unsigned long *bitmap);
#if defined(CONFIG_SCHED_TUNE)
extern bool task_sched_boost(struct task_struct *p);
extern int sync_cgroup_colocation(struct task_struct *p, bool insert);
extern bool schedtune_task_colocated(struct task_struct *p);
extern bool same_schedtune(struct task_struct *tsk1, struct task_struct *tsk2);
extern void update_cgroup_boost_settings(void);
extern void restore_cgroup_boost_settings(void);
#else
static inline bool schedtune_task_colocated(struct task_struct *p)
static inline bool
same_schedtune(struct task_struct *tsk1, struct task_struct *tsk2)
{
return false;
return true;
}
static inline bool task_sched_boost(struct task_struct *p)

View File

@@ -150,6 +150,11 @@ static inline void init_sched_boost(struct schedtune *st)
st->colocate_update_disabled = false;
}
/*
 * same_schedtune - check whether two tasks share a schedtune cgroup.
 * @tsk1: first task
 * @tsk2: second task
 *
 * Returns true when both tasks resolve to the same schedtune group.
 */
bool same_schedtune(struct task_struct *tsk1, struct task_struct *tsk2)
{
	struct schedtune *st1 = task_schedtune(tsk1);
	struct schedtune *st2 = task_schedtune(tsk2);

	return st1 == st2;
}
void update_cgroup_boost_settings(void)
{
int i;
@@ -477,23 +482,6 @@ static int sched_colocate_write(struct cgroup_subsys_state *css,
return 0;
}
/*
 * schedtune_task_colocated - report whether @p's schedtune group requests
 * colocation.
 * @p: task to query
 *
 * Returns false early if schedtune has not finished initializing;
 * otherwise reads the task's schedtune group under RCU protection and
 * returns its ->colocate flag.
 */
bool schedtune_task_colocated(struct task_struct *p)
{
	struct schedtune *grp;
	bool ret;

	/* schedtune state is not valid until initialization completes */
	if (unlikely(!schedtune_initialized))
		return false;

	/* Task->cgroup association is RCU-protected */
	rcu_read_lock();
	grp = task_schedtune(p);
	ret = grp->colocate;
	rcu_read_unlock();

	return ret;
}
#else /* CONFIG_SCHED_WALT */
static inline void init_sched_boost(struct schedtune *st) { }

View File

@@ -2663,6 +2663,7 @@ static void transfer_busy_time(struct rq *rq, struct related_thread_group *grp,
* The children inherits the group id from the parent.
*/
unsigned int __read_mostly sysctl_sched_coloc_downmigrate_ns;
unsigned int __read_mostly sysctl_sched_enable_thread_grouping;
struct related_thread_group *related_thread_groups[MAX_NUM_CGROUP_COLOC_ID];
static LIST_HEAD(active_related_thread_groups);
@@ -2933,25 +2934,34 @@ void add_new_task_to_grp(struct task_struct *new)
{
unsigned long flags;
struct related_thread_group *grp;
struct task_struct *leader = new->group_leader;
unsigned int leader_grp_id = sched_get_group_id(leader);
/*
* If the task does not belong to colocated schedtune
* cgroup, nothing to do. We are checking this without
* lock. Even if there is a race, it will be added
* to the co-located cgroup via cgroup attach.
*/
if (!schedtune_task_colocated(new))
if (!sysctl_sched_enable_thread_grouping &&
leader_grp_id != DEFAULT_CGROUP_COLOC_ID)
return;
grp = lookup_related_thread_group(DEFAULT_CGROUP_COLOC_ID);
if (thread_group_leader(new))
return;
if (leader_grp_id == DEFAULT_CGROUP_COLOC_ID) {
if (!same_schedtune(new, leader))
return;
}
write_lock_irqsave(&related_thread_group_lock, flags);
rcu_read_lock();
grp = task_related_thread_group(leader);
rcu_read_unlock();
/*
* It's possible that someone already added the new task to the
* group. or it might have taken out from the colocated schedtune
* cgroup. check these conditions under lock.
* group. A leader's thread group is updated prior to calling
* this function. It's also possible that the leader has exited
* the group. In either case, there is nothing else to do.
*/
if (!schedtune_task_colocated(new) || new->grp) {
if (!grp || new->grp) {
write_unlock_irqrestore(&related_thread_group_lock, flags);
return;
}