Merge "sched/walt: Improve the scheduler"

This commit is contained in:
qctecmdr
2020-07-28 09:43:10 -07:00
committed by Gerrit - the friendly Code Review server
3 changed files with 31 additions and 30 deletions

View File

@@ -2867,15 +2867,14 @@ extern void clear_top_tasks_bitmap(unsigned long *bitmap);
#if defined(CONFIG_SCHED_TUNE)
extern bool task_sched_boost(struct task_struct *p);
extern int sync_cgroup_colocation(struct task_struct *p, bool insert);
extern bool same_schedtune(struct task_struct *tsk1, struct task_struct *tsk2);
extern bool schedtune_task_colocated(struct task_struct *p);
extern void update_cgroup_boost_settings(void);
extern void restore_cgroup_boost_settings(void);
#else
static inline bool
same_schedtune(struct task_struct *tsk1, struct task_struct *tsk2)
static inline bool schedtune_task_colocated(struct task_struct *p)
{
return true;
return false;
}
static inline bool task_sched_boost(struct task_struct *p)

View File

@@ -150,11 +150,6 @@ static inline void init_sched_boost(struct schedtune *st)
st->colocate_update_disabled = false;
}
/* Return true when both tasks belong to the same schedtune cgroup. */
bool same_schedtune(struct task_struct *tsk1, struct task_struct *tsk2)
{
	struct schedtune *st1 = task_schedtune(tsk1);
	struct schedtune *st2 = task_schedtune(tsk2);

	return st1 == st2;
}
void update_cgroup_boost_settings(void)
{
int i;
@@ -482,6 +477,23 @@ static int sched_colocate_write(struct cgroup_subsys_state *css,
return 0;
}
bool schedtune_task_colocated(struct task_struct *p)
{
struct schedtune *st;
bool colocated;
if (unlikely(!schedtune_initialized))
return false;
/* Get task boost value */
rcu_read_lock();
st = task_schedtune(p);
colocated = st->colocate;
rcu_read_unlock();
return colocated;
}
#else /* CONFIG_SCHED_WALT */
static inline void init_sched_boost(struct schedtune *st) { }

View File

@@ -2514,7 +2514,6 @@ static void transfer_busy_time(struct rq *rq, struct related_thread_group *grp,
* Enable colocation and frequency aggregation for all threads in a process.
* The children inherits the group id from the parent.
*/
unsigned int __read_mostly sysctl_sched_enable_thread_grouping;
struct related_thread_group *related_thread_groups[MAX_NUM_CGROUP_COLOC_ID];
static LIST_HEAD(active_related_thread_groups);
@@ -2780,34 +2779,25 @@ void add_new_task_to_grp(struct task_struct *new)
{
unsigned long flags;
struct related_thread_group *grp;
struct task_struct *leader = new->group_leader;
unsigned int leader_grp_id = sched_get_group_id(leader);
if (!sysctl_sched_enable_thread_grouping &&
leader_grp_id != DEFAULT_CGROUP_COLOC_ID)
/*
 * If the task does not belong to a colocated schedtune
 * cgroup, there is nothing to do. We check this without the
 * lock; even if there is a race, the task will be added
 * to the colocated cgroup via cgroup attach.
 */
if (!schedtune_task_colocated(new))
return;
if (thread_group_leader(new))
return;
if (leader_grp_id == DEFAULT_CGROUP_COLOC_ID) {
if (!same_schedtune(new, leader))
return;
}
grp = lookup_related_thread_group(DEFAULT_CGROUP_COLOC_ID);
write_lock_irqsave(&related_thread_group_lock, flags);
rcu_read_lock();
grp = task_related_thread_group(leader);
rcu_read_unlock();
/*
* It's possible that someone already added the new task to the
* group. A leader's thread group is updated prior to calling
* this function. It's also possible that the leader has exited
* the group. In either case, there is nothing else to do.
* group, or the task might have been taken out of the colocated
* schedtune cgroup. Check these conditions under the lock.
*/
if (!grp || new->grp) {
if (!schedtune_task_colocated(new) || new->grp) {
write_unlock_irqrestore(&related_thread_group_lock, flags);
return;
}