ANDROID: sched: Keep sched_class::set_cpus_allowed stable

The sched_class struct is part of the stable KMI, so let's revert to the
prototype of ->set_cpus_allowed() prior to the introduction of struct
affinity_context.

To do so, hardcode the only use of the indirected set_cpus_allowed()
callback in core.c to pass the affinity_context around without touching
sched_class. There should be no other users of ->set_cpus_allowed() in
the kernel tree, so the only other potential users are out-of-tree
modules. There is no obvious way to deal with them, so provide a
set_cpus_allowed_common_cb() wrapper which constructs the affinity
context on the stack without the user_mask, works on a best-effort
basis, and WARN()s to make sure potential users are aware.

Bug: 254812379
Signed-off-by: Quentin Perret <qperret@google.com>
Change-Id: I6f096006c0a4b74a9c1ed0553739253bfd175c25
This commit is contained in:
Quentin Perret
2022-10-24 09:00:59 +00:00
parent 448eb7ef25
commit c61797f241
7 changed files with 45 additions and 9 deletions

View File

@@ -2568,7 +2568,18 @@ __do_set_cpus_allowed(struct task_struct *p, struct affinity_context *ctx)
if (running)
put_prev_task(rq, p);
p->sched_class->set_cpus_allowed(p, ctx);
/*
* XXX: ANDROID: we can't use sched_class::set_cpus_allowed() here
* because it doesn't take a struct affinity_context as parameter for
* GKI KMI stability reason -- see b/254812379. To avoid the problem,
* let's hardcode the indirection here and hope for the best. The only
* other potential users of p->set_cpus_allowed() will be in vendor
* modules.
*/
if (likely(p->sched_class != &dl_sched_class))
set_cpus_allowed_common(p, ctx);
else
set_cpus_allowed_dl(p, ctx);
if (queued)
enqueue_task(rq, p, ENQUEUE_RESTORE | ENQUEUE_NOCLOCK);

View File

@@ -2337,8 +2337,7 @@ static void task_woken_dl(struct rq *rq, struct task_struct *p)
}
}
static void set_cpus_allowed_dl(struct task_struct *p,
struct affinity_context *ctx)
void set_cpus_allowed_dl(struct task_struct *p, struct affinity_context *ctx)
{
struct root_domain *src_rd;
struct rq *rq;
@@ -2370,6 +2369,18 @@ static void set_cpus_allowed_dl(struct task_struct *p,
set_cpus_allowed_common(p, ctx);
}
/*
 * KMI-stable shim for dl_sched_class::set_cpus_allowed.
 *
 * Rebuilds a struct affinity_context on the stack from the legacy
 * (new_mask, flags) callback arguments and forwards it to the real
 * set_cpus_allowed_dl(). The user_mask cannot be reconstructed here,
 * so this is best-effort only; warn once so any out-of-tree caller
 * taking this path is visible.
 */
static void set_cpus_allowed_dl_cb(struct task_struct *p,
				   const struct cpumask *new_mask,
				   u32 flags)
{
	struct affinity_context ctx = {
		.new_mask = new_mask,
		.flags    = flags,
	};

	WARN_ONCE(1, "Unexpected use of dl_sched_class::set_cpus_allowed()");

	set_cpus_allowed_dl(p, &ctx);
}
/* Assumes rq->lock is held */
static void rq_online_dl(struct rq *rq)
{
@@ -2562,7 +2573,7 @@ DEFINE_SCHED_CLASS(dl) = {
.pick_task = pick_task_dl,
.select_task_rq = select_task_rq_dl,
.migrate_task_rq = migrate_task_rq_dl,
.set_cpus_allowed = set_cpus_allowed_dl,
.set_cpus_allowed = set_cpus_allowed_dl_cb,
.rq_online = rq_online_dl,
.rq_offline = rq_offline_dl,
.task_woken = task_woken_dl,

View File

@@ -11842,7 +11842,7 @@ DEFINE_SCHED_CLASS(fair) = {
.rq_offline = rq_offline_fair,
.task_dead = task_dead_fair,
.set_cpus_allowed = set_cpus_allowed_common,
.set_cpus_allowed = set_cpus_allowed_common_cb,
#endif
.task_tick = task_tick_fair,

View File

@@ -516,7 +516,7 @@ DEFINE_SCHED_CLASS(idle) = {
.balance = balance_idle,
.pick_task = pick_task_idle,
.select_task_rq = select_task_rq_idle,
.set_cpus_allowed = set_cpus_allowed_common,
.set_cpus_allowed = set_cpus_allowed_common_cb,
#endif
.task_tick = task_tick_idle,

View File

@@ -2631,7 +2631,7 @@ DEFINE_SCHED_CLASS(rt) = {
.balance = balance_rt,
.pick_task = pick_task_rt,
.select_task_rq = select_task_rq_rt,
.set_cpus_allowed = set_cpus_allowed_common,
.set_cpus_allowed = set_cpus_allowed_common_cb,
.rq_online = rq_online_rt,
.rq_offline = rq_offline_rt,
.task_woken = task_woken_rt,

View File

@@ -2207,7 +2207,9 @@ struct sched_class {
void (*task_woken)(struct rq *this_rq, struct task_struct *task);
void (*set_cpus_allowed)(struct task_struct *p, struct affinity_context *ctx);
void (*set_cpus_allowed)(struct task_struct *p,
const struct cpumask *newmask,
u32 flags);
void (*rq_online)(struct rq *rq);
void (*rq_offline)(struct rq *rq);
@@ -2320,8 +2322,20 @@ extern void update_group_capacity(struct sched_domain *sd, int cpu);
extern void trigger_load_balance(struct rq *rq);
extern void set_cpus_allowed_dl(struct task_struct *p, struct affinity_context *ctx);
extern void set_cpus_allowed_common(struct task_struct *p, struct affinity_context *ctx);
/*
 * KMI-stable shim for sched_class::set_cpus_allowed.
 *
 * Out-of-tree modules may still invoke the callback with the legacy
 * (new_mask, flags) signature; rebuild a struct affinity_context on
 * the stack (without the user_mask, so best-effort only) and hand it
 * to set_cpus_allowed_common(). Warn once so such callers are visible.
 */
static inline void set_cpus_allowed_common_cb(struct task_struct *p,
					      const struct cpumask *new_mask,
					      u32 flags)
{
	struct affinity_context ctx = {
		.new_mask = new_mask,
		.flags    = flags,
	};

	WARN_ONCE(1, "Unexpected use of sched_class::set_cpus_allowed()");

	set_cpus_allowed_common(p, &ctx);
}
static inline struct task_struct *get_push_task(struct rq *rq)
{
struct task_struct *p = rq->curr;

View File

@@ -134,7 +134,7 @@ DEFINE_SCHED_CLASS(stop) = {
.balance = balance_stop,
.pick_task = pick_task_stop,
.select_task_rq = select_task_rq_stop,
.set_cpus_allowed = set_cpus_allowed_common,
.set_cpus_allowed = set_cpus_allowed_common_cb,
#endif
.task_tick = task_tick_stop,