ANDROID: sched: Move scratch_mask to a percpu variable
The recently introduced scratch_mask in struct rq breaks the KMI. Turn it
into a per-cpu table instead, similar to how load_balance_mask or
select_idle_mask are allocated in the scheduler.

Bug: 254812379
Signed-off-by: Quentin Perret <qperret@google.com>
Change-Id: I331336dd13eaaf83be0a4029f89becda45c863f8
This commit is contained in:
@@ -58,6 +58,9 @@ EXPORT_TRACEPOINT_SYMBOL_GPL(sched_stat_blocked);
 DEFINE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues);
 EXPORT_SYMBOL_GPL(runqueues);
 
+/* Scratch cpumask to be temporarily used under rq_lock */
+DEFINE_PER_CPU(cpumask_var_t, scratch_mask);
+
 #ifdef CONFIG_SCHED_DEBUG
 /*
  * Debugging: various feature bits
@@ -2948,18 +2951,20 @@ out:
static int __set_cpus_allowed_ptr(struct task_struct *p,
|
||||
struct affinity_context *ctx)
|
||||
{
|
||||
struct cpumask *cpus;
|
||||
struct rq_flags rf;
|
||||
struct rq *rq;
|
||||
|
||||
rq = task_rq_lock(p, &rf);
|
||||
cpus = per_cpu(scratch_mask, rq->cpu);
|
||||
/*
|
||||
* Masking should be skipped if SCA_USER or any of the SCA_MIGRATE_*
|
||||
* flags are set.
|
||||
*/
|
||||
if (p->user_cpus_ptr &&
|
||||
!(ctx->flags & (SCA_USER | SCA_MIGRATE_ENABLE | SCA_MIGRATE_DISABLE)) &&
|
||||
cpumask_and(rq->scratch_mask, ctx->new_mask, p->user_cpus_ptr))
|
||||
ctx->new_mask = rq->scratch_mask;
|
||||
cpumask_and(cpus, ctx->new_mask, p->user_cpus_ptr))
|
||||
ctx->new_mask = cpus;
|
||||
|
||||
return __set_cpus_allowed_ptr_locked(p, ctx, rq, &rf);
|
||||
}
|
||||
@@ -9623,6 +9628,8 @@ void __init sched_init(void)
 			cpumask_size(), GFP_KERNEL, cpu_to_node(i));
 		per_cpu(select_idle_mask, i) = (cpumask_var_t)kzalloc_node(
 			cpumask_size(), GFP_KERNEL, cpu_to_node(i));
+		per_cpu(scratch_mask, i) = (cpumask_var_t)kzalloc_node(
+			cpumask_size(), GFP_KERNEL, cpu_to_node(i));
 	}
 #endif /* CONFIG_CPUMASK_OFFSTACK */
|
||||
|
||||
@@ -9728,7 +9735,6 @@ void __init sched_init(void)
 
 		rq->core_cookie = 0UL;
 #endif
-		zalloc_cpumask_var_node(&rq->scratch_mask, GFP_KERNEL, cpu_to_node(i));
 	}
 
 	set_load_weight(&init_task, false);
||||
@@ -1131,9 +1131,6 @@ struct rq {
 	unsigned int core_forceidle_seq;
 #endif
 
-	/* Scratch cpumask to be temporarily used under rq_lock */
-	cpumask_var_t scratch_mask;
-
 	ANDROID_VENDOR_DATA_ARRAY(1, 96);
 	ANDROID_OEM_DATA_ARRAY(1, 16);
||||
Reference in New Issue
Block a user