BACKPORT: FROMGIT: sched: Enforce user requested affinity
It was found that the user requested affinity via sched_setaffinity() can be easily overwritten by other kernel subsystems without an easy way to reset it back to what the user requested. For example, any change to the current cpuset hierarchy may reset the cpumask of the tasks in the affected cpusets to the default cpuset value even if those tasks have pre-existing user requested affinity. That is especially easy to trigger under a cgroup v2 environment where writing "+cpuset" to the root cgroup's cgroup.subtree_control file will reset the cpus affinity of all the processes in the system. That is problematic in a nohz_full environment where the tasks running in the nohz_full CPUs usually have their cpus affinity explicitly set and will behave incorrectly if cpus affinity changes. Fix this problem by looking at user_cpus_ptr in __set_cpus_allowed_ptr() and use it to restrict the given cpumask unless there is no overlap. In that case, it will fall back to the given one. The SCA_USER flag is reused to indicate intent to set user_cpus_ptr and so user_cpus_ptr masking should be skipped. In addition, masking should also be skipped if any of the SCA_MIGRATE_* flag is set. All callers of set_cpus_allowed_ptr() will be affected by this change. A scratch cpumask is added to percpu runqueues structure for doing additional masking when user_cpus_ptr is set. Change-Id: I2dc567931fbcf33dbd544fbfc7bfb0c35a0feea8 Signed-off-by: Waiman Long <longman@redhat.com> Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org> Link: https://lkml.kernel.org/r/20220922180041.1768141-4-longman@redhat.com BUG: 254447891 (cherry picked from commit 02a73f8239de47caa07c8a5f93cb3700ee30f3dc https://git.kernel.org/pub/scm/linux/kernel/git/peterz/queue.git sched/core) [ashayj: Resolved minor conflict in kernel/sched/core.c] Signed-off-by: Ashay Jaiswal <quic_ashayj@quicinc.com>
This commit is contained in: committed by Quentin Perret (parent 50a3a47c14, commit ec8cf94166)
@@ -2935,6 +2935,15 @@ static int __set_cpus_allowed_ptr(struct task_struct *p,
|
||||
struct rq *rq;
|
||||
|
||||
rq = task_rq_lock(p, &rf);
|
||||
/*
|
||||
* Masking should be skipped if SCA_USER or any of the SCA_MIGRATE_*
|
||||
* flags are set.
|
||||
*/
|
||||
if (p->user_cpus_ptr &&
|
||||
!(ctx->flags & (SCA_USER | SCA_MIGRATE_ENABLE | SCA_MIGRATE_DISABLE)) &&
|
||||
cpumask_and(rq->scratch_mask, ctx->new_mask, p->user_cpus_ptr))
|
||||
ctx->new_mask = rq->scratch_mask;
|
||||
|
||||
return __set_cpus_allowed_ptr_locked(p, ctx, rq, &rf);
|
||||
}
|
||||
|
||||
@@ -9702,6 +9711,7 @@ void __init sched_init(void)
|
||||
|
||||
rq->core_cookie = 0UL;
|
||||
#endif
|
||||
zalloc_cpumask_var_node(&rq->scratch_mask, GFP_KERNEL, cpu_to_node(i));
|
||||
}
|
||||
|
||||
set_load_weight(&init_task, false);
|
||||
|
||||
@@ -1131,6 +1131,9 @@ struct rq {
|
||||
unsigned int core_forceidle_seq;
|
||||
#endif
|
||||
|
||||
/* Scratch cpumask to be temporarily used under rq_lock */
|
||||
cpumask_var_t scratch_mask;
|
||||
|
||||
ANDROID_VENDOR_DATA_ARRAY(1, 96);
|
||||
ANDROID_OEM_DATA_ARRAY(1, 16);
|
||||
|
||||
@@ -1560,6 +1563,7 @@ struct rq_flags {
|
||||
*/
|
||||
unsigned int clock_update_flags;
|
||||
#endif
|
||||
|
||||
};
|
||||
|
||||
#ifdef CONFIG_SMP
|
||||
|
||||
Reference in New Issue
Block a user