BACKPORT: FROMGIT: sched: Always preserve the user requested cpumask
Unconditionally preserve the user requested cpumask on sched_setaffinity()
calls. This allows using it outside of the fairly narrow
restrict_cpus_allowed_ptr() use-case and fixes some cpuset issues that
currently suffer destruction of cpumasks.

Change-Id: I24bb8639d91785e308188837b202a12fa3142d43
Signed-off-by: Waiman Long <longman@redhat.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Link: https://lkml.kernel.org/r/20220922180041.1768141-3-longman@redhat.com
BUG: 254447891
(cherry picked from commit c2a8c7b835ad6c72a1ff29d211f3aed454b29183
 https://git.kernel.org/pub/scm/linux/kernel/git/peterz/queue.git sched/core)
[ashayj: Resolved minor conflict in kernel/sched/core.c]
Signed-off-by: Ashay Jaiswal <quic_ashayj@quicinc.com>
Committed by: Quentin Perret
Parent: 54aeb5c372
Commit: 50a3a47c14
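For illustration only (not part of the commit): the user-visible contract being
preserved is that the cpumask passed to sched_setaffinity(2) is remembered in
p->user_cpus_ptr, so a later cpuset or arch-imposed restriction can be relaxed
back toward the original request instead of discarding it. A minimal user-space
sketch, assuming a patched CONFIG_SMP kernel:

#define _GNU_SOURCE
#include <sched.h>
#include <stdio.h>

int main(void)
{
	cpu_set_t req;

	CPU_ZERO(&req);
	CPU_SET(0, &req);
	CPU_SET(1, &req);

	/* The kernel now keeps a copy of this mask in p->user_cpus_ptr. */
	if (sched_setaffinity(0, sizeof(req), &req))
		perror("sched_setaffinity");

	/*
	 * If a cpuset update or force_compatible_cpus_allowed_ptr() later
	 * narrows the effective affinity, relax_compatible_cpus_allowed_ptr()
	 * can restore it from the saved request rather than losing it.
	 */
	return 0;
}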
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -2521,6 +2521,13 @@ void set_cpus_allowed_common(struct task_struct *p, struct affinity_context *ctx)
 
 	cpumask_copy(&p->cpus_mask, ctx->new_mask);
 	p->nr_cpus_allowed = cpumask_weight(ctx->new_mask);
+
+	/*
+	 * Swap in a new user_cpus_ptr if SCA_USER flag set
+	 */
+	if (ctx->flags & SCA_USER)
+		swap(p->user_cpus_ptr, ctx->user_mask);
+
 	trace_android_rvh_set_cpus_allowed_comm(p, ctx->new_mask);
 }
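The swap() above is an ownership hand-off: the caller pre-allocates
ctx->user_mask, and after the call it holds whatever mask the task previously
had (possibly NULL), which the caller must free. A minimal sketch of the
caller's side using the names from this diff (the function name here is made
up):

static int setaffinity_sketch(struct task_struct *p, const struct cpumask *in_mask)
{
	struct cpumask *user_mask = kmalloc(cpumask_size(), GFP_KERNEL);
	struct affinity_context ac = {
		.new_mask = in_mask,
		.user_mask = user_mask,
		.flags = SCA_USER,
	};
	int ret;

	if (!user_mask)
		return -ENOMEM;
	cpumask_copy(user_mask, in_mask);

	/* set_cpus_allowed_common() swaps p->user_cpus_ptr with ac.user_mask. */
	ret = __sched_setaffinity(p, &ac);

	/* Frees the task's old mask, or our unused copy if nothing was swapped. */
	kfree(ac.user_mask);
	return ret;
}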
@@ -2582,6 +2589,8 @@ void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask)
 int dup_user_cpus_ptr(struct task_struct *dst, struct task_struct *src,
 		      int node)
 {
+	unsigned long flags;
+
 	if (!src->user_cpus_ptr)
 		return 0;
 
@@ -2589,7 +2598,10 @@ int dup_user_cpus_ptr(struct task_struct *dst, struct task_struct *src,
 	if (!dst->user_cpus_ptr)
 		return -ENOMEM;
 
+	/* Use pi_lock to protect content of user_cpus_ptr */
+	raw_spin_lock_irqsave(&src->pi_lock, flags);
 	cpumask_copy(dst->user_cpus_ptr, src->user_cpus_ptr);
+	raw_spin_unlock_irqrestore(&src->pi_lock, flags);
 	return 0;
 }
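The pi_lock section guards against a concurrent sched_setaffinity() on @src:
the SCA_USER swap above runs with src->pi_lock held (via task_rq_lock()) and
the old mask is freed once that call returns, so an unlocked fork()-time copy
could read a just-freed mask. A rough sketch of the interleaving being
excluded, under that assumption:

/*
 *   dup_user_cpus_ptr() on CPU0            sched_setaffinity(src) on CPU1
 *   ---------------------------            ------------------------------
 *   old = src->user_cpus_ptr
 *                                          swap(src->user_cpus_ptr, ctx->user_mask)
 *                                          kfree(ctx->user_mask)      <- frees 'old'
 *   cpumask_copy(dst->user_cpus_ptr, old)  <- use-after-free without the lock
 *
 * Taking src->pi_lock around the copy serialises it against the swap.
 */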
@@ -2838,7 +2850,6 @@ static int __set_cpus_allowed_ptr_locked(struct task_struct *p,
 	const struct cpumask *cpu_allowed_mask = task_cpu_possible_mask(p);
 	const struct cpumask *cpu_valid_mask = cpu_active_mask;
 	bool kthread = p->flags & PF_KTHREAD;
-	struct cpumask *user_mask = NULL;
 	unsigned int dest_cpu;
 	int ret = 0;
 
@@ -2900,14 +2911,7 @@ static int __set_cpus_allowed_ptr_locked(struct task_struct *p,
 
 	__do_set_cpus_allowed(p, ctx);
 
-	if (ctx->flags & SCA_USER)
-		user_mask = clear_user_cpus_ptr(p);
-
-	ret = affine_move_task(rq, p, rf, dest_cpu, ctx->flags);
-
-	kfree(user_mask);
-
-	return ret;
+	return affine_move_task(rq, p, rf, dest_cpu, ctx->flags);
 
 out:
 	task_rq_unlock(rq, p, rf);
@@ -2947,8 +2951,10 @@ EXPORT_SYMBOL_GPL(set_cpus_allowed_ptr);
 
 /*
  * Change a given task's CPU affinity to the intersection of its current
- * affinity mask and @subset_mask, writing the resulting mask to @new_mask
- * and pointing @p->user_cpus_ptr to a copy of the old mask.
+ * affinity mask and @subset_mask, writing the resulting mask to @new_mask.
+ * If user_cpus_ptr is defined, use it as the basis for restricting CPU
+ * affinity or use cpu_online_mask instead.
+ *
  * If the resulting mask is empty, leave the affinity unchanged and return
  * -EINVAL.
  */
@@ -2956,18 +2962,14 @@ static int restrict_cpus_allowed_ptr(struct task_struct *p,
 				     struct cpumask *new_mask,
 				     const struct cpumask *subset_mask)
 {
-	struct cpumask *user_mask = NULL;
-	struct affinity_context ac;
+	struct affinity_context ac = {
+		.new_mask = new_mask,
+		.flags = 0,
+	};
 	struct rq_flags rf;
 	struct rq *rq;
 	int err;
 
-	if (!p->user_cpus_ptr) {
-		user_mask = kmalloc(cpumask_size(), GFP_KERNEL);
-		if (!user_mask)
-			return -ENOMEM;
-	}
-
 	rq = task_rq_lock(p, &rf);
 
 	/*
@@ -2980,29 +2982,15 @@ static int restrict_cpus_allowed_ptr(struct task_struct *p,
 		goto err_unlock;
 	}
 
-	if (!cpumask_and(new_mask, &p->cpus_mask, subset_mask)) {
+	if (!cpumask_and(new_mask, task_user_cpus(p), subset_mask)) {
 		err = -EINVAL;
 		goto err_unlock;
 	}
 
-	/*
-	 * We're about to butcher the task affinity, so keep track of what
-	 * the user asked for in case we're able to restore it later on.
-	 */
-	if (user_mask) {
-		cpumask_copy(user_mask, p->cpus_ptr);
-		p->user_cpus_ptr = user_mask;
-	}
-
-	ac = (struct affinity_context){
-		.new_mask = new_mask,
-	};
-
 	return __set_cpus_allowed_ptr_locked(p, &ac, rq, &rf);
 
 err_unlock:
 	task_rq_unlock(rq, p, &rf);
-	kfree(user_mask);
 	return err;
 }
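The switch from &p->cpus_mask to task_user_cpus(p) means each restriction is
computed from the user's original request rather than from an already-narrowed
current mask, so successive restrictions no longer compound. A minimal sketch
with made-up masks (bits standing in for CPUs):

static void restrict_masks_sketch(void)
{
	unsigned long user_req = 0xff;	/* user asked for CPUs 0-7       */
	unsigned long first    = 0x0f;	/* earlier restriction: CPUs 0-3 */
	unsigned long second   = 0xf0;	/* later restriction: CPUs 4-7   */

	/* Old basis (current mask): (0xff & 0x0f) & 0xf0 == 0 -> -EINVAL. */
	unsigned long old_basis = (user_req & first) & second;

	/* New basis (task_user_cpus()): 0xff & 0xf0 == 0xf0 -> succeeds. */
	unsigned long new_basis = user_req & second;

	(void)old_basis;
	(void)new_basis;
}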
@@ -3058,33 +3046,25 @@ __sched_setaffinity(struct task_struct *p, struct affinity_context *ctx);
 
 /*
  * Restore the affinity of a task @p which was previously restricted by a
- * call to force_compatible_cpus_allowed_ptr(). This will clear (and free)
- * @p->user_cpus_ptr.
+ * call to force_compatible_cpus_allowed_ptr().
 *
 * It is the caller's responsibility to serialise this with any calls to
 * force_compatible_cpus_allowed_ptr(@p).
 */
 void relax_compatible_cpus_allowed_ptr(struct task_struct *p)
 {
-	struct cpumask *user_mask = p->user_cpus_ptr;
 	struct affinity_context ac = {
-		.new_mask = user_mask,
+		.new_mask = task_user_cpus(p),
 		.flags = 0,
 	};
-	unsigned long flags;
+	int ret;
 
 	/*
-	 * Try to restore the old affinity mask. If this fails, then
-	 * we free the mask explicitly to avoid it being inherited across
-	 * a subsequent fork().
+	 * Try to restore the old affinity mask with __sched_setaffinity().
+	 * Cpuset masking will be done there too.
 	 */
-	if (!user_mask || !__sched_setaffinity(p, &ac))
-		return;
-
-	raw_spin_lock_irqsave(&p->pi_lock, flags);
-	user_mask = clear_user_cpus_ptr(p);
-	raw_spin_unlock_irqrestore(&p->pi_lock, flags);
-
-	kfree(user_mask);
+	ret = __sched_setaffinity(p, &ac);
+	WARN_ON_ONCE(ret);
 }
 
 void set_task_cpu(struct task_struct *p, unsigned int new_cpu)
@@ -8176,7 +8156,7 @@ __sched_setaffinity(struct task_struct *p, struct affinity_context *ctx)
 	retval = dl_task_check_affinity(p, new_mask);
 	if (retval)
 		goto out_free_new_mask;
-again:
+
 	retval = __set_cpus_allowed_ptr(p, ctx);
 	if (retval)
 		goto out_free_new_mask;
@@ -8188,7 +8168,24 @@ again:
 		 * Just reset the cpumask to the cpuset's cpus_allowed.
 		 */
 		cpumask_copy(new_mask, cpus_allowed);
-		goto again;
+
+		/*
+		 * If SCA_USER is set, a 2nd call to __set_cpus_allowed_ptr()
+		 * will restore the previous user_cpus_ptr value.
+		 *
+		 * In the unlikely event a previous user_cpus_ptr exists,
+		 * we need to further restrict the mask to what is allowed
+		 * by that old user_cpus_ptr.
+		 */
+		if (unlikely((ctx->flags & SCA_USER) && ctx->user_mask)) {
+			bool empty = !cpumask_and(new_mask, new_mask,
+						  ctx->user_mask);
+
+			if (WARN_ON_ONCE(empty))
+				cpumask_copy(new_mask, cpus_allowed);
+		}
+		__set_cpus_allowed_ptr(p, ctx);
+		retval = -EINVAL;
 	}
 
 out_free_new_mask:
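The comment about the 2nd call is the subtle part: every SCA_USER pass through
__set_cpus_allowed_ptr() swaps p->user_cpus_ptr with ctx->user_mask, so calling
it twice leaves the task's pointer where it started while the freshly allocated
copy travels back to the caller to be freed. A minimal sketch of the pointer
movement, assuming the first call succeeded and the cpuset race was then
detected:

/* ctx->user_mask starts as a fresh copy of the requested mask. */
swap(p->user_cpus_ptr, ctx->user_mask);	/* 1st call: task holds the copy,
					 * ctx holds the previous mask       */
/* ...cpuset raced, new_mask clamped to cpus_allowed (and the old mask)... */
swap(p->user_cpus_ptr, ctx->user_mask);	/* 2nd call: previous mask restored,
					 * ctx holds the copy again          */
kfree(ctx->user_mask);			/* sched_setaffinity() frees the copy */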
@@ -8200,9 +8197,8 @@ out_free_cpus_allowed:
 
 long sched_setaffinity(pid_t pid, const struct cpumask *in_mask)
 {
-	struct affinity_context ac = {
-		.new_mask = in_mask,
-	};
+	struct affinity_context ac;
+	struct cpumask *user_mask;
 	struct task_struct *p;
 	int retval = 0;
 	int skip = 0;
@@ -8241,8 +8237,21 @@ long sched_setaffinity(pid_t pid, const struct cpumask *in_mask)
 	if (retval)
 		goto out_put_task;
 
+	user_mask = kmalloc(cpumask_size(), GFP_KERNEL);
+	if (!user_mask) {
+		retval = -ENOMEM;
+		goto out_put_task;
+	}
+	cpumask_copy(user_mask, in_mask);
+	ac = (struct affinity_context){
+		.new_mask = in_mask,
+		.user_mask = user_mask,
+		.flags = SCA_USER,
+	};
+
 	retval = __sched_setaffinity(p, &ac);
 	trace_android_rvh_sched_setaffinity(p, ac.new_mask, &retval);
+	kfree(ac.user_mask);
 
 out_put_task:
 	put_task_struct(p);
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -1905,6 +1905,13 @@ static inline void dirty_sched_domain_sysctl(int cpu)
 
 extern int sched_update_scaling(void);
 
+static inline const struct cpumask *task_user_cpus(struct task_struct *p)
+{
+	if (!p->user_cpus_ptr)
+		return cpu_possible_mask; /* &init_task.cpus_mask */
+	return p->user_cpus_ptr;
+}
+
 extern void flush_smp_call_function_from_idle(void);
 
 #else /* !CONFIG_SMP: */
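task_user_cpus() gives callers a single place to ask "what did the user
request?", falling back to cpu_possible_mask (effectively init_task's mask)
when no affinity was ever set. A trivial usage sketch, with a made-up helper
name:

static bool requested_cpu_sketch(struct task_struct *p, int cpu)
{
	/* Reads as "the user asked for this CPU, or never asked at all". */
	return cpumask_test_cpu(cpu, task_user_cpus(p));
}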
@@ -2164,6 +2171,7 @@ extern const u32 sched_prio_to_wmult[40];
 
 struct affinity_context {
 	const struct cpumask *new_mask;
+	struct cpumask *user_mask;
 	unsigned int flags;
 };