ANDROID: binder: don't check prio permissions on restore.

Because we have disabled RT priority inheritance for
the regular binder domain, the following can happen:

1) thread A (prio 98) calls into thread B
2) because RT prio inheritance is disabled, thread B
   runs at the lowest nice (prio 100) instead
3) thread B calls back into A; A will run at prio 100
   for the duration of the transaction
4) When thread A is done with the call from B, we will
   try to restore the prio back to 98. But we fail,
   because the process neither holds CAP_SYS_NICE
   nor has RLIMIT_RTPRIO set.

While the proper fix going forward will be to
correctly apply CAP_SYS_NICE or RLIMIT_RTPRIO,
for now it seems reasonable to not check permissions
on the restore path.

Bug: 62043063
Test: boots
Change-Id: Ibede5960c9b7bb786271c001e405de50be64d944
Signed-off-by: Martijn Coenen <maco@google.com>
This commit is contained in:
Martijn Coenen
2017-05-26 10:48:56 -07:00
parent 92d6d33a36
commit 911673a208

View File

@@ -743,7 +743,8 @@ static int to_kernel_prio(int policy, int user_priority) {
}
static void binder_set_priority(struct task_struct *task,
struct binder_priority desired)
struct binder_priority desired,
bool restore)
{
int priority; /* user-space prio value */
bool has_cap_nice;
@@ -756,7 +757,7 @@ static void binder_set_priority(struct task_struct *task,
priority = to_userspace_prio(policy, desired.prio);
if (is_rt_policy(policy) && !has_cap_nice) {
if (!restore && is_rt_policy(policy) && !has_cap_nice) {
long max_rtprio = task_rlimit(task, RLIMIT_RTPRIO);
if (max_rtprio == 0) {
policy = SCHED_NORMAL;
@@ -766,7 +767,7 @@ static void binder_set_priority(struct task_struct *task,
}
}
if (is_fair_policy(policy) && !has_cap_nice) {
if (!restore && is_fair_policy(policy) && !has_cap_nice) {
long min_nice = rlimit_to_nice(task_rlimit(task, RLIMIT_NICE));
if (min_nice > MAX_NICE) {
binder_user_error("%d RLIMIT_NICE not set\n",
@@ -832,7 +833,7 @@ static void binder_transaction_priority(struct task_struct *task,
desired_prio.prio = NICE_TO_PRIO(MIN_NICE);
}
binder_set_priority(task, desired_prio);
binder_set_priority(task, desired_prio, false /* restore */);
}
static struct binder_node *binder_get_node(struct binder_proc *proc,
@@ -2455,7 +2456,7 @@ static void binder_transaction(struct binder_proc *proc,
binder_proc_unlock(target_thread->proc, __LINE__);
binder_free_transaction(in_reply_to);
wake_up_interruptible_sync(&target_thread->wait);
binder_set_priority(current, saved_priority);
binder_set_priority(current, saved_priority, true /*restore*/);
} else if (!(t->flags & TF_ONE_WAY)) {
BUG_ON(t->buffer->async_transaction != 0);
binder_proc_lock(thread->proc, __LINE__);
@@ -2534,7 +2535,7 @@ err_no_context_mgr_node:
binder_enqueue_work(&thread->return_error.work,
&thread->todo, __LINE__);
binder_proc_unlock(thread->proc, __LINE__);
binder_set_priority(current, saved_priority);
binder_set_priority(current, saved_priority, true /*restore*/);
binder_send_failed_reply(in_reply_to, return_error);
} else {
thread->return_error.cmd = return_error;
@@ -3091,7 +3092,8 @@ retry:
if (wait_for_proc_work) {
BUG_ON(!(thread->looper & (BINDER_LOOPER_STATE_REGISTERED |
BINDER_LOOPER_STATE_ENTERED)));
binder_set_priority(current, proc->default_priority);
binder_set_priority(current, proc->default_priority,
true /* restore */);
}
if (non_block) {