LTS: Update Android binder code on Wahoo QT-QPR2

Cherry-pick (and squash) the following commits from wahoo-4.4 master:
* e57c9b42bc ANDROID: binder: eliminate diff between wahoo-4.4 and android-4.4-p
* 7c78280ad5 ANDROID: fix binder change in merge of 4.4.183
* 8c8becfb50 UPSTREAM: ANDROID: binder: correct the cmd print for BINDER_WORK_RETURN_ERROR
* 8d8dba571e UPSTREAM: ANDROID: binder: change down_write to down_read
* 8cfdf78fdf UPSTREAM: ANDROID: binder: re-order some conditions
* f9370f1680 UPSTREAM: ANDROID: binder: prevent transactions into own process.
* a97cdebfdb UPSTREAM: ANDROID: binder: synchronize_rcu() when using POLLFREE.
* 859f6fff3f UPSTREAM: binder: replace "%p" with "%pK"
* 139e6cba1b UPSTREAM: ANDROID: binder: remove WARN() for redundant txn error
* a632667db7 UPSTREAM: android: binder: Use true and false for boolean values
* 7b1d48ac5f UPSTREAM: android: binder: use VM_ALLOC to get vm area
* 6de52ef629 UPSTREAM: android: binder: Prefer __func__ to using hardcoded function name
* 423c3f5c01 UPSTREAM: android: binder: Use octal permissions
* 16cbb23d7b ANDROID: binder: Remove obsolete proc waitqueue.
* 414890c036 UPSTREAM: ANDROID: binder: make binder_alloc_new_buf_locked static and indent its arguments
* 6b044a7d70 UPSTREAM: android: binder: Check for errors in binder_alloc_shrinker_init().
* ce8d66c2cb ANDROID: binder: clarify deferred thread work.
* bfa9d65d0c UPSTREAM: android: binder: fix type mismatch warning
* a140fcd9c9 UPSTREAM: binder: free memory on error

The above commits originate from the android-4.4-p common kernel.
They were tested and committed to wahoo-4.4 master first.
This change brings the same updates to Wahoo QT-QPR2.
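
For background on the "re-order some conditions" pick: binder_validate_object()
computes buffer->data_size - sizeof(*hdr) in unsigned arithmetic, which wraps
when the buffer is smaller than an object header. The old ordering still
rejected such buffers (the second test caught them), but the new ordering never
evaluates the wrapping subtraction. A standalone userspace sketch of the effect
(not part of this patch; the alignment check is omitted):

    /* Sketch only: mirrors the size checks, not the real binder code. */
    #include <stdio.h>
    #include <stdint.h>
    #include <stddef.h>

    struct hdr { uint32_t type; };

    static int valid_old_order(size_t data_size, size_t offset)
    {
            return !(offset > data_size - sizeof(struct hdr) || /* may wrap */
                     data_size < sizeof(struct hdr));
    }

    static int valid_new_order(size_t data_size, size_t offset)
    {
            if (data_size < sizeof(struct hdr))     /* lower bound first */
                    return 0;
            return !(offset > data_size - sizeof(struct hdr));
    }

    int main(void)
    {
            /* a 2-byte buffer cannot hold a 4-byte header at any offset */
            printf("old: %d new: %d wrapped size: %zu\n",
                   valid_old_order(2, 0), valid_new_order(2, 0),
                   (size_t)2 - sizeof(struct hdr));
            return 0;
    }

Both versions reject the undersized buffer; the reordering is about clarity and
keeping static checkers quiet, not a behavior change.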

Bug: 115649143
Change-Id: If2bd1da782c6d878b6d04e32ca7a553980089ede
Signed-off-by: Petri Gynther <pgynther@google.com>
Committed by Petri Gynther on 2019-11-18 10:22:13 -08:00
parent 16370885ed
commit 2c5ee42de8
3 changed files with 99 additions and 56 deletions

drivers/android/binder.c

@@ -143,7 +143,7 @@ enum {
};
static uint32_t binder_debug_mask = BINDER_DEBUG_USER_ERROR |
BINDER_DEBUG_FAILED_TRANSACTION | BINDER_DEBUG_DEAD_TRANSACTION;
-module_param_named(debug_mask, binder_debug_mask, uint, S_IWUSR | S_IRUGO);
+module_param_named(debug_mask, binder_debug_mask, uint, 0644);
static char *binder_devices_param = CONFIG_ANDROID_BINDER_DEVICES;
module_param_named(devices, binder_devices_param, charp, S_IRUGO);
@@ -162,7 +162,7 @@ static int binder_set_stop_on_user_error(const char *val,
return ret;
}
module_param_call(stop_on_user_error, binder_set_stop_on_user_error,
-param_get_int, &binder_stop_on_user_error, S_IWUSR | S_IRUGO);
+param_get_int, &binder_stop_on_user_error, 0644);
#define binder_debug(mask, x...) \
do { \
@@ -251,7 +251,7 @@ static struct binder_transaction_log_entry *binder_transaction_log_add(
unsigned int cur = atomic_inc_return(&log->cur);
if (cur >= ARRAY_SIZE(log->entry))
-log->full = 1;
+log->full = true;
e = &log->entry[cur % ARRAY_SIZE(log->entry)];
WRITE_ONCE(e->debug_id_done, 0);
/*
@@ -520,8 +520,6 @@ struct binder_priority {
* (protected by @inner_lock)
* @todo: list of work for this process
* (protected by @inner_lock)
-* @wait: wait queue head to wait for proc work
-* (invariant after initialized)
* @stats: per-process binder statistics
* (atomics, no lock needed)
* @delivered_death: list of delivered death notification
@@ -565,7 +563,6 @@ struct binder_proc {
bool is_dead;
struct list_head todo;
-wait_queue_head_t wait;
struct binder_stats stats;
struct list_head delivered_death;
int max_threads;
@@ -844,7 +841,7 @@ binder_enqueue_work_ilocked(struct binder_work *work,
}
/**
-* binder_enqueue_thread_work_ilocked_nowake() - Add thread work
+* binder_enqueue_deferred_thread_work_ilocked() - Add deferred thread work
* @thread: thread to queue work to
* @work: struct binder_work to add to list
*
@@ -855,8 +852,8 @@ binder_enqueue_work_ilocked(struct binder_work *work,
* Requires the proc->inner_lock to be held.
*/
static void
-binder_enqueue_thread_work_ilocked_nowake(struct binder_thread *thread,
-struct binder_work *work)
+binder_enqueue_deferred_thread_work_ilocked(struct binder_thread *thread,
+struct binder_work *work)
{
binder_enqueue_work_ilocked(work, &thread->todo);
}
@@ -2222,8 +2219,14 @@ static void binder_send_failed_reply(struct binder_transaction *t,
&target_thread->reply_error.work);
wake_up_interruptible(&target_thread->wait);
} else {
-WARN(1, "Unexpected reply error: %u\n",
-target_thread->reply_error.cmd);
+/*
+* Cannot get here for normal operation, but
+* we can if multiple synchronous transactions
+* are sent without blocking for responses.
+* Just ignore the 2nd error in this case.
+*/
+pr_warn("Unexpected reply error: %u\n",
+target_thread->reply_error.cmd);
}
binder_inner_proc_unlock(target_thread->proc);
binder_thread_dec_tmpref(target_thread);
@@ -2283,8 +2286,8 @@ static size_t binder_validate_object(struct binder_buffer *buffer, u64 offset)
struct binder_object_header *hdr;
size_t object_size = 0;
-if (offset > buffer->data_size - sizeof(*hdr) ||
-buffer->data_size < sizeof(*hdr) ||
+if (buffer->data_size < sizeof(*hdr) ||
+offset > buffer->data_size - sizeof(*hdr) ||
!IS_ALIGNED(offset, sizeof(u32)))
return 0;
@@ -2424,7 +2427,7 @@ static void binder_transaction_buffer_release(struct binder_proc *proc,
int debug_id = buffer->debug_id;
binder_debug(BINDER_DEBUG_TRANSACTION,
"%d buffer release %d, size %zd-%zd, failed at %p\n",
"%d buffer release %d, size %zd-%zd, failed at %pK\n",
proc->pid, buffer->debug_id,
buffer->data_size, buffer->offsets_size, failed_at);
@@ -2539,7 +2542,7 @@ static void binder_transaction_buffer_release(struct binder_proc *proc,
debug_id, (u64)fda->num_fds);
continue;
}
-fd_array = (u32 *)(parent_buffer + fda->parent_offset);
+fd_array = (u32 *)(parent_buffer + (uintptr_t)fda->parent_offset);
for (fd_index = 0; fd_index < fda->num_fds; fd_index++)
task_close_fd(proc, fd_array[fd_index]);
} break;
@@ -2763,7 +2766,7 @@ static int binder_translate_fd_array(struct binder_fd_array_object *fda,
*/
parent_buffer = parent->buffer -
binder_alloc_get_user_buffer_offset(&target_proc->alloc);
-fd_array = (u32 *)(parent_buffer + fda->parent_offset);
+fd_array = (u32 *)(parent_buffer + (uintptr_t)fda->parent_offset);
if (!IS_ALIGNED((unsigned long)fd_array, sizeof(u32))) {
binder_user_error("%d:%d parent offset not aligned correctly.\n",
proc->pid, thread->pid);
@@ -2829,7 +2832,7 @@ static int binder_fixup_parent(struct binder_transaction *t,
proc->pid, thread->pid);
return -EINVAL;
}
-parent_buffer = (u8 *)(parent->buffer -
+parent_buffer = (u8 *)((uintptr_t)parent->buffer -
binder_alloc_get_user_buffer_offset(
&target_proc->alloc));
*(binder_uintptr_t *)(parent_buffer + bp->parent_offset) = bp->buffer;
@@ -2873,7 +2876,7 @@ static bool binder_proc_transaction(struct binder_transaction *t,
if (node->has_async_transaction) {
pending_async = true;
} else {
-node->has_async_transaction = 1;
+node->has_async_transaction = true;
}
}
@@ -3072,6 +3075,14 @@ static void binder_transaction(struct binder_proc *proc,
else
return_error = BR_DEAD_REPLY;
mutex_unlock(&context->context_mgr_node_lock);
+if (target_node && target_proc == proc) {
+binder_user_error("%d:%d got transaction to context manager from process owning it\n",
+proc->pid, thread->pid);
+return_error = BR_FAILED_REPLY;
+return_error_param = -EINVAL;
+return_error_line = __LINE__;
+goto err_invalid_target_handle;
+}
}
if (!target_node) {
/*
@@ -3235,7 +3246,7 @@ static void binder_transaction(struct binder_proc *proc,
ALIGN(secctx_sz, sizeof(u64));
char *kptr = t->buffer->data + buf_offset;
-t->security_ctx = (binder_uintptr_t)kptr +
+t->security_ctx = (uintptr_t)kptr +
binder_alloc_get_user_buffer_offset(&target_proc->alloc);
memcpy(kptr, secctx, secctx_sz);
security_release_secctx(secctx, secctx_sz);
@@ -3276,9 +3287,9 @@ static void binder_transaction(struct binder_proc *proc,
goto err_bad_offset;
}
if (!IS_ALIGNED(extra_buffers_size, sizeof(u64))) {
binder_user_error("%d:%d got transaction with unaligned buffers size, %llu\n",
binder_user_error("%d:%d got transaction with unaligned buffers size, %lld\n",
proc->pid, thread->pid,
extra_buffers_size);
(u64)extra_buffers_size);
return_error = BR_FAILED_REPLY;
return_error_param = -EINVAL;
return_error_line = __LINE__;
@@ -3457,7 +3468,14 @@ static void binder_transaction(struct binder_proc *proc,
} else if (!(t->flags & TF_ONE_WAY)) {
BUG_ON(t->buffer->async_transaction != 0);
binder_inner_proc_lock(proc);
-binder_enqueue_thread_work_ilocked_nowake(thread, tcomplete);
+/*
+* Defer the TRANSACTION_COMPLETE, so we don't return to
+* userspace immediately; this allows the target process to
+* immediately start processing this transaction, reducing
+* latency. We will then return the TRANSACTION_COMPLETE when
+* the target replies (or there is an error).
+*/
+binder_enqueue_deferred_thread_work_ilocked(thread, tcomplete);
t->need_reply = 1;
t->from_parent = thread->transaction_stack;
thread->transaction_stack = t;
@@ -3771,7 +3789,7 @@ static int binder_thread_write(struct binder_proc *proc,
w = binder_dequeue_work_head_ilocked(
&buf_node->async_todo);
if (!w) {
-buf_node->has_async_transaction = 0;
+buf_node->has_async_transaction = false;
} else {
binder_enqueue_work_ilocked(
w, &proc->todo);
@@ -3993,7 +4011,7 @@ static int binder_thread_write(struct binder_proc *proc,
}
}
binder_debug(BINDER_DEBUG_DEAD_BINDER,
"%d:%d BC_DEAD_BINDER_DONE %016llx found %p\n",
"%d:%d BC_DEAD_BINDER_DONE %016llx found %pK\n",
proc->pid, thread->pid, (u64)cookie,
death);
if (death == NULL) {
@@ -4201,6 +4219,7 @@ retry:
binder_inner_proc_unlock(proc);
if (put_user(e->cmd, (uint32_t __user *)ptr))
return -EFAULT;
+cmd = e->cmd;
e->cmd = BR_OK;
ptr += sizeof(uint32_t);
@@ -4394,9 +4413,9 @@ retry:
ALIGN(t->buffer->data_size,
sizeof(void *));
-tr.secctx = t->security_ctx;
if (t->security_ctx) {
cmd = BR_TRANSACTION_SEC_CTX;
+tr.secctx = t->security_ctx;
trsize = sizeof(tr);
}
if (put_user(cmd, (uint32_t __user *)ptr)) {
@@ -4426,7 +4445,7 @@ retry:
"%d:%d %s %d %d:%d, cmd %d size %zd-%zd ptr %016llx-%016llx\n",
proc->pid, thread->pid,
(cmd == BR_TRANSACTION) ? "BR_TRANSACTION" :
-(cmd == BR_TRANSACTION_SEC_CTX) ?
+(cmd == BR_TRANSACTION_SEC_CTX) ?
"BR_TRANSACTION_SEC_CTX" : "BR_REPLY",
t->debug_id, t_from ? t_from->proc->pid : 0,
t_from ? t_from->pid : 0, cmd,
@@ -4675,6 +4694,15 @@ static int binder_thread_release(struct binder_proc *proc,
binder_inner_proc_unlock(thread->proc);
+/*
+* This is needed to avoid races between wake_up_poll() above and
+* and ep_remove_waitqueue() called for other reasons (eg the epoll file
+* descriptor being closed); ep_remove_waitqueue() holds an RCU read
+* lock, so we can be sure it's done after calling synchronize_rcu().
+*/
+if (thread->looper & BINDER_LOOPER_STATE_POLL)
+synchronize_rcu();
if (send_reply)
binder_send_failed_reply(send_reply, BR_DEAD_REPLY);
binder_release_work(proc, &thread->todo);
@@ -4955,7 +4983,6 @@ static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
ret = -EINVAL;
goto err;
}
if (put_user(BINDER_CURRENT_PROTOCOL_VERSION,
&ver->protocol_version)) {
ret = -EINVAL;
@@ -5074,7 +5101,9 @@ static int binder_mmap(struct file *filp, struct vm_area_struct *vma)
failure_string = "bad vm_flags";
goto err_bad_arg;
}
-vma->vm_flags = (vma->vm_flags | VM_DONTCOPY) & ~VM_MAYWRITE;
+vma->vm_flags |= VM_DONTCOPY | VM_MIXEDMAP;
+vma->vm_flags &= ~VM_MAYWRITE;
vma->vm_ops = &binder_vm_ops;
vma->vm_private_data = proc;
@@ -5087,7 +5116,7 @@ static int binder_mmap(struct file *filp, struct vm_area_struct *vma)
return 0;
err_bad_arg:
pr_err("binder_mmap: %d %lx-%lx %s failed %d\n",
pr_err("%s: %d %lx-%lx %s failed %d\n", __func__,
proc->pid, vma->vm_start, vma->vm_end, failure_string, ret);
return ret;
}
@@ -5097,7 +5126,7 @@ static int binder_open(struct inode *nodp, struct file *filp)
struct binder_proc *proc;
struct binder_device *binder_dev;
binder_debug(BINDER_DEBUG_OPEN_CLOSE, "binder_open: %d:%d\n",
binder_debug(BINDER_DEBUG_OPEN_CLOSE, "%s: %d:%d\n", __func__,
current->group_leader->pid, current->pid);
proc = kzalloc(sizeof(*proc), GFP_KERNEL);
@@ -5144,7 +5173,7 @@ static int binder_open(struct inode *nodp, struct file *filp)
* anyway print all contexts that a given PID has, so this
* is not a problem.
*/
-proc->debugfs_entry = debugfs_create_file(strbuf, S_IRUGO,
+proc->debugfs_entry = debugfs_create_file(strbuf, 0444,
binder_debugfs_dir_entry_proc,
(void *)(unsigned long)proc->pid,
&binder_proc_fops);
@@ -5411,7 +5440,7 @@ static void print_binder_transaction_ilocked(struct seq_file *m,
spin_lock(&t->lock);
to_proc = t->to_proc;
seq_printf(m,
"%s %d: %p from %d:%d to %d:%d code %x flags %x pri %d:%d r%d",
"%s %d: %pK from %d:%d to %d:%d code %x flags %x pri %d:%d r%d",
prefix, t->debug_id, t,
t->from ? t->from->proc->pid : 0,
t->from ? t->from->pid : 0,
@@ -5436,7 +5465,7 @@ static void print_binder_transaction_ilocked(struct seq_file *m,
}
if (buffer->target_node)
seq_printf(m, " node %d", buffer->target_node->debug_id);
seq_printf(m, " size %zd:%zd data %p\n",
seq_printf(m, " size %zd:%zd data %pK\n",
buffer->data_size, buffer->offsets_size,
buffer->data);
}
@@ -5971,11 +6000,13 @@ static int __init init_binder_device(const char *name)
static int __init binder_init(void)
{
int ret;
-char *device_name, *device_names;
+char *device_name, *device_names, *device_tmp;
struct binder_device *device;
struct hlist_node *tmp;
-binder_alloc_shrinker_init();
+ret = binder_alloc_shrinker_init();
+if (ret)
+return ret;
atomic_set(&binder_transaction_log.cur, ~0U);
atomic_set(&binder_transaction_log_failed.cur, ~0U);
@@ -5990,27 +6021,27 @@ static int __init binder_init(void)
if (binder_debugfs_dir_entry_root) {
debugfs_create_file("state",
-S_IRUGO,
+0444,
binder_debugfs_dir_entry_root,
NULL,
&binder_state_fops);
debugfs_create_file("stats",
-S_IRUGO,
+0444,
binder_debugfs_dir_entry_root,
NULL,
&binder_stats_fops);
debugfs_create_file("transactions",
-S_IRUGO,
+0444,
binder_debugfs_dir_entry_root,
NULL,
&binder_transactions_fops);
debugfs_create_file("transaction_log",
-S_IRUGO,
+0444,
binder_debugfs_dir_entry_root,
&binder_transaction_log,
&binder_transaction_log_fops);
debugfs_create_file("failed_transaction_log",
-S_IRUGO,
+0444,
binder_debugfs_dir_entry_root,
&binder_transaction_log_failed,
&binder_transaction_log_fops);
@@ -6027,7 +6058,8 @@ static int __init binder_init(void)
}
strcpy(device_names, binder_devices_param);
-while ((device_name = strsep(&device_names, ","))) {
+device_tmp = device_names;
+while ((device_name = strsep(&device_tmp, ","))) {
ret = init_binder_device(device_name);
if (ret)
goto err_init_binder_device_failed;
@@ -6041,6 +6073,9 @@ err_init_binder_device_failed:
hlist_del(&device->hlist);
kfree(device);
}
+kfree(device_names);
err_alloc_device_names_failed:
debugfs_remove_recursive(binder_debugfs_dir_entry_root);
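
The last two hunks above go together: strsep() advances the pointer it is
handed, so after the parsing loop device_names no longer points at the start of
the allocation and could not be passed to kfree(). Walking a scratch cursor
(device_tmp) keeps the original pointer intact for the new error-path kfree().
A minimal userspace sketch of the same pattern (not kernel code; strdup()/free()
stand in for kstrdup()/kfree()):

    #define _DEFAULT_SOURCE         /* for strsep()/strdup() on glibc */
    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    int main(void)
    {
            char *device_names = strdup("binder,hwbinder,vndbinder");
            char *device_tmp = device_names;  /* scratch cursor for strsep() */
            char *device_name;

            if (!device_names)
                    return 1;

            while ((device_name = strsep(&device_tmp, ",")))
                    printf("init device: %s\n", device_name);

            free(device_names);  /* original pointer still marks the allocation */
            return 0;
    }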

drivers/android/binder_alloc.c

@@ -218,13 +218,12 @@ static int binder_update_page_range(struct binder_alloc *alloc, int allocate,
mm = alloc->vma_vm_mm;
if (mm) {
-down_write(&mm->mmap_sem);
+down_read(&mm->mmap_sem);
if (!mmget_still_valid(mm)) {
if (allocate == 0)
goto free_range;
goto err_no_vma;
}
vma = alloc->vma;
}
@@ -293,7 +292,7 @@ static int binder_update_page_range(struct binder_alloc *alloc, int allocate,
/* vm_insert_page does not seem to increment the refcount */
}
if (mm) {
-up_write(&mm->mmap_sem);
+up_read(&mm->mmap_sem);
mmput(mm);
}
return 0;
@@ -326,17 +325,18 @@ err_page_ptr_cleared:
}
err_no_vma:
if (mm) {
-up_write(&mm->mmap_sem);
+up_read(&mm->mmap_sem);
mmput(mm);
}
return vma ? -ENOMEM : -ESRCH;
}
-struct binder_buffer *binder_alloc_new_buf_locked(struct binder_alloc *alloc,
-size_t data_size,
-size_t offsets_size,
-size_t extra_buffers_size,
-int is_async)
+static struct binder_buffer *binder_alloc_new_buf_locked(
+struct binder_alloc *alloc,
+size_t data_size,
+size_t offsets_size,
+size_t extra_buffers_size,
+int is_async)
{
struct rb_node *n = alloc->free_buffers.rb_node;
struct binder_buffer *buffer;
@@ -674,7 +674,7 @@ int binder_alloc_mmap_handler(struct binder_alloc *alloc,
goto err_already_mapped;
}
-area = get_vm_area(vma->vm_end - vma->vm_start, VM_IOREMAP);
+area = get_vm_area(vma->vm_end - vma->vm_start, VM_ALLOC);
if (area == NULL) {
ret = -ENOMEM;
failure_string = "get_vm_area";
@@ -931,7 +931,8 @@ enum lru_status binder_alloc_free_page(struct list_head *item,
page_addr = (uintptr_t)alloc->buffer + index * PAGE_SIZE;
mm = alloc->vma_vm_mm;
-if (!atomic_inc_not_zero(&mm->mm_users))
+/* Same as mmget_not_zero() in later kernel versions */
+if (!atomic_inc_not_zero(&alloc->vma_vm_mm->mm_users))
goto err_mmget;
if (!down_write_trylock(&mm->mmap_sem))
goto err_down_write_mmap_sem_failed;
@@ -1011,8 +1012,14 @@ void binder_alloc_init(struct binder_alloc *alloc)
INIT_LIST_HEAD(&alloc->buffers);
}
-void binder_alloc_shrinker_init(void)
+int binder_alloc_shrinker_init(void)
{
-list_lru_init(&binder_alloc_lru);
-register_shrinker(&binder_shrinker);
+int ret = list_lru_init(&binder_alloc_lru);
+if (ret == 0) {
+ret = register_shrinker(&binder_shrinker);
+if (ret)
+list_lru_destroy(&binder_alloc_lru);
+}
+return ret;
}
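
binder_alloc_shrinker_init() now unwinds its first step when the second one
fails and propagates the error, so binder_init() can bail out instead of
running with a half-initialized shrinker. A hypothetical userspace sketch of
the same init/unwind pattern (all function names invented for illustration):

    #include <stdio.h>

    static int lru_ready;

    static int  fake_list_lru_init(void)     { lru_ready = 1; return 0; }
    static void fake_list_lru_destroy(void)  { lru_ready = 0; }
    static int  fake_register_shrinker(void) { return -1; /* simulate failure */ }

    static int shrinker_init(void)
    {
            int ret = fake_list_lru_init();

            if (ret == 0) {
                    ret = fake_register_shrinker();
                    if (ret)
                            fake_list_lru_destroy();  /* undo step one */
            }
            return ret;
    }

    int main(void)
    {
            printf("shrinker_init() = %d, lru_ready = %d\n",
                   shrinker_init(), lru_ready);
            return 0;
    }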

drivers/android/binder_alloc.h

@@ -129,7 +129,7 @@ extern struct binder_buffer *binder_alloc_new_buf(struct binder_alloc *alloc,
size_t extra_buffers_size,
int is_async);
extern void binder_alloc_init(struct binder_alloc *alloc);
-void binder_alloc_shrinker_init(void);
+extern int binder_alloc_shrinker_init(void);
extern void binder_alloc_vma_close(struct binder_alloc *alloc);
extern struct binder_buffer *
binder_alloc_prepare_to_free(struct binder_alloc *alloc,
@@ -184,3 +184,4 @@ binder_alloc_get_user_buffer_offset(struct binder_alloc *alloc)
}
#endif /* _LINUX_BINDER_ALLOC_H */