From 5af2825013b04d6e0b4da23d81c025ec52bab416 Mon Sep 17 00:00:00 2001 From: kondors1995 Date: Wed, 7 Feb 2024 15:48:41 +0200 Subject: [PATCH 1/9] Revert "binder: Adapt to new binder alloc API" This reverts commit 0adda2e6bd552432289a7e630611bc94dc60c218. --- drivers/android/binder.c | 39 ++++++++++++++++--------- drivers/android/binder_alloc_selftest.c | 8 ++--- drivers/android/binder_trace.h | 2 +- 3 files changed, 30 insertions(+), 19 deletions(-) diff --git a/drivers/android/binder.c b/drivers/android/binder.c index 62c08debcb11..55274e7c15da 100644 --- a/drivers/android/binder.c +++ b/drivers/android/binder.c @@ -2193,8 +2193,9 @@ static void binder_transaction_buffer_release(struct binder_proc *proc, * Convert the address to an offset relative to * the base of the transaction buffer. */ - fda_offset = parent->buffer - buffer->user_data + - fda->parent_offset; + fda_offset = + (parent->buffer - (uintptr_t)buffer->user_data) + + fda->parent_offset; for (fd_index = 0; fd_index < fda->num_fds; fd_index++) { u32 fd; @@ -2449,7 +2450,7 @@ static int binder_translate_fd_array(struct binder_fd_array_object *fda, * Convert the address to an offset relative to * the base of the transaction buffer. 
*/ - fda_offset = parent->buffer - t->buffer->user_data + + fda_offset = (parent->buffer - (uintptr_t)t->buffer->user_data) + fda->parent_offset; if (!IS_ALIGNED((unsigned long)fda_offset, sizeof(u32))) { binder_user_error("%d:%d parent offset not aligned correctly.\n", @@ -2517,9 +2518,14 @@ static int binder_fixup_parent(struct binder_transaction *t, proc->pid, thread->pid); return -EINVAL; } - buffer_offset = bp->parent_offset + parent->buffer - b->user_data; - binder_alloc_copy_to_buffer(&target_proc->alloc, b, buffer_offset, - &bp->buffer, sizeof(bp->buffer)); + buffer_offset = bp->parent_offset + + (uintptr_t)parent->buffer - (uintptr_t)b->user_data; + if (binder_alloc_copy_to_buffer(&target_proc->alloc, b, buffer_offset, + &bp->buffer, sizeof(bp->buffer))) { + binder_user_error("%d:%d got transaction with invalid parent offset\n", + proc->pid, thread->pid); + return -EINVAL; + } return 0; } @@ -3055,7 +3061,7 @@ static void binder_transaction(struct binder_proc *proc, t->buffer = binder_alloc_new_buf(&target_proc->alloc, tr->data_size, tr->offsets_size, extra_buffers_size, - !reply && (t->flags & TF_ONE_WAY)); + !reply && (t->flags & TF_ONE_WAY), current->tgid); if (IS_ERR(t->buffer)) { /* * -ESRCH indicates VMA cleared. The target is dying. 
@@ -3074,10 +3080,14 @@ static void binder_transaction(struct binder_proc *proc, ALIGN(extra_buffers_size, sizeof(void *)) - ALIGN(secctx_sz, sizeof(u64)); - t->security_ctx = t->buffer->user_data + buf_offset; - binder_alloc_copy_to_buffer(&target_proc->alloc, - t->buffer, buf_offset, - secctx, secctx_sz); + t->security_ctx = (uintptr_t)t->buffer->user_data + buf_offset; + err = binder_alloc_copy_to_buffer(&target_proc->alloc, + t->buffer, buf_offset, + secctx, secctx_sz); + if (err) { + t->security_ctx = 0; + WARN_ON(1); + } security_release_secctx(secctx, secctx_sz); secctx = NULL; } @@ -3302,7 +3312,8 @@ static void binder_transaction(struct binder_proc *proc, goto err_copy_data_failed; } /* Fixup buffer pointer to target proc address space */ - bp->buffer = t->buffer->user_data + sg_buf_offset; + bp->buffer = (uintptr_t) + t->buffer->user_data + sg_buf_offset; sg_buf_offset += ALIGN(bp->length, sizeof(u64)); num_valid = (buffer_offset - off_start_offset) / @@ -4440,7 +4451,7 @@ retry: } trd->data_size = t->buffer->data_size; trd->offsets_size = t->buffer->offsets_size; - trd->data.ptr.buffer = t->buffer->user_data; + trd->data.ptr.buffer = (uintptr_t)t->buffer->user_data; trd->data.ptr.offsets = trd->data.ptr.buffer + ALIGN(t->buffer->data_size, sizeof(void *)); @@ -5732,7 +5743,7 @@ static void print_binder_transaction_ilocked(struct seq_file *m, } if (buffer->target_node) seq_printf(m, " node %d", buffer->target_node->debug_id); - seq_printf(m, " size %zd:%zd data %lx\n", + seq_printf(m, " size %zd:%zd data %pK\n", buffer->data_size, buffer->offsets_size, buffer->user_data); } diff --git a/drivers/android/binder_alloc_selftest.c b/drivers/android/binder_alloc_selftest.c index 1ebf5a85e6fd..c839c490fde3 100644 --- a/drivers/android/binder_alloc_selftest.c +++ b/drivers/android/binder_alloc_selftest.c @@ -102,11 +102,11 @@ static bool check_buffer_pages_allocated(struct binder_alloc *alloc, struct binder_buffer *buffer, size_t size) { - unsigned long 
page_addr; - unsigned long end; + void __user *page_addr; + void __user *end; int page_index; - end = PAGE_ALIGN(buffer->user_data + size); + end = (void __user *)PAGE_ALIGN((uintptr_t)buffer->user_data + size); page_addr = buffer->user_data; for (; page_addr < end; page_addr += PAGE_SIZE) { page_index = (page_addr - alloc->buffer) / PAGE_SIZE; @@ -128,7 +128,7 @@ static void binder_selftest_alloc_buf(struct binder_alloc *alloc, int i; for (i = 0; i < BUFFER_NUM; i++) { - buffers[i] = binder_alloc_new_buf(alloc, sizes[i], 0, 0, 0); + buffers[i] = binder_alloc_new_buf(alloc, sizes[i], 0, 0, 0, 0); if (IS_ERR(buffers[i]) || !check_buffer_pages_allocated(alloc, buffers[i], sizes[i])) { diff --git a/drivers/android/binder_trace.h b/drivers/android/binder_trace.h index d3391ad16aeb..f45173011d8f 100644 --- a/drivers/android/binder_trace.h +++ b/drivers/android/binder_trace.h @@ -312,7 +312,7 @@ DEFINE_EVENT(binder_buffer_class, binder_transaction_update_buffer_release, TRACE_EVENT(binder_update_page_range, TP_PROTO(struct binder_alloc *alloc, bool allocate, - unsigned long start, unsigned long end), + void __user *start, void __user *end), TP_ARGS(alloc, allocate, start, end), TP_STRUCT__entry( __field(int, proc) From 067b314ffc202ed0c5b9ffcc2d77c861fc194b1f Mon Sep 17 00:00:00 2001 From: kondors1995 Date: Wed, 7 Feb 2024 15:48:43 +0200 Subject: [PATCH 2/9] Revert "binder: Fix build" This reverts commit 2cc7bbb9a9297175982c28134684bf595b50d14e. 
--- drivers/android/binder_alloc.c | 12 +++++++----- 1 file changed, 7 insertions(+), 5 deletions(-) diff --git a/drivers/android/binder_alloc.c b/drivers/android/binder_alloc.c index 675d449b0a8d..9765a476059d 100644 --- a/drivers/android/binder_alloc.c +++ b/drivers/android/binder_alloc.c @@ -25,6 +25,7 @@ #include #include "binder_alloc.h" #include "binder_trace.h" +#include struct list_lru binder_freelist; @@ -236,7 +237,7 @@ static int binder_install_single_page(struct binder_alloc *alloc, * Protected with mmap_sem in write mode as multiple tasks * might race to install the same page. */ - down_read(&alloc->vma_vm_mm->mmap_sem); + mmap_write_lock(alloc->vma_vm_mm); if (binder_get_installed_page(lru_page)) goto out; @@ -265,7 +266,7 @@ static int binder_install_single_page(struct binder_alloc *alloc, /* Mark page installation complete and safe to use */ binder_set_installed_page(lru_page, page); out: - up_read(&alloc->vma_vm_mm->mmap_sem); + mmap_write_unlock(alloc->vma_vm_mm); mmput_async(alloc->vma_vm_mm); return ret; } @@ -466,6 +467,7 @@ static struct binder_buffer *binder_alloc_new_buf_locked( unsigned long curr_last_page; size_t buffer_size; + trace_android_vh_binder_alloc_new_buf_locked(size, alloc, is_async); if (is_async && alloc->free_async_space < size) { binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC, @@ -1088,7 +1090,7 @@ enum lru_status binder_alloc_free_page(struct list_head *item, if (!mmget_not_zero(mm)) goto err_mmget; - if (!down_read_trylock(&mm->mmap_sem)) + if (!mmap_read_trylock(mm)) goto err_mmap_read_lock_failed; if (!spin_trylock(&alloc->lock)) goto err_get_alloc_lock_failed; @@ -1121,7 +1123,7 @@ enum lru_status binder_alloc_free_page(struct list_head *item, trace_binder_unmap_user_end(alloc, index); } - up_read(&mm->mmap_sem); + mmap_read_unlock(mm); mmput_async(mm); __free_page(page_to_free); @@ -1132,7 +1134,7 @@ err_invalid_vma: err_page_already_freed: spin_unlock(&alloc->lock); err_get_alloc_lock_failed: - up_read(&mm->mmap_sem); + 
mmap_read_unlock(mm); err_mmap_read_lock_failed: mmput_async(mm); err_mmget: From 5e5fb6cb79898efbaf68297fe5513b63669459cc Mon Sep 17 00:00:00 2001 From: kondors1995 Date: Wed, 7 Feb 2024 15:48:43 +0200 Subject: [PATCH 3/9] Revert "binder_alloc: Checkout to android13-5.10" This reverts commit a4d0192fe99688841f85006070ba2128d75f3777. --- drivers/android/binder_alloc.c | 1456 +++++++++++------------ drivers/android/binder_alloc.h | 60 +- drivers/android/binder_alloc_selftest.c | 11 +- 3 files changed, 749 insertions(+), 778 deletions(-) diff --git a/drivers/android/binder_alloc.c b/drivers/android/binder_alloc.c index 9765a476059d..c94f50380631 100644 --- a/drivers/android/binder_alloc.c +++ b/drivers/android/binder_alloc.c @@ -25,9 +25,8 @@ #include #include "binder_alloc.h" #include "binder_trace.h" -#include -struct list_lru binder_freelist; +struct list_lru binder_alloc_lru; static DEFINE_MUTEX(binder_alloc_mmap_lock); @@ -131,20 +130,23 @@ static void binder_insert_allocated_buffer_locked( static struct binder_buffer *binder_alloc_prepare_to_free_locked( struct binder_alloc *alloc, - unsigned long user_ptr) + uintptr_t user_ptr) { struct rb_node *n = alloc->allocated_buffers.rb_node; struct binder_buffer *buffer; + void __user *uptr; + + uptr = (void __user *)user_ptr; while (n) { buffer = rb_entry(n, struct binder_buffer, rb_node); BUG_ON(buffer->free); - if (user_ptr < buffer->user_data) { + if (uptr < buffer->user_data) n = n->rb_left; - } else if (user_ptr > buffer->user_data) { + else if (uptr > buffer->user_data) n = n->rb_right; - } else { + else { /* * Guard against user threads attempting to * free the buffer when in use by kernel or @@ -171,151 +173,65 @@ static struct binder_buffer *binder_alloc_prepare_to_free_locked( * Return: Pointer to buffer or NULL */ struct binder_buffer *binder_alloc_prepare_to_free(struct binder_alloc *alloc, - unsigned long user_ptr) + uintptr_t user_ptr) { struct binder_buffer *buffer; - spin_lock(&alloc->lock); + 
mutex_lock(&alloc->mutex); buffer = binder_alloc_prepare_to_free_locked(alloc, user_ptr); - spin_unlock(&alloc->lock); + mutex_unlock(&alloc->mutex); return buffer; } -static inline void -binder_set_installed_page(struct binder_lru_page *lru_page, - struct page *page) -{ - /* Pairs with acquire in binder_get_installed_page() */ - smp_store_release(&lru_page->page_ptr, page); -} - -static inline struct page * -binder_get_installed_page(struct binder_lru_page *lru_page) -{ - /* Pairs with release in binder_set_installed_page() */ - return smp_load_acquire(&lru_page->page_ptr); -} - -static void binder_lru_freelist_add(struct binder_alloc *alloc, - unsigned long start, unsigned long end) +static int binder_update_page_range(struct binder_alloc *alloc, int allocate, + void __user *start, void __user *end) { + void __user *page_addr; + unsigned long user_page_addr; struct binder_lru_page *page; - unsigned long page_addr; + struct vm_area_struct *vma = NULL; + struct mm_struct *mm = NULL; + bool need_mm = false; - trace_binder_update_page_range(alloc, false, start, end); + binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC, + "%d: %s pages %pK-%pK\n", alloc->pid, + allocate ? 
"allocate" : "free", start, end); + + if (end <= start) + return 0; + + trace_binder_update_page_range(alloc, allocate, start, end); + + if (allocate == 0) + goto free_range; for (page_addr = start; page_addr < end; page_addr += PAGE_SIZE) { - size_t index; - int ret; - - index = (page_addr - alloc->buffer) / PAGE_SIZE; - page = &alloc->pages[index]; - - if (!binder_get_installed_page(page)) - continue; - - trace_binder_free_lru_start(alloc, index); - - ret = list_lru_add(&binder_freelist, &page->lru); - WARN_ON(!ret); - - trace_binder_free_lru_end(alloc, index); - } -} - -static int binder_install_single_page(struct binder_alloc *alloc, - struct binder_lru_page *lru_page, - unsigned long addr) -{ - struct page *page; - int ret = 0; - - if (!mmget_not_zero(alloc->vma_vm_mm)) - return -ESRCH; - - /* - * Protected with mmap_sem in write mode as multiple tasks - * might race to install the same page. - */ - mmap_write_lock(alloc->vma_vm_mm); - if (binder_get_installed_page(lru_page)) - goto out; - - if (!alloc->vma) { - pr_err("%d: %s failed, no vma\n", alloc->pid, __func__); - ret = -ESRCH; - goto out; + page = &alloc->pages[(page_addr - alloc->buffer) / PAGE_SIZE]; + if (!page->page_ptr) { + need_mm = true; + break; + } } - page = alloc_page(GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO); - if (!page) { - pr_err("%d: failed to allocate page\n", alloc->pid); - ret = -ENOMEM; - goto out; + if (need_mm && mmget_not_zero(alloc->vma_vm_mm)) + mm = alloc->vma_vm_mm; + + if (mm) { + down_read(&mm->mmap_sem); + vma = alloc->vma; } - ret = vm_insert_page(alloc->vma, addr, page); - if (ret) { - pr_err("%d: %s failed to insert page at offset %lx with %d\n", - alloc->pid, __func__, addr - alloc->buffer, ret); - __free_page(page); - ret = -ENOMEM; - goto out; + if (!vma && need_mm) { + binder_alloc_debug(BINDER_DEBUG_USER_ERROR, + "%d: binder_alloc_buf failed to map pages in userspace, no vma\n", + alloc->pid); + goto err_no_vma; } - /* Mark page installation complete and safe to use 
*/ - binder_set_installed_page(lru_page, page); -out: - mmap_write_unlock(alloc->vma_vm_mm); - mmput_async(alloc->vma_vm_mm); - return ret; -} - -static int binder_install_buffer_pages(struct binder_alloc *alloc, - struct binder_buffer *buffer, - size_t size) -{ - struct binder_lru_page *page; - unsigned long start, final; - unsigned long page_addr; - - start = buffer->user_data & PAGE_MASK; - final = PAGE_ALIGN(buffer->user_data + size); - - for (page_addr = start; page_addr < final; page_addr += PAGE_SIZE) { - unsigned long index; - int ret; - - index = (page_addr - alloc->buffer) / PAGE_SIZE; - page = &alloc->pages[index]; - - if (binder_get_installed_page(page)) - continue; - - trace_binder_alloc_page_start(alloc, index); - - ret = binder_install_single_page(alloc, page, page_addr); - if (ret) - return ret; - - trace_binder_alloc_page_end(alloc, index); - } - - return 0; -} - -/* The range of pages should exclude those shared with other buffers */ -static void binder_lru_freelist_del(struct binder_alloc *alloc, - unsigned long start, unsigned long end) -{ - struct binder_lru_page *page; - unsigned long page_addr; - - trace_binder_update_page_range(alloc, true, start, end); - for (page_addr = start; page_addr < end; page_addr += PAGE_SIZE) { - unsigned long index; + int ret; bool on_lru; + size_t index; index = (page_addr - alloc->buffer) / PAGE_SIZE; page = &alloc->pages[index]; @@ -323,16 +239,79 @@ static void binder_lru_freelist_del(struct binder_alloc *alloc, if (page->page_ptr) { trace_binder_alloc_lru_start(alloc, index); - on_lru = list_lru_del(&binder_freelist, &page->lru); + on_lru = list_lru_del(&binder_alloc_lru, &page->lru); WARN_ON(!on_lru); trace_binder_alloc_lru_end(alloc, index); continue; } + if (WARN_ON(!vma)) + goto err_page_ptr_cleared; + + trace_binder_alloc_page_start(alloc, index); + page->page_ptr = alloc_page(GFP_KERNEL | + __GFP_HIGHMEM | + __GFP_ZERO); + if (!page->page_ptr) { + pr_err("%d: binder_alloc_buf failed for page at %pK\n", 
+ alloc->pid, page_addr); + goto err_alloc_page_failed; + } + page->alloc = alloc; + INIT_LIST_HEAD(&page->lru); + + user_page_addr = (uintptr_t)page_addr; + ret = vm_insert_page(vma, user_page_addr, page[0].page_ptr); + if (ret) { + pr_err("%d: binder_alloc_buf failed to map page at %lx in userspace\n", + alloc->pid, user_page_addr); + goto err_vm_insert_page_failed; + } + if (index + 1 > alloc->pages_high) alloc->pages_high = index + 1; + + trace_binder_alloc_page_end(alloc, index); } + if (mm) { + up_read(&mm->mmap_sem); + mmput(mm); + } + return 0; + +free_range: + for (page_addr = end - PAGE_SIZE; 1; page_addr -= PAGE_SIZE) { + bool ret; + size_t index; + + index = (page_addr - alloc->buffer) / PAGE_SIZE; + page = &alloc->pages[index]; + + trace_binder_free_lru_start(alloc, index); + + ret = list_lru_add(&binder_alloc_lru, &page->lru); + WARN_ON(!ret); + + trace_binder_free_lru_end(alloc, index); + if (page_addr == start) + break; + continue; + +err_vm_insert_page_failed: + __free_page(page->page_ptr); + page->page_ptr = NULL; +err_alloc_page_failed: +err_page_ptr_cleared: + if (page_addr == start) + break; + } +err_no_vma: + if (mm) { + up_read(&mm->mmap_sem); + mmput(mm); + } + return vma ? 
-ENOMEM : -ESRCH; } @@ -364,44 +343,7 @@ static inline struct vm_area_struct *binder_alloc_get_vma( return vma; } -static void debug_no_space_locked(struct binder_alloc *alloc) -{ - size_t largest_alloc_size = 0; - struct binder_buffer *buffer; - size_t allocated_buffers = 0; - size_t largest_free_size = 0; - size_t total_alloc_size = 0; - size_t total_free_size = 0; - size_t free_buffers = 0; - size_t buffer_size; - struct rb_node *n; - - for (n = rb_first(&alloc->allocated_buffers); n; n = rb_next(n)) { - buffer = rb_entry(n, struct binder_buffer, rb_node); - buffer_size = binder_alloc_buffer_size(alloc, buffer); - allocated_buffers++; - total_alloc_size += buffer_size; - if (buffer_size > largest_alloc_size) - largest_alloc_size = buffer_size; - } - - for (n = rb_first(&alloc->free_buffers); n; n = rb_next(n)) { - buffer = rb_entry(n, struct binder_buffer, rb_node); - buffer_size = binder_alloc_buffer_size(alloc, buffer); - free_buffers++; - total_free_size += buffer_size; - if (buffer_size > largest_free_size) - largest_free_size = buffer_size; - } - - binder_alloc_debug(BINDER_DEBUG_USER_ERROR, - "allocated: %zd (num: %zd largest: %zd), free: %zd (num: %zd largest: %zd)\n", - total_alloc_size, allocated_buffers, - largest_alloc_size, total_free_size, - free_buffers, largest_free_size); -} - -static bool debug_low_async_space_locked(struct binder_alloc *alloc) +static bool debug_low_async_space_locked(struct binder_alloc *alloc, int pid) { /* * Find the amount and size of buffers allocated by the current caller; @@ -410,20 +352,10 @@ static bool debug_low_async_space_locked(struct binder_alloc *alloc) * and at some point we'll catch them in the act. This is more efficient * than keeping a map per pid. 
*/ + struct rb_node *n; struct binder_buffer *buffer; size_t total_alloc_size = 0; - int pid = current->tgid; size_t num_buffers = 0; - struct rb_node *n; - - /* - * Only start detecting spammers once we have less than 20% of async - * space left (which is less than 10% of total buffer size). - */ - if (alloc->free_async_space >= alloc->buffer_size / 10) { - alloc->oneway_spam_detected = false; - return false; - } for (n = rb_first(&alloc->allocated_buffers); n != NULL; n = rb_next(n)) { @@ -432,7 +364,8 @@ static bool debug_low_async_space_locked(struct binder_alloc *alloc) continue; if (!buffer->async_transaction) continue; - total_alloc_size += binder_alloc_buffer_size(alloc, buffer); + total_alloc_size += binder_alloc_buffer_size(alloc, buffer) + + sizeof(struct binder_buffer); num_buffers++; } @@ -453,30 +386,57 @@ static bool debug_low_async_space_locked(struct binder_alloc *alloc) return false; } -/* Callers preallocate @new_buffer, it is freed by this function if unused */ static struct binder_buffer *binder_alloc_new_buf_locked( struct binder_alloc *alloc, - struct binder_buffer *new_buffer, - size_t size, - int is_async) + size_t data_size, + size_t offsets_size, + size_t extra_buffers_size, + int is_async, + int pid) { struct rb_node *n = alloc->free_buffers.rb_node; - struct rb_node *best_fit = NULL; struct binder_buffer *buffer; - unsigned long next_used_page; - unsigned long curr_last_page; size_t buffer_size; + struct rb_node *best_fit = NULL; + void __user *has_page_addr; + void __user *end_page_addr; + size_t size, data_offsets_size; + int ret; - trace_android_vh_binder_alloc_new_buf_locked(size, alloc, is_async); + if (!binder_alloc_get_vma(alloc)) { + binder_alloc_debug(BINDER_DEBUG_USER_ERROR, + "%d: binder_alloc_buf, no vma\n", + alloc->pid); + return ERR_PTR(-ESRCH); + } - if (is_async && alloc->free_async_space < size) { + data_offsets_size = ALIGN(data_size, sizeof(void *)) + + ALIGN(offsets_size, sizeof(void *)); + + if (data_offsets_size < 
data_size || data_offsets_size < offsets_size) { + binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC, + "%d: got transaction with invalid size %zd-%zd\n", + alloc->pid, data_size, offsets_size); + return ERR_PTR(-EINVAL); + } + size = data_offsets_size + ALIGN(extra_buffers_size, sizeof(void *)); + if (size < data_offsets_size || size < extra_buffers_size) { + binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC, + "%d: got transaction with invalid extra_buffers_size %zd\n", + alloc->pid, extra_buffers_size); + return ERR_PTR(-EINVAL); + } + if (is_async && + alloc->free_async_space < size + sizeof(struct binder_buffer)) { binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC, "%d: binder_alloc_buf size %zd failed, no async space left\n", alloc->pid, size); - buffer = ERR_PTR(-ENOSPC); - goto out; + return ERR_PTR(-ENOSPC); } + /* Pad 0-size buffers so they get assigned unique addresses */ + size = max(size, sizeof(void *)); + while (n) { buffer = rb_entry(n, struct binder_buffer, rb_node); BUG_ON(!buffer->free); @@ -485,92 +445,121 @@ static struct binder_buffer *binder_alloc_new_buf_locked( if (size < buffer_size) { best_fit = n; n = n->rb_left; - } else if (size > buffer_size) { + } else if (size > buffer_size) n = n->rb_right; - } else { + else { best_fit = n; break; } } + if (best_fit == NULL) { + size_t allocated_buffers = 0; + size_t largest_alloc_size = 0; + size_t total_alloc_size = 0; + size_t free_buffers = 0; + size_t largest_free_size = 0; + size_t total_free_size = 0; - if (unlikely(!best_fit)) { + for (n = rb_first(&alloc->allocated_buffers); n != NULL; + n = rb_next(n)) { + buffer = rb_entry(n, struct binder_buffer, rb_node); + buffer_size = binder_alloc_buffer_size(alloc, buffer); + allocated_buffers++; + total_alloc_size += buffer_size; + if (buffer_size > largest_alloc_size) + largest_alloc_size = buffer_size; + } + for (n = rb_first(&alloc->free_buffers); n != NULL; + n = rb_next(n)) { + buffer = rb_entry(n, struct binder_buffer, rb_node); + buffer_size = 
binder_alloc_buffer_size(alloc, buffer); + free_buffers++; + total_free_size += buffer_size; + if (buffer_size > largest_free_size) + largest_free_size = buffer_size; + } binder_alloc_debug(BINDER_DEBUG_USER_ERROR, "%d: binder_alloc_buf size %zd failed, no address space\n", alloc->pid, size); - debug_no_space_locked(alloc); - buffer = ERR_PTR(-ENOSPC); - goto out; + binder_alloc_debug(BINDER_DEBUG_USER_ERROR, + "allocated: %zd (num: %zd largest: %zd), free: %zd (num: %zd largest: %zd)\n", + total_alloc_size, allocated_buffers, + largest_alloc_size, total_free_size, + free_buffers, largest_free_size); + return ERR_PTR(-ENOSPC); } - - if (buffer_size != size) { - /* Found an oversized buffer and needs to be split */ + if (n == NULL) { buffer = rb_entry(best_fit, struct binder_buffer, rb_node); buffer_size = binder_alloc_buffer_size(alloc, buffer); - - WARN_ON(n || buffer_size == size); - new_buffer->user_data = buffer->user_data + size; - list_add(&new_buffer->entry, &buffer->entry); - new_buffer->free = 1; - binder_insert_free_buffer(alloc, new_buffer); - new_buffer = NULL; } binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC, "%d: binder_alloc_buf size %zd got buffer %pK size %zd\n", alloc->pid, size, buffer, buffer_size); - /* - * Now we remove the pages from the freelist. A clever calculation - * with buffer_size determines if the last page is shared with an - * adjacent in-use buffer. In such case, the page has been already - * removed from the freelist so we trim our range short. 
- */ - next_used_page = (buffer->user_data + buffer_size) & PAGE_MASK; - curr_last_page = PAGE_ALIGN(buffer->user_data + size); - binder_lru_freelist_del(alloc, PAGE_ALIGN(buffer->user_data), - min(next_used_page, curr_last_page)); + has_page_addr = (void __user *) + (((uintptr_t)buffer->user_data + buffer_size) & PAGE_MASK); + WARN_ON(n && buffer_size != size); + end_page_addr = + (void __user *)PAGE_ALIGN((uintptr_t)buffer->user_data + size); + if (end_page_addr > has_page_addr) + end_page_addr = has_page_addr; + ret = binder_update_page_range(alloc, 1, (void __user *) + PAGE_ALIGN((uintptr_t)buffer->user_data), end_page_addr); + if (ret) + return ERR_PTR(ret); - rb_erase(&buffer->rb_node, &alloc->free_buffers); + if (buffer_size != size) { + struct binder_buffer *new_buffer; + + new_buffer = kzalloc(sizeof(*buffer), GFP_KERNEL); + if (!new_buffer) { + pr_err("%s: %d failed to alloc new buffer struct\n", + __func__, alloc->pid); + goto err_alloc_buf_struct_failed; + } + new_buffer->user_data = (u8 __user *)buffer->user_data + size; + list_add(&new_buffer->entry, &buffer->entry); + new_buffer->free = 1; + binder_insert_free_buffer(alloc, new_buffer); + } + + rb_erase(best_fit, &alloc->free_buffers); buffer->free = 0; buffer->allow_user_free = 0; binder_insert_allocated_buffer_locked(alloc, buffer); + binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC, + "%d: binder_alloc_buf size %zd got %pK\n", + alloc->pid, size, buffer); + buffer->data_size = data_size; + buffer->offsets_size = offsets_size; buffer->async_transaction = is_async; + buffer->extra_buffers_size = extra_buffers_size; + buffer->pid = pid; buffer->oneway_spam_suspect = false; if (is_async) { - alloc->free_async_space -= size; + alloc->free_async_space -= size + sizeof(struct binder_buffer); binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC_ASYNC, "%d: binder_alloc_buf size %zd async free %zd\n", alloc->pid, size, alloc->free_async_space); - if (debug_low_async_space_locked(alloc)) - buffer->oneway_spam_suspect = 
true; + if (alloc->free_async_space < alloc->buffer_size / 10) { + /* + * Start detecting spammers once we have less than 20% + * of async space left (which is less than 10% of total + * buffer size). + */ + buffer->oneway_spam_suspect = debug_low_async_space_locked(alloc, pid); + } else { + alloc->oneway_spam_detected = false; + } } - -out: - /* Discard possibly unused new_buffer */ - kfree(new_buffer); return buffer; -} -/* Calculate the sanitized total size, returns 0 for invalid request */ -static inline size_t sanitized_size(size_t data_size, - size_t offsets_size, - size_t extra_buffers_size) -{ - size_t total, tmp; - - /* Align to pointer size and check for overflows */ - tmp = ALIGN(data_size, sizeof(void *)) + - ALIGN(offsets_size, sizeof(void *)); - if (tmp < data_size || tmp < offsets_size) - return 0; - total = tmp + ALIGN(extra_buffers_size, sizeof(void *)); - if (total < tmp || total < extra_buffers_size) - return 0; - - /* Pad 0-sized buffers so they get a unique address */ - total = max(total, sizeof(void *)); - - return total; +err_alloc_buf_struct_failed: + binder_update_page_range(alloc, 0, (void __user *) + PAGE_ALIGN((uintptr_t)buffer->user_data), + end_page_addr); + return ERR_PTR(-ENOMEM); } /** @@ -580,101 +569,87 @@ static inline size_t sanitized_size(size_t data_size, * @offsets_size: user specified buffer offset * @extra_buffers_size: size of extra space for meta-data (eg, security context) * @is_async: buffer for async transaction + * @pid: pid to attribute allocation to (used for debugging) * * Allocate a new buffer given the requested sizes. Returns * the kernel version of the buffer pointer. 
The size allocated * is the sum of the three given sizes (each rounded up to * pointer-sized boundary) * - * Return: The allocated buffer or %ERR_PTR(-errno) if error + * Return: The allocated buffer or %NULL if error */ struct binder_buffer *binder_alloc_new_buf(struct binder_alloc *alloc, size_t data_size, size_t offsets_size, size_t extra_buffers_size, - int is_async) + int is_async, + int pid) { - struct binder_buffer *buffer, *next; - size_t size; - int ret; + struct binder_buffer *buffer; - /* Check binder_alloc is fully initialized */ - if (!binder_alloc_get_vma(alloc)) { - binder_alloc_debug(BINDER_DEBUG_USER_ERROR, - "%d: binder_alloc_buf, no vma\n", - alloc->pid); - return ERR_PTR(-ESRCH); - } - - size = sanitized_size(data_size, offsets_size, extra_buffers_size); - if (unlikely(!size)) { - binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC, - "%d: got transaction with invalid size %zd-%zd-%zd\n", - alloc->pid, data_size, offsets_size, - extra_buffers_size); - return ERR_PTR(-EINVAL); - } - - /* Preallocate the next buffer */ - next = kzalloc(sizeof(*next), GFP_KERNEL); - if (!next) - return ERR_PTR(-ENOMEM); - - spin_lock(&alloc->lock); - buffer = binder_alloc_new_buf_locked(alloc, next, size, is_async); - if (IS_ERR(buffer)) { - spin_unlock(&alloc->lock); - goto out; - } - - buffer->data_size = data_size; - buffer->offsets_size = offsets_size; - buffer->extra_buffers_size = extra_buffers_size; - buffer->pid = current->tgid; - spin_unlock(&alloc->lock); - - ret = binder_install_buffer_pages(alloc, buffer, size); - if (ret) { - binder_alloc_free_buf(alloc, buffer); - buffer = ERR_PTR(ret); - } -out: + mutex_lock(&alloc->mutex); + buffer = binder_alloc_new_buf_locked(alloc, data_size, offsets_size, + extra_buffers_size, is_async, pid); + mutex_unlock(&alloc->mutex); return buffer; } -static unsigned long buffer_start_page(struct binder_buffer *buffer) +static void __user *buffer_start_page(struct binder_buffer *buffer) { - return buffer->user_data & PAGE_MASK; + 
return (void __user *)((uintptr_t)buffer->user_data & PAGE_MASK); } -static unsigned long prev_buffer_end_page(struct binder_buffer *buffer) +static void __user *prev_buffer_end_page(struct binder_buffer *buffer) { - return (buffer->user_data - 1) & PAGE_MASK; + return (void __user *) + (((uintptr_t)(buffer->user_data) - 1) & PAGE_MASK); } static void binder_delete_free_buffer(struct binder_alloc *alloc, struct binder_buffer *buffer) { - struct binder_buffer *prev, *next; - - if (PAGE_ALIGNED(buffer->user_data)) - goto skip_freelist; + struct binder_buffer *prev, *next = NULL; + bool to_free = true; BUG_ON(alloc->buffers.next == &buffer->entry); prev = binder_buffer_prev(buffer); BUG_ON(!prev->free); - if (prev_buffer_end_page(prev) == buffer_start_page(buffer)) - goto skip_freelist; + if (prev_buffer_end_page(prev) == buffer_start_page(buffer)) { + to_free = false; + binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC, + "%d: merge free, buffer %pK share page with %pK\n", + alloc->pid, buffer->user_data, + prev->user_data); + } if (!list_is_last(&buffer->entry, &alloc->buffers)) { next = binder_buffer_next(buffer); - if (buffer_start_page(next) == buffer_start_page(buffer)) - goto skip_freelist; + if (buffer_start_page(next) == buffer_start_page(buffer)) { + to_free = false; + binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC, + "%d: merge free, buffer %pK share page with %pK\n", + alloc->pid, + buffer->user_data, + next->user_data); + } } - binder_lru_freelist_add(alloc, buffer_start_page(buffer), - buffer_start_page(buffer) + PAGE_SIZE); -skip_freelist: + if (PAGE_ALIGNED(buffer->user_data)) { + binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC, + "%d: merge free, buffer start %pK is page aligned\n", + alloc->pid, buffer->user_data); + to_free = false; + } + + if (to_free) { + binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC, + "%d: merge free, buffer %pK do not share page with %pK or %pK\n", + alloc->pid, buffer->user_data, + prev->user_data, + next ? 
next->user_data : NULL); + binder_update_page_range(alloc, 0, buffer_start_page(buffer), + buffer_start_page(buffer) + PAGE_SIZE); + } list_del(&buffer->entry); kfree(buffer); } @@ -701,14 +676,17 @@ static void binder_free_buf_locked(struct binder_alloc *alloc, BUG_ON(buffer->user_data > alloc->buffer + alloc->buffer_size); if (buffer->async_transaction) { - alloc->free_async_space += buffer_size; + alloc->free_async_space += buffer_size + sizeof(struct binder_buffer); + binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC_ASYNC, "%d: binder_free_buf size %zd async free %zd\n", alloc->pid, size, alloc->free_async_space); } - binder_lru_freelist_add(alloc, PAGE_ALIGN(buffer->user_data), - (buffer->user_data + buffer_size) & PAGE_MASK); + binder_update_page_range(alloc, 0, + (void __user *)PAGE_ALIGN((uintptr_t)buffer->user_data), + (void __user *)(((uintptr_t) + buffer->user_data + buffer_size) & PAGE_MASK)); rb_erase(&buffer->rb_node, &alloc->allocated_buffers); buffer->free = 1; @@ -732,6 +710,428 @@ static void binder_free_buf_locked(struct binder_alloc *alloc, binder_insert_free_buffer(alloc, buffer); } +static void binder_alloc_clear_buf(struct binder_alloc *alloc, + struct binder_buffer *buffer); +/** + * binder_alloc_free_buf() - free a binder buffer + * @alloc: binder_alloc for this proc + * @buffer: kernel pointer to buffer + * + * Free the buffer allocated via binder_alloc_new_buf() + */ +void binder_alloc_free_buf(struct binder_alloc *alloc, + struct binder_buffer *buffer) +{ + /* + * We could eliminate the call to binder_alloc_clear_buf() + * from binder_alloc_deferred_release() by moving this to + * binder_alloc_free_buf_locked(). However, that could + * increase contention for the alloc mutex if clear_on_free + * is used frequently for large buffers. The mutex is not + * needed for correctness here. 
+ */ + if (buffer->clear_on_free) { + binder_alloc_clear_buf(alloc, buffer); + buffer->clear_on_free = false; + } + mutex_lock(&alloc->mutex); + binder_free_buf_locked(alloc, buffer); + mutex_unlock(&alloc->mutex); +} + +/** + * binder_alloc_mmap_handler() - map virtual address space for proc + * @alloc: alloc structure for this proc + * @vma: vma passed to mmap() + * + * Called by binder_mmap() to initialize the space specified in + * vma for allocating binder buffers + * + * Return: + * 0 = success + * -EBUSY = address space already mapped + * -ENOMEM = failed to map memory to given address space + */ +int binder_alloc_mmap_handler(struct binder_alloc *alloc, + struct vm_area_struct *vma) +{ + int ret; + const char *failure_string; + struct binder_buffer *buffer; + + mutex_lock(&binder_alloc_mmap_lock); + if (alloc->buffer_size) { + ret = -EBUSY; + failure_string = "already mapped"; + goto err_already_mapped; + } + alloc->buffer_size = min_t(unsigned long, vma->vm_end - vma->vm_start, + SZ_4M); + mutex_unlock(&binder_alloc_mmap_lock); + + alloc->buffer = (void __user *)vma->vm_start; + + alloc->pages = kcalloc(alloc->buffer_size / PAGE_SIZE, + sizeof(alloc->pages[0]), + GFP_KERNEL); + if (alloc->pages == NULL) { + ret = -ENOMEM; + failure_string = "alloc page array"; + goto err_alloc_pages_failed; + } + + buffer = kzalloc(sizeof(*buffer), GFP_KERNEL); + if (!buffer) { + ret = -ENOMEM; + failure_string = "alloc buffer struct"; + goto err_alloc_buf_struct_failed; + } + + buffer->user_data = alloc->buffer; + list_add(&buffer->entry, &alloc->buffers); + buffer->free = 1; + binder_insert_free_buffer(alloc, buffer); + alloc->free_async_space = alloc->buffer_size / 2; + binder_alloc_set_vma(alloc, vma); + mmgrab(alloc->vma_vm_mm); + + return 0; + +err_alloc_buf_struct_failed: + kfree(alloc->pages); + alloc->pages = NULL; +err_alloc_pages_failed: + alloc->buffer = NULL; + mutex_lock(&binder_alloc_mmap_lock); + alloc->buffer_size = 0; +err_already_mapped: + 
mutex_unlock(&binder_alloc_mmap_lock); + binder_alloc_debug(BINDER_DEBUG_USER_ERROR, + "%s: %d %lx-%lx %s failed %d\n", __func__, + alloc->pid, vma->vm_start, vma->vm_end, + failure_string, ret); + return ret; +} + + +void binder_alloc_deferred_release(struct binder_alloc *alloc) +{ + struct rb_node *n; + int buffers, page_count; + struct binder_buffer *buffer; + + buffers = 0; + mutex_lock(&alloc->mutex); + BUG_ON(alloc->vma); + + while ((n = rb_first(&alloc->allocated_buffers))) { + buffer = rb_entry(n, struct binder_buffer, rb_node); + + /* Transaction should already have been freed */ + BUG_ON(buffer->transaction); + + if (buffer->clear_on_free) { + binder_alloc_clear_buf(alloc, buffer); + buffer->clear_on_free = false; + } + binder_free_buf_locked(alloc, buffer); + buffers++; + } + + while (!list_empty(&alloc->buffers)) { + buffer = list_first_entry(&alloc->buffers, + struct binder_buffer, entry); + WARN_ON(!buffer->free); + + list_del(&buffer->entry); + WARN_ON_ONCE(!list_empty(&alloc->buffers)); + kfree(buffer); + } + + page_count = 0; + if (alloc->pages) { + int i; + + for (i = 0; i < alloc->buffer_size / PAGE_SIZE; i++) { + void __user *page_addr; + bool on_lru; + + if (!alloc->pages[i].page_ptr) + continue; + + on_lru = list_lru_del(&binder_alloc_lru, + &alloc->pages[i].lru); + page_addr = alloc->buffer + i * PAGE_SIZE; + binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC, + "%s: %d: page %d at %pK %s\n", + __func__, alloc->pid, i, page_addr, + on_lru ? 
"on lru" : "active"); + __free_page(alloc->pages[i].page_ptr); + page_count++; + } + kfree(alloc->pages); + } + mutex_unlock(&alloc->mutex); + if (alloc->vma_vm_mm) + mmdrop(alloc->vma_vm_mm); + + binder_alloc_debug(BINDER_DEBUG_OPEN_CLOSE, + "%s: %d buffers %d, pages %d\n", + __func__, alloc->pid, buffers, page_count); +} + +static void print_binder_buffer(struct seq_file *m, const char *prefix, + struct binder_buffer *buffer) +{ + seq_printf(m, "%s %d: %pK size %zd:%zd:%zd %s\n", + prefix, buffer->debug_id, buffer->user_data, + buffer->data_size, buffer->offsets_size, + buffer->extra_buffers_size, + buffer->transaction ? "active" : "delivered"); +} + +/** + * binder_alloc_print_allocated() - print buffer info + * @m: seq_file for output via seq_printf() + * @alloc: binder_alloc for this proc + * + * Prints information about every buffer associated with + * the binder_alloc state to the given seq_file + */ +void binder_alloc_print_allocated(struct seq_file *m, + struct binder_alloc *alloc) +{ + struct rb_node *n; + + mutex_lock(&alloc->mutex); + for (n = rb_first(&alloc->allocated_buffers); n != NULL; n = rb_next(n)) + print_binder_buffer(m, " buffer", + rb_entry(n, struct binder_buffer, rb_node)); + mutex_unlock(&alloc->mutex); +} + +/** + * binder_alloc_print_pages() - print page usage + * @m: seq_file for output via seq_printf() + * @alloc: binder_alloc for this proc + */ +void binder_alloc_print_pages(struct seq_file *m, + struct binder_alloc *alloc) +{ + struct binder_lru_page *page; + int i; + int active = 0; + int lru = 0; + int free = 0; + + mutex_lock(&alloc->mutex); + /* + * Make sure the binder_alloc is fully initialized, otherwise we might + * read inconsistent state. 
+ */ + if (binder_alloc_get_vma(alloc) != NULL) { + for (i = 0; i < alloc->buffer_size / PAGE_SIZE; i++) { + page = &alloc->pages[i]; + if (!page->page_ptr) + free++; + else if (list_empty(&page->lru)) + active++; + else + lru++; + } + } + mutex_unlock(&alloc->mutex); + seq_printf(m, " pages: %d:%d:%d\n", active, lru, free); + seq_printf(m, " pages high watermark: %zu\n", alloc->pages_high); +} + +/** + * binder_alloc_get_allocated_count() - return count of buffers + * @alloc: binder_alloc for this proc + * + * Return: count of allocated buffers + */ +int binder_alloc_get_allocated_count(struct binder_alloc *alloc) +{ + struct rb_node *n; + int count = 0; + + mutex_lock(&alloc->mutex); + for (n = rb_first(&alloc->allocated_buffers); n != NULL; n = rb_next(n)) + count++; + mutex_unlock(&alloc->mutex); + return count; +} + + +/** + * binder_alloc_vma_close() - invalidate address space + * @alloc: binder_alloc for this proc + * + * Called from binder_vma_close() when releasing address space. + * Clears alloc->vma to prevent new incoming transactions from + * allocating more buffers. + */ +void binder_alloc_vma_close(struct binder_alloc *alloc) +{ + binder_alloc_set_vma(alloc, NULL); +} + +/** + * binder_alloc_free_page() - shrinker callback to free pages + * @item: item to free + * @lock: lock protecting the item + * @cb_arg: callback argument + * + * Called from list_lru_walk() in binder_shrink_scan() to free + * up pages when the system is under memory pressure. 
+ */ +enum lru_status binder_alloc_free_page(struct list_head *item, + struct list_lru_one *lru, + spinlock_t *lock, + void *cb_arg) + __must_hold(lock) +{ + struct mm_struct *mm = NULL; + struct binder_lru_page *page = container_of(item, + struct binder_lru_page, + lru); + struct binder_alloc *alloc; + uintptr_t page_addr; + size_t index; + struct vm_area_struct *vma; + + alloc = page->alloc; + if (!mutex_trylock(&alloc->mutex)) + goto err_get_alloc_mutex_failed; + + if (!page->page_ptr) + goto err_page_already_freed; + + index = page - alloc->pages; + page_addr = (uintptr_t)alloc->buffer + index * PAGE_SIZE; + + mm = alloc->vma_vm_mm; + if (!mmget_not_zero(mm)) + goto err_mmget; + if (!down_read_trylock(&mm->mmap_sem)) + goto err_down_read_mmap_sem_failed; + vma = binder_alloc_get_vma(alloc); + + list_lru_isolate(lru, item); + spin_unlock(lock); + + if (vma) { + trace_binder_unmap_user_start(alloc, index); + + zap_page_range(vma, page_addr, PAGE_SIZE); + + trace_binder_unmap_user_end(alloc, index); + } + up_read(&mm->mmap_sem); + mmput_async(mm); + + trace_binder_unmap_kernel_start(alloc, index); + + __free_page(page->page_ptr); + page->page_ptr = NULL; + + trace_binder_unmap_kernel_end(alloc, index); + + spin_lock(lock); + mutex_unlock(&alloc->mutex); + return LRU_REMOVED_RETRY; + +err_down_read_mmap_sem_failed: + mmput_async(mm); +err_mmget: +err_page_already_freed: + mutex_unlock(&alloc->mutex); +err_get_alloc_mutex_failed: + return LRU_SKIP; +} + +static unsigned long +binder_shrink_count(struct shrinker *shrink, struct shrink_control *sc) +{ + unsigned long ret = list_lru_count(&binder_alloc_lru); + return ret; +} + +static unsigned long +binder_shrink_scan(struct shrinker *shrink, struct shrink_control *sc) +{ + unsigned long ret; + + ret = list_lru_walk(&binder_alloc_lru, binder_alloc_free_page, + NULL, sc->nr_to_scan); + return ret; +} + +static struct shrinker binder_shrinker = { + .count_objects = binder_shrink_count, + .scan_objects = 
binder_shrink_scan, + .seeks = DEFAULT_SEEKS, +}; + +/** + * binder_alloc_init() - called by binder_open() for per-proc initialization + * @alloc: binder_alloc for this proc + * + * Called from binder_open() to initialize binder_alloc fields for + * new binder proc + */ +void binder_alloc_init(struct binder_alloc *alloc) +{ + alloc->pid = current->group_leader->pid; + mutex_init(&alloc->mutex); + INIT_LIST_HEAD(&alloc->buffers); +} + +int binder_alloc_shrinker_init(void) +{ + int ret = list_lru_init(&binder_alloc_lru); + + if (ret == 0) { + ret = register_shrinker(&binder_shrinker); + if (ret) + list_lru_destroy(&binder_alloc_lru); + } + return ret; +} + +/** + * check_buffer() - verify that buffer/offset is safe to access + * @alloc: binder_alloc for this proc + * @buffer: binder buffer to be accessed + * @offset: offset into @buffer data + * @bytes: bytes to access from offset + * + * Check that the @offset/@bytes are within the size of the given + * @buffer and that the buffer is currently active and not freeable. + * Offsets must also be multiples of sizeof(u32). The kernel is + * allowed to touch the buffer in two cases: + * + * 1) when the buffer is being created: + * (buffer->free == 0 && buffer->allow_user_free == 0) + * 2) when the buffer is being torn down: + * (buffer->free == 0 && buffer->transaction == NULL). 
+ * + * Return: true if the buffer is safe to access + */ +static inline bool check_buffer(struct binder_alloc *alloc, + struct binder_buffer *buffer, + binder_size_t offset, size_t bytes) +{ + size_t buffer_size = binder_alloc_buffer_size(alloc, buffer); + + return buffer_size >= bytes && + offset <= buffer_size - bytes && + IS_ALIGNED(offset, sizeof(u32)) && + !buffer->free && + (!buffer->allow_user_free || !buffer->transaction); +} + /** * binder_alloc_get_page() - get kernel pointer for given buffer offset * @alloc: binder_alloc for this proc @@ -797,433 +1197,6 @@ static void binder_alloc_clear_buf(struct binder_alloc *alloc, } } - -/** - * binder_alloc_free_buf() - free a binder buffer - * @alloc: binder_alloc for this proc - * @buffer: kernel pointer to buffer - * - * Free the buffer allocated via binder_alloc_new_buf() - */ -void binder_alloc_free_buf(struct binder_alloc *alloc, - struct binder_buffer *buffer) -{ - /* - * We could eliminate the call to binder_alloc_clear_buf() - * from binder_alloc_deferred_release() by moving this to - * binder_free_buf_locked(). However, that could - * increase contention for the alloc->lock if clear_on_free - * is used frequently for large buffers. This lock is not - * needed for correctness here. 
- */ - if (buffer->clear_on_free) { - binder_alloc_clear_buf(alloc, buffer); - buffer->clear_on_free = false; - } - spin_lock(&alloc->lock); - binder_free_buf_locked(alloc, buffer); - spin_unlock(&alloc->lock); -} - -/** - * binder_alloc_mmap_handler() - map virtual address space for proc - * @alloc: alloc structure for this proc - * @vma: vma passed to mmap() - * - * Called by binder_mmap() to initialize the space specified in - * vma for allocating binder buffers - * - * Return: - * 0 = success - * -EBUSY = address space already mapped - * -ENOMEM = failed to map memory to given address space - */ -int binder_alloc_mmap_handler(struct binder_alloc *alloc, - struct vm_area_struct *vma) -{ - struct binder_buffer *buffer; - const char *failure_string; - int ret, i; - - mutex_lock(&binder_alloc_mmap_lock); - if (alloc->buffer_size) { - ret = -EBUSY; - failure_string = "already mapped"; - goto err_already_mapped; - } - alloc->buffer_size = min_t(unsigned long, vma->vm_end - vma->vm_start, - SZ_4M); - mutex_unlock(&binder_alloc_mmap_lock); - - alloc->buffer = vma->vm_start; - - alloc->pages = kcalloc(alloc->buffer_size / PAGE_SIZE, - sizeof(alloc->pages[0]), - GFP_KERNEL); - if (alloc->pages == NULL) { - ret = -ENOMEM; - failure_string = "alloc page array"; - goto err_alloc_pages_failed; - } - - for (i = 0; i < alloc->buffer_size / PAGE_SIZE; i++) { - alloc->pages[i].alloc = alloc; - INIT_LIST_HEAD(&alloc->pages[i].lru); - } - - buffer = kzalloc(sizeof(*buffer), GFP_KERNEL); - if (!buffer) { - ret = -ENOMEM; - failure_string = "alloc buffer struct"; - goto err_alloc_buf_struct_failed; - } - - buffer->user_data = alloc->buffer; - list_add(&buffer->entry, &alloc->buffers); - buffer->free = 1; - binder_insert_free_buffer(alloc, buffer); - alloc->free_async_space = alloc->buffer_size / 2; - binder_alloc_set_vma(alloc, vma); - mmgrab(alloc->vma_vm_mm); - - return 0; - -err_alloc_buf_struct_failed: - kfree(alloc->pages); - alloc->pages = NULL; -err_alloc_pages_failed: - 
alloc->buffer = 0; - mutex_lock(&binder_alloc_mmap_lock); - alloc->buffer_size = 0; -err_already_mapped: - mutex_unlock(&binder_alloc_mmap_lock); - binder_alloc_debug(BINDER_DEBUG_USER_ERROR, - "%s: %d %lx-%lx %s failed %d\n", __func__, - alloc->pid, vma->vm_start, vma->vm_end, - failure_string, ret); - return ret; -} - - -void binder_alloc_deferred_release(struct binder_alloc *alloc) -{ - struct rb_node *n; - int buffers, page_count; - struct binder_buffer *buffer; - - buffers = 0; - spin_lock(&alloc->lock); - BUG_ON(alloc->vma); - - while ((n = rb_first(&alloc->allocated_buffers))) { - buffer = rb_entry(n, struct binder_buffer, rb_node); - - /* Transaction should already have been freed */ - BUG_ON(buffer->transaction); - - if (buffer->clear_on_free) { - binder_alloc_clear_buf(alloc, buffer); - buffer->clear_on_free = false; - } - binder_free_buf_locked(alloc, buffer); - buffers++; - } - - while (!list_empty(&alloc->buffers)) { - buffer = list_first_entry(&alloc->buffers, - struct binder_buffer, entry); - WARN_ON(!buffer->free); - - list_del(&buffer->entry); - WARN_ON_ONCE(!list_empty(&alloc->buffers)); - kfree(buffer); - } - - page_count = 0; - if (alloc->pages) { - int i; - - for (i = 0; i < alloc->buffer_size / PAGE_SIZE; i++) { - unsigned long page_addr; - bool on_lru; - - if (!alloc->pages[i].page_ptr) - continue; - - on_lru = list_lru_del(&binder_freelist, - &alloc->pages[i].lru); - page_addr = alloc->buffer + i * PAGE_SIZE; - binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC, - "%s: %d: page %d %s\n", - __func__, alloc->pid, i, - on_lru ? 
"on lru" : "active"); - __free_page(alloc->pages[i].page_ptr); - page_count++; - } - kfree(alloc->pages); - } - spin_unlock(&alloc->lock); - if (alloc->vma_vm_mm) - mmdrop(alloc->vma_vm_mm); - - binder_alloc_debug(BINDER_DEBUG_OPEN_CLOSE, - "%s: %d buffers %d, pages %d\n", - __func__, alloc->pid, buffers, page_count); -} - -/** - * binder_alloc_print_allocated() - print buffer info - * @m: seq_file for output via seq_printf() - * @alloc: binder_alloc for this proc - * - * Prints information about every buffer associated with - * the binder_alloc state to the given seq_file - */ -void binder_alloc_print_allocated(struct seq_file *m, - struct binder_alloc *alloc) -{ - struct binder_buffer *buffer; - struct rb_node *n; - - spin_lock(&alloc->lock); - for (n = rb_first(&alloc->allocated_buffers); n; n = rb_next(n)) { - buffer = rb_entry(n, struct binder_buffer, rb_node); - seq_printf(m, " buffer %d: %lx size %zd:%zd:%zd %s\n", - buffer->debug_id, - buffer->user_data - alloc->buffer, - buffer->data_size, buffer->offsets_size, - buffer->extra_buffers_size, - buffer->transaction ? "active" : "delivered"); - } - spin_unlock(&alloc->lock); -} - -/** - * binder_alloc_print_pages() - print page usage - * @m: seq_file for output via seq_printf() - * @alloc: binder_alloc for this proc - */ -void binder_alloc_print_pages(struct seq_file *m, - struct binder_alloc *alloc) -{ - struct binder_lru_page *page; - int i; - int active = 0; - int lru = 0; - int free = 0; - - spin_lock(&alloc->lock); - /* - * Make sure the binder_alloc is fully initialized, otherwise we might - * read inconsistent state. 
- */ - if (binder_alloc_get_vma(alloc) != NULL) { - for (i = 0; i < alloc->buffer_size / PAGE_SIZE; i++) { - page = &alloc->pages[i]; - if (!page->page_ptr) - free++; - else if (list_empty(&page->lru)) - active++; - else - lru++; - } - } - spin_unlock(&alloc->lock); - seq_printf(m, " pages: %d:%d:%d\n", active, lru, free); - seq_printf(m, " pages high watermark: %zu\n", alloc->pages_high); -} - -/** - * binder_alloc_get_allocated_count() - return count of buffers - * @alloc: binder_alloc for this proc - * - * Return: count of allocated buffers - */ -int binder_alloc_get_allocated_count(struct binder_alloc *alloc) -{ - struct rb_node *n; - int count = 0; - - spin_lock(&alloc->lock); - for (n = rb_first(&alloc->allocated_buffers); n != NULL; n = rb_next(n)) - count++; - spin_unlock(&alloc->lock); - return count; -} - - -/** - * binder_alloc_vma_close() - invalidate address space - * @alloc: binder_alloc for this proc - * - * Called from binder_vma_close() when releasing address space. - * Clears alloc->vma to prevent new incoming transactions from - * allocating more buffers. - */ -void binder_alloc_vma_close(struct binder_alloc *alloc) -{ - binder_alloc_set_vma(alloc, NULL); -} - -/** - * binder_alloc_free_page() - shrinker callback to free pages - * @item: item to free - * @lock: lock protecting the item - * @cb_arg: callback argument - * - * Called from list_lru_walk() in binder_shrink_scan() to free - * up pages when the system is under memory pressure. 
- */ -enum lru_status binder_alloc_free_page(struct list_head *item, - struct list_lru_one *lru, - spinlock_t *lock, - void *cb_arg) - __must_hold(lock) -{ - struct binder_lru_page *page = container_of(item, typeof(*page), lru); - struct binder_alloc *alloc = page->alloc; - struct mm_struct *mm = alloc->vma_vm_mm; - struct vm_area_struct *vma; - struct page *page_to_free; - unsigned long page_addr; - size_t index; - - if (!mmget_not_zero(mm)) - goto err_mmget; - if (!mmap_read_trylock(mm)) - goto err_mmap_read_lock_failed; - if (!spin_trylock(&alloc->lock)) - goto err_get_alloc_lock_failed; - if (!page->page_ptr) - goto err_page_already_freed; - - index = page - alloc->pages; - page_addr = alloc->buffer + index * PAGE_SIZE; - - vma = find_vma(mm, page_addr); - if (vma && vma != binder_alloc_get_vma(alloc)) - goto err_invalid_vma; - - trace_binder_unmap_kernel_start(alloc, index); - - page_to_free = page->page_ptr; - page->page_ptr = NULL; - - trace_binder_unmap_kernel_end(alloc, index); - - list_lru_isolate(lru, item); - spin_unlock(&alloc->lock); - spin_unlock(lock); - - if (vma) { - trace_binder_unmap_user_start(alloc, index); - - zap_page_range(vma, page_addr, PAGE_SIZE); - - trace_binder_unmap_user_end(alloc, index); - } - - mmap_read_unlock(mm); - mmput_async(mm); - __free_page(page_to_free); - - spin_lock(lock); - return LRU_REMOVED_RETRY; - -err_invalid_vma: -err_page_already_freed: - spin_unlock(&alloc->lock); -err_get_alloc_lock_failed: - mmap_read_unlock(mm); -err_mmap_read_lock_failed: - mmput_async(mm); -err_mmget: - return LRU_SKIP; -} - -static unsigned long -binder_shrink_count(struct shrinker *shrink, struct shrink_control *sc) -{ - return list_lru_count(&binder_freelist); -} - -static unsigned long -binder_shrink_scan(struct shrinker *shrink, struct shrink_control *sc) -{ - return list_lru_walk(&binder_freelist, binder_alloc_free_page, - NULL, sc->nr_to_scan); -} - -static struct shrinker binder_shrinker = { - .count_objects = binder_shrink_count, 
- .scan_objects = binder_shrink_scan, - .seeks = DEFAULT_SEEKS, -}; - -/** - * binder_alloc_init() - called by binder_open() for per-proc initialization - * @alloc: binder_alloc for this proc - * - * Called from binder_open() to initialize binder_alloc fields for - * new binder proc - */ -void binder_alloc_init(struct binder_alloc *alloc) -{ - alloc->pid = current->group_leader->pid; - spin_lock_init(&alloc->lock); - INIT_LIST_HEAD(&alloc->buffers); -} - -int binder_alloc_shrinker_init(void) -{ - int ret = list_lru_init(&binder_freelist); - - if (ret == 0) { - ret = register_shrinker(&binder_shrinker); - if (ret) - list_lru_destroy(&binder_freelist); - } - return ret; -} - -void binder_alloc_shrinker_exit(void) -{ - unregister_shrinker(&binder_shrinker); - list_lru_destroy(&binder_freelist); -} - -/** - * check_buffer() - verify that buffer/offset is safe to access - * @alloc: binder_alloc for this proc - * @buffer: binder buffer to be accessed - * @offset: offset into @buffer data - * @bytes: bytes to access from offset - * - * Check that the @offset/@bytes are within the size of the given - * @buffer and that the buffer is currently active and not freeable. - * Offsets must also be multiples of sizeof(u32). The kernel is - * allowed to touch the buffer in two cases: - * - * 1) when the buffer is being created: - * (buffer->free == 0 && buffer->allow_user_free == 0) - * 2) when the buffer is being torn down: - * (buffer->free == 0 && buffer->transaction == NULL). 
- * - * Return: true if the buffer is safe to access - */ -static inline bool check_buffer(struct binder_alloc *alloc, - struct binder_buffer *buffer, - binder_size_t offset, size_t bytes) -{ - size_t buffer_size = binder_alloc_buffer_size(alloc, buffer); - - return buffer_size >= bytes && - offset <= buffer_size - bytes && - IS_ALIGNED(offset, sizeof(u32)) && - !buffer->free && - (!buffer->allow_user_free || !buffer->transaction); -} - /** * binder_alloc_copy_user_to_buffer() - copy src user to tgt user * @alloc: binder_alloc for this proc @@ -1328,3 +1301,8 @@ int binder_alloc_copy_from_buffer(struct binder_alloc *alloc, dest, bytes); } +void binder_alloc_shrinker_exit(void) +{ + unregister_shrinker(&binder_shrinker); + list_lru_destroy(&binder_alloc_lru); +} diff --git a/drivers/android/binder_alloc.h b/drivers/android/binder_alloc.h index 93537957f4cc..399f2b269f2c 100644 --- a/drivers/android/binder_alloc.h +++ b/drivers/android/binder_alloc.h @@ -9,13 +9,13 @@ #include #include #include -#include +#include #include #include #include #include -extern struct list_lru binder_freelist; +extern struct list_lru binder_alloc_lru; struct binder_transaction; /** @@ -49,19 +49,21 @@ struct binder_buffer { unsigned async_transaction:1; unsigned oneway_spam_suspect:1; unsigned debug_id:27; + struct binder_transaction *transaction; + struct binder_node *target_node; size_t data_size; size_t offsets_size; size_t extra_buffers_size; - unsigned long user_data; - int pid; + void __user *user_data; + int pid; }; /** * struct binder_lru_page - page object used for binder shrinker * @page_ptr: pointer to physical page in mmap'd space - * @lru: entry in binder_freelist + * @lru: entry in binder_alloc_lru * @alloc: binder_alloc for a proc */ struct binder_lru_page { @@ -72,7 +74,6 @@ struct binder_lru_page { /** * struct binder_alloc - per-binder proc state for binder allocator - * @lock: protects binder_alloc fields * @vma: vm_area_struct passed to mmap_handler * (invarient after 
mmap) * @tsk: tid for task that called init for this proc @@ -98,10 +99,10 @@ struct binder_lru_page { * struct binder_buffer objects used to track the user buffers */ struct binder_alloc { - spinlock_t lock; + struct mutex mutex; struct vm_area_struct *vma; struct mm_struct *vma_vm_mm; - unsigned long buffer; + void __user *buffer; struct list_head buffers; struct rb_root free_buffers; struct rb_root allocated_buffers; @@ -122,26 +123,27 @@ static inline void binder_selftest_alloc(struct binder_alloc *alloc) {} enum lru_status binder_alloc_free_page(struct list_head *item, struct list_lru_one *lru, spinlock_t *lock, void *cb_arg); -struct binder_buffer *binder_alloc_new_buf(struct binder_alloc *alloc, - size_t data_size, - size_t offsets_size, - size_t extra_buffers_size, - int is_async); -void binder_alloc_init(struct binder_alloc *alloc); -int binder_alloc_shrinker_init(void); -void binder_alloc_shrinker_exit(void); -void binder_alloc_vma_close(struct binder_alloc *alloc); -struct binder_buffer * +extern struct binder_buffer *binder_alloc_new_buf(struct binder_alloc *alloc, + size_t data_size, + size_t offsets_size, + size_t extra_buffers_size, + int is_async, + int pid); +extern void binder_alloc_init(struct binder_alloc *alloc); +extern int binder_alloc_shrinker_init(void); +extern void binder_alloc_shrinker_exit(void); +extern void binder_alloc_vma_close(struct binder_alloc *alloc); +extern struct binder_buffer * binder_alloc_prepare_to_free(struct binder_alloc *alloc, - unsigned long user_ptr); -void binder_alloc_free_buf(struct binder_alloc *alloc, - struct binder_buffer *buffer); -int binder_alloc_mmap_handler(struct binder_alloc *alloc, - struct vm_area_struct *vma); -void binder_alloc_deferred_release(struct binder_alloc *alloc); -int binder_alloc_get_allocated_count(struct binder_alloc *alloc); -void binder_alloc_print_allocated(struct seq_file *m, - struct binder_alloc *alloc); + uintptr_t user_ptr); +extern void binder_alloc_free_buf(struct 
binder_alloc *alloc, + struct binder_buffer *buffer); +extern int binder_alloc_mmap_handler(struct binder_alloc *alloc, + struct vm_area_struct *vma); +extern void binder_alloc_deferred_release(struct binder_alloc *alloc); +extern int binder_alloc_get_allocated_count(struct binder_alloc *alloc); +extern void binder_alloc_print_allocated(struct seq_file *m, + struct binder_alloc *alloc); void binder_alloc_print_pages(struct seq_file *m, struct binder_alloc *alloc); @@ -156,9 +158,9 @@ binder_alloc_get_free_async_space(struct binder_alloc *alloc) { size_t free_async_space; - spin_lock(&alloc->lock); + mutex_lock(&alloc->mutex); free_async_space = alloc->free_async_space; - spin_unlock(&alloc->lock); + mutex_unlock(&alloc->mutex); return free_async_space; } diff --git a/drivers/android/binder_alloc_selftest.c b/drivers/android/binder_alloc_selftest.c index c839c490fde3..c2b323bc3b3a 100644 --- a/drivers/android/binder_alloc_selftest.c +++ b/drivers/android/binder_alloc_selftest.c @@ -1,18 +1,9 @@ +// SPDX-License-Identifier: GPL-2.0-only /* binder_alloc_selftest.c * * Android IPC Subsystem * * Copyright (C) 2017 Google, Inc. - * - * This software is licensed under the terms of the GNU General Public - * License version 2, as published by the Free Software Foundation, and - * may be copied, distributed, and modified under those terms. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt From b81bd3cc3f393589eb516898bcf2830489d6e473 Mon Sep 17 00:00:00 2001 From: kondors1995 Date: Wed, 7 Feb 2024 15:48:47 +0200 Subject: [PATCH 4/9] Revert "FROMGIT: binder: use EPOLLERR from eventpoll.h" This reverts commit a1d050902dec67683ae4c0e5f030e4d82f32eafa. 
--- drivers/android/binder.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/android/binder.c b/drivers/android/binder.c index 55274e7c15da..02e6c7456ba9 100644 --- a/drivers/android/binder.c +++ b/drivers/android/binder.c @@ -4787,7 +4787,7 @@ static unsigned int binder_poll(struct file *filp, thread = binder_get_thread(proc); if (!thread) - return EPOLLERR; + return POLLERR; binder_inner_proc_lock(thread->proc); thread->looper |= BINDER_LOOPER_STATE_POLL; From f676ed7aa6978997840cbe622acaca0ab6348d0e Mon Sep 17 00:00:00 2001 From: John Galt Date: Fri, 12 Jan 2024 22:45:00 -0500 Subject: [PATCH 5/9] treewide: revert over inline optimization For upcoming changes --- Makefile | 6 ------ arch/arm64/crypto/Makefile | 4 ---- arch/arm64/kernel/Makefile | 5 ----- arch/arm64/lib/Makefile | 4 ---- arch/arm64/mm/Makefile | 2 -- block/Makefile | 5 +---- crypto/Makefile | 3 --- drivers/android/Makefile | 2 -- drivers/block/Makefile | 3 --- drivers/block/zram/Makefile | 4 ---- drivers/char/Makefile | 3 --- drivers/cpufreq/Makefile | 5 ----- drivers/devfreq/Makefile | 2 -- drivers/gpu/drm/Makefile | 3 --- drivers/gpu/msm/Makefile | 2 -- drivers/input/fingerprint/goodix_ta/Makefile | 4 ---- drivers/input/touchscreen/goodix_driver_gt9886/Makefile | 2 -- drivers/staging/android/Makefile | 4 ---- drivers/staging/android/ion/Makefile | 2 -- fs/erofs/Makefile | 5 +++++ fs/f2fs/Makefile | 3 --- fs/fuse/Makefile | 3 --- init/Makefile | 2 -- kernel/Makefile | 5 ----- kernel/cgroup/Makefile | 2 -- kernel/irq/Makefile | 3 --- kernel/rcu/Makefile | 3 --- kernel/sched/Makefile | 3 --- lib/Makefile | 3 --- lib/lz4/Makefile | 3 +-- mm/Makefile | 3 --- net/Makefile | 3 --- 32 files changed, 7 insertions(+), 99 deletions(-) create mode 100644 fs/erofs/Makefile diff --git a/Makefile b/Makefile index 8850b2ca0d57..c0f7e2c94078 100644 --- a/Makefile +++ b/Makefile @@ -800,12 +800,6 @@ ifdef CONFIG_MINIMAL_TRACING_FOR_IORAP KBUILD_CFLAGS += -DNOTRACE endif -ifdef 
CONFIG_INLINE_OPTIMIZATION -KBUILD_CFLAGS += -mllvm -inline-threshold=2000 -KBUILD_CFLAGS += -mllvm -inlinehint-threshold=3000 -KBUILD_CFLAGS += -mllvm -unroll-threshold=1200 -endif - # Tell gcc to never replace conditional load with a non-conditional one KBUILD_CFLAGS += $(call cc-option,--param=allow-store-data-races=0) KBUILD_CFLAGS += $(call cc-option,-fno-allow-store-data-races) diff --git a/arch/arm64/crypto/Makefile b/arch/arm64/crypto/Makefile index 92dde6398c30..44a37a048c77 100644 --- a/arch/arm64/crypto/Makefile +++ b/arch/arm64/crypto/Makefile @@ -7,10 +7,6 @@ # it under the terms of the GNU General Public License version 2 as # published by the Free Software Foundation. # -ccflags-y += -O3 -ccflags-y += -mllvm -inline-threshold=15000 -ccflags-y += -mllvm -inlinehint-threshold=10000 - obj-$(CONFIG_CRYPTO_SHA1_ARM64_CE) += sha1-ce.o sha1-ce-y := sha1-ce-glue.o sha1-ce-core.o diff --git a/arch/arm64/kernel/Makefile b/arch/arm64/kernel/Makefile index aea398486ed8..ed671070fa32 100644 --- a/arch/arm64/kernel/Makefile +++ b/arch/arm64/kernel/Makefile @@ -11,12 +11,7 @@ CFLAGS_REMOVE_ftrace.o = -pg CFLAGS_REMOVE_insn.o = -pg CFLAGS_REMOVE_return_address.o = -pg -ccflags-y += -mllvm -inline-threshold=15000 -ccflags-y += -mllvm -inlinehint-threshold=10000 - CFLAGS_setup.o = -DUTS_MACHINE='"$(UTS_MACHINE)"' -ccflags-y += -mllvm -inline-threshold=15000 -ccflags-y += -mllvm -inlinehint-threshold=10000 # Object file lists. 
arm64-obj-y := debug-monitors.o entry.o irq.o fpsimd.o \ diff --git a/arch/arm64/lib/Makefile b/arch/arm64/lib/Makefile index 209a0734c715..3e5ff045fa87 100644 --- a/arch/arm64/lib/Makefile +++ b/arch/arm64/lib/Makefile @@ -1,8 +1,4 @@ # SPDX-License-Identifier: GPL-2.0 -ccflags-y += -O3 -ccflags-y += -mllvm -inline-threshold=15000 -ccflags-y += -mllvm -inlinehint-threshold=10000 - lib-y := checksum.o clear_user.o delay.o copy_from_user.o \ copy_to_user.o copy_in_user.o copy_page.o \ clear_page.o memchr.o memcpy.o memset.o \ diff --git a/arch/arm64/mm/Makefile b/arch/arm64/mm/Makefile index e2ce353d32cc..849c1df3d214 100644 --- a/arch/arm64/mm/Makefile +++ b/arch/arm64/mm/Makefile @@ -1,6 +1,4 @@ # SPDX-License-Identifier: GPL-2.0 -ccflags-y += -mllvm -inline-threshold=15000 -ccflags-y += -mllvm -inlinehint-threshold=10000 obj-y := dma-mapping.o extable.o fault.o init.o \ cache.o copypage.o flush.o \ ioremap.o mmap.o pgd.o mmu.o \ diff --git a/block/Makefile b/block/Makefile index 09f123f02b2e..ab14055d8222 100644 --- a/block/Makefile +++ b/block/Makefile @@ -11,9 +11,6 @@ obj-$(CONFIG_BLOCK) := bio.o elevator.o blk-core.o blk-tag.o blk-sysfs.o \ genhd.o partition-generic.o ioprio.o \ badblocks.o partitions/ -ccflags-y += -mllvm -inline-threshold=15000 -ccflags-y += -mllvm -inlinehint-threshold=10000 - obj-$(CONFIG_BOUNCE) += bounce.o obj-$(CONFIG_BLK_SCSI_REQUEST) += scsi_ioctl.o obj-$(CONFIG_BLK_DEV_BSG) += bsg.o @@ -40,4 +37,4 @@ obj-$(CONFIG_BLK_DEBUG_FS) += blk-mq-debugfs.o obj-$(CONFIG_BLK_SED_OPAL) += sed-opal.o obj-$(CONFIG_BLK_INLINE_ENCRYPTION) += keyslot-manager.o bio-crypt-ctx.o \ blk-crypto.o -obj-$(CONFIG_BLK_INLINE_ENCRYPTION_FALLBACK) += blk-crypto-fallback.o +obj-$(CONFIG_BLK_INLINE_ENCRYPTION_FALLBACK) += blk-crypto-fallback.o \ No newline at end of file diff --git a/crypto/Makefile b/crypto/Makefile index 526febafa420..ecb86e54b096 100644 --- a/crypto/Makefile +++ b/crypto/Makefile @@ -3,9 +3,6 @@ # Cryptographic API # -ccflags-y += -mllvm 
-inline-threshold=15000 -ccflags-y += -mllvm -inlinehint-threshold=10000 - obj-$(CONFIG_CRYPTO) += crypto.o crypto-y := api.o cipher.o compress.o memneq.o diff --git a/drivers/android/Makefile b/drivers/android/Makefile index 53989be6e157..7c91293b6d59 100644 --- a/drivers/android/Makefile +++ b/drivers/android/Makefile @@ -1,5 +1,3 @@ -ccflags-y += -mllvm -inline-threshold=15000 -ccflags-y += -mllvm -inlinehint-threshold=10000 ccflags-y += -I$(src) # needed for trace events obj-$(CONFIG_ANDROID_BINDERFS) += binderfs.o diff --git a/drivers/block/Makefile b/drivers/block/Makefile index 75c9547ed8d8..c0ff3d997271 100644 --- a/drivers/block/Makefile +++ b/drivers/block/Makefile @@ -6,9 +6,6 @@ # Rewritten to use lists instead of if-statements. # -ccflags-y += -mllvm -inline-threshold=15000 -ccflags-y += -mllvm -inlinehint-threshold=10000 - obj-$(CONFIG_MAC_FLOPPY) += swim3.o obj-$(CONFIG_BLK_DEV_SWIM) += swim_mod.o obj-$(CONFIG_BLK_DEV_FD) += floppy.o diff --git a/drivers/block/zram/Makefile b/drivers/block/zram/Makefile index f5f5d8fa0f2a..f8ebdea5c113 100644 --- a/drivers/block/zram/Makefile +++ b/drivers/block/zram/Makefile @@ -1,8 +1,4 @@ # SPDX-License-Identifier: GPL-2.0-only -ccflags-y += -mllvm -inline-threshold=15000 -ccflags-y += -mllvm -inlinehint-threshold=10000 -ccflags-y += -O3 - zram-y := zcomp.o zram_drv.o zram-$(CONFIG_ZRAM_DEDUP) += zram_dedup.o diff --git a/drivers/char/Makefile b/drivers/char/Makefile index 8ec2c413926a..17be42435cbe 100644 --- a/drivers/char/Makefile +++ b/drivers/char/Makefile @@ -2,9 +2,6 @@ # # Makefile for the kernel character device drivers. 
# -ccflags-y += -mllvm -inline-threshold=15000 -ccflags-y += -mllvm -inlinehint-threshold=10000 - obj-y += mem.o random.o obj-$(CONFIG_TTY_PRINTK) += ttyprintk.o obj-y += misc.o diff --git a/drivers/cpufreq/Makefile b/drivers/cpufreq/Makefile index 9301efae7062..6d92de038fdc 100644 --- a/drivers/cpufreq/Makefile +++ b/drivers/cpufreq/Makefile @@ -1,9 +1,4 @@ # SPDX-License-Identifier: GPL-2.0 - -ccflags-y += -mllvm -inline-threshold=15000 -ccflags-y += -mllvm -inlinehint-threshold=10000 -ccflags-y += -O3 - # CPUfreq core obj-$(CONFIG_CPU_FREQ) += cpufreq.o freq_table.o diff --git a/drivers/devfreq/Makefile b/drivers/devfreq/Makefile index 7e0874e4e823..be09908222c9 100644 --- a/drivers/devfreq/Makefile +++ b/drivers/devfreq/Makefile @@ -1,6 +1,4 @@ # SPDX-License-Identifier: GPL-2.0 -ccflags-y += -mllvm -inline-threshold=15000 -ccflags-y += -mllvm -inlinehint-threshold=10000 obj-$(CONFIG_PM_DEVFREQ) += devfreq.o obj-$(CONFIG_PM_DEVFREQ_EVENT) += devfreq-event.o obj-$(CONFIG_DEVFREQ_GOV_SIMPLE_ONDEMAND) += governor_simpleondemand.o diff --git a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile index 03953028a22e..19bfbc1beddf 100644 --- a/drivers/gpu/drm/Makefile +++ b/drivers/gpu/drm/Makefile @@ -3,9 +3,6 @@ # Makefile for the drm device driver. This driver provides support for the # Direct Rendering Infrastructure (DRI) in XFree86 4.1.0 and higher. 
-ccflags-y += -mllvm -inline-threshold=15000 -ccflags-y += -mllvm -inlinehint-threshold=10000 - drm-y := drm_auth.o drm_bufs.o drm_cache.o \ drm_context.o drm_dma.o \ drm_file.o drm_gem.o drm_ioctl.o drm_irq.o \ diff --git a/drivers/gpu/msm/Makefile b/drivers/gpu/msm/Makefile index 04a9e7a11a6a..dab939205127 100644 --- a/drivers/gpu/msm/Makefile +++ b/drivers/gpu/msm/Makefile @@ -1,6 +1,4 @@ ccflags-y := -Iinclude/linux -ccflags-y += -mllvm -inline-threshold=15000 -ccflags-y += -mllvm -inlinehint-threshold=10000 msm_kgsl_core-y = \ kgsl.o \ diff --git a/drivers/input/fingerprint/goodix_ta/Makefile b/drivers/input/fingerprint/goodix_ta/Makefile index 60b7fe9a29cb..701b102584eb 100644 --- a/drivers/input/fingerprint/goodix_ta/Makefile +++ b/drivers/input/fingerprint/goodix_ta/Makefile @@ -1,5 +1 @@ -ccflags-y += -O3 -ccflags-y += -mllvm -inline-threshold=15000 -ccflags-y += -mllvm -inlinehint-threshold=10000 - obj-$(CONFIG_FINGERPRINT_GOODIX_TA) += gf_spi.o diff --git a/drivers/input/touchscreen/goodix_driver_gt9886/Makefile b/drivers/input/touchscreen/goodix_driver_gt9886/Makefile index b21853c3f6d2..995cf9f0b057 100644 --- a/drivers/input/touchscreen/goodix_driver_gt9886/Makefile +++ b/drivers/input/touchscreen/goodix_driver_gt9886/Makefile @@ -1,5 +1,3 @@ -ccflags-y += -mllvm -inline-threshold=15000 -ccflags-y += -mllvm -inlinehint-threshold=10000 obj-$(CONFIG_TOUCHSCREEN_GOODIX_GTX8) += goodix_ts_i2c.o obj-$(CONFIG_TOUCHSCREEN_GOODIX_GTX8) += goodix_ts_core.o obj-$(CONFIG_TOUCHSCREEN_GOODIX_GTX8) += goodix_cfg_bin.o diff --git a/drivers/staging/android/Makefile b/drivers/staging/android/Makefile index d2656bc74121..766a635e0bbf 100644 --- a/drivers/staging/android/Makefile +++ b/drivers/staging/android/Makefile @@ -1,7 +1,3 @@ -ccflags-y += -mllvm -inline-threshold=15000 -ccflags-y += -mllvm -inlinehint-threshold=10000 -ccflags-y += -O3 - ccflags-y += -I$(src) # needed for trace events obj-y += ion/ diff --git a/drivers/staging/android/ion/Makefile 
b/drivers/staging/android/ion/Makefile index 5e3e654d8a33..4224f9ab0d2e 100644 --- a/drivers/staging/android/ion/Makefile +++ b/drivers/staging/android/ion/Makefile @@ -1,6 +1,4 @@ # SPDX-License-Identifier: GPL-2.0 -ccflags-y += -mllvm -inline-threshold=15000 -ccflags-y += -mllvm -inlinehint-threshold=10000 obj-$(CONFIG_ION) += ion.o ion_heap.o \ ion_page_pool.o ion_system_heap.o \ ion_carveout_heap.o ion_chunk_heap.o \ diff --git a/fs/erofs/Makefile b/fs/erofs/Makefile new file mode 100644 index 000000000000..23d26d8d4a90 --- /dev/null +++ b/fs/erofs/Makefile @@ -0,0 +1,5 @@ +# SPDX-License-Identifier: GPL-2.0-only +obj-$(CONFIG_EROFS_FS) += erofs.o +erofs-objs := super.o inode.o data.o namei.o dir.o utils.o pcpubuf.o +erofs-$(CONFIG_EROFS_FS_XATTR) += xattr.o +erofs-$(CONFIG_EROFS_FS_ZIP) += decompressor.o zmap.o zdata.o diff --git a/fs/f2fs/Makefile b/fs/f2fs/Makefile index 1190427cd0a4..8a7322d229e4 100644 --- a/fs/f2fs/Makefile +++ b/fs/f2fs/Makefile @@ -1,7 +1,4 @@ # SPDX-License-Identifier: GPL-2.0 -ccflags-y += -mllvm -inline-threshold=15000 -ccflags-y += -mllvm -inlinehint-threshold=10000 - obj-$(CONFIG_F2FS_FS) += f2fs.o f2fs-y := dir.o file.o inode.o namei.o hash.o super.o inline.o diff --git a/fs/fuse/Makefile b/fs/fuse/Makefile index 762b8288ff68..9b0821548ab4 100644 --- a/fs/fuse/Makefile +++ b/fs/fuse/Makefile @@ -2,9 +2,6 @@ # Makefile for the FUSE filesystem. 
# -ccflags-y += -mllvm -inline-threshold=15000 -ccflags-y += -mllvm -inlinehint-threshold=10000 - obj-$(CONFIG_FUSE_FS) += fuse.o obj-$(CONFIG_CUSE) += cuse.o diff --git a/init/Makefile b/init/Makefile index 025f813167c0..e5dd3192ed35 100644 --- a/init/Makefile +++ b/init/Makefile @@ -4,8 +4,6 @@ # ccflags-y := -fno-function-sections -fno-data-sections -ccflags-y += -mllvm -inline-threshold=15000 -ccflags-y += -mllvm -inlinehint-threshold=10000 obj-y := main.o version.o mounts.o obj-y += noinitramfs.o diff --git a/kernel/Makefile b/kernel/Makefile index 5b35dfa70032..0a5c93f005d1 100644 --- a/kernel/Makefile +++ b/kernel/Makefile @@ -34,15 +34,10 @@ CFLAGS_kcov.o := $(call cc-option, -fno-conserve-stack -fno-stack-protector) # cond_syscall is currently not LTO compatible CFLAGS_sys_ni.o = $(DISABLE_LTO) -ccflags-y += -mllvm -inline-threshold=15000 -ccflags-y += -mllvm -inlinehint-threshold=10000 # Don't instrument error handlers CFLAGS_cfi.o = $(DISABLE_CFI_CLANG) -ccflags-y += -mllvm -inline-threshold=15000 -ccflags-y += -mllvm -inlinehint-threshold=10000 - obj-y += sched/ obj-y += locking/ obj-y += power/ diff --git a/kernel/cgroup/Makefile b/kernel/cgroup/Makefile index 3bee28d3cc40..3fd48a8123af 100644 --- a/kernel/cgroup/Makefile +++ b/kernel/cgroup/Makefile @@ -1,6 +1,4 @@ # SPDX-License-Identifier: GPL-2.0 -ccflags-y += -mllvm -inline-threshold=15000 -ccflags-y += -mllvm -inlinehint-threshold=10000 obj-y := cgroup.o namespace.o cgroup-v1.o freezer.o obj-$(CONFIG_CGROUP_FREEZER) += legacy_freezer.o diff --git a/kernel/irq/Makefile b/kernel/irq/Makefile index 9d22287286e0..d7561172265b 100644 --- a/kernel/irq/Makefile +++ b/kernel/irq/Makefile @@ -1,8 +1,5 @@ # SPDX-License-Identifier: GPL-2.0 -ccflags-y += -mllvm -inline-threshold=15000 -ccflags-y += -mllvm -inlinehint-threshold=10000 - obj-y := irqdesc.o handle.o manage.o spurious.o resend.o chip.o dummychip.o devres.o obj-$(CONFIG_IRQ_TIMINGS) += timings.o obj-$(CONFIG_GENERIC_IRQ_CHIP) += generic-chip.o 
diff --git a/kernel/rcu/Makefile b/kernel/rcu/Makefile index acf53d4ea72f..020e8b6a644b 100644 --- a/kernel/rcu/Makefile +++ b/kernel/rcu/Makefile @@ -3,9 +3,6 @@ # and is generally not a function of system call inputs. KCOV_INSTRUMENT := n -ccflags-y += -mllvm -inline-threshold=15000 -ccflags-y += -mllvm -inlinehint-threshold=10000 - obj-y += update.o sync.o obj-$(CONFIG_TREE_SRCU) += srcutree.o obj-$(CONFIG_TINY_SRCU) += srcutiny.o diff --git a/kernel/sched/Makefile b/kernel/sched/Makefile index 259ef5e43bf6..9e582d302b49 100644 --- a/kernel/sched/Makefile +++ b/kernel/sched/Makefile @@ -16,9 +16,6 @@ ifneq ($(CONFIG_SCHED_OMIT_FRAME_POINTER),y) CFLAGS_core.o := $(PROFILING) -fno-omit-frame-pointer endif -ccflags-y += -mllvm -inline-threshold=15000 -ccflags-y += -mllvm -inlinehint-threshold=10000 - obj-y += core.o loadavg.o clock.o cputime.o obj-y += idle_task.o fair.o rt.o deadline.o obj-y += wait.o wait_bit.o swait.o completion.o idle.o diff --git a/lib/Makefile b/lib/Makefile index b86a6baf5cb0..da9bbd7be840 100644 --- a/lib/Makefile +++ b/lib/Makefile @@ -33,9 +33,6 @@ endif CFLAGS_string.o += $(call cc-option, -fno-stack-protector) endif -ccflags-y += -mllvm -inline-threshold=15000 -ccflags-y += -mllvm -inlinehint-threshold=10000 - lib-y := ctype.o string.o vsprintf.o cmdline.o \ rbtree.o radix-tree.o dump_stack.o timerqueue.o\ idr.o int_sqrt.o extable.o \ diff --git a/lib/lz4/Makefile b/lib/lz4/Makefile index 97400f18cabf..f7b113271d13 100644 --- a/lib/lz4/Makefile +++ b/lib/lz4/Makefile @@ -1,5 +1,4 @@ -ccflags-y += -mllvm -inline-threshold=15000 -ccflags-y += -mllvm -inlinehint-threshold=10000 +ccflags-y += -O3 obj-$(CONFIG_LZ4_COMPRESS) += lz4_compress.o obj-$(CONFIG_LZ4HC_COMPRESS) += lz4hc_compress.o diff --git a/mm/Makefile b/mm/Makefile index cb2f1a7a2989..702791a9a56a 100644 --- a/mm/Makefile +++ b/mm/Makefile @@ -21,9 +21,6 @@ KCOV_INSTRUMENT_memcontrol.o := n KCOV_INSTRUMENT_mmzone.o := n KCOV_INSTRUMENT_vmstat.o := n -ccflags-y += -mllvm 
-inline-threshold=15000 -ccflags-y += -mllvm -inlinehint-threshold=10000 - mmu-y := nommu.o mmu-$(CONFIG_MMU) := gup.o memory.o mincore.o \ mlock.o mmap.o mprotect.o mremap.o msync.o mmu_gather.o \ diff --git a/net/Makefile b/net/Makefile index ad1883ee733d..eb8767e90d1e 100644 --- a/net/Makefile +++ b/net/Makefile @@ -6,9 +6,6 @@ # Rewritten to use lists instead of if-statements. # -ccflags-y += -mllvm -inline-threshold=15000 -ccflags-y += -mllvm -inlinehint-threshold=10000 - obj-$(CONFIG_NET) := socket.o core/ tmp-$(CONFIG_COMPAT) := compat.o From bb68ce264124fee0ef1fbf1059e8e97c6635ee22 Mon Sep 17 00:00:00 2001 From: John Galt Date: Wed, 24 Jan 2024 22:33:06 -0500 Subject: [PATCH 6/9] qcacld-3.0: minimally utilize LTO: Only for better DCE and CFG simplification. Otherwise qcacld-3.0 output size for any benefit is far too significant. However the DCE is worthwhile --- drivers/staging/qcacld-3.0/Kbuild | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/staging/qcacld-3.0/Kbuild b/drivers/staging/qcacld-3.0/Kbuild index a6a21234eaf0..21e59fdbc56e 100644 --- a/drivers/staging/qcacld-3.0/Kbuild +++ b/drivers/staging/qcacld-3.0/Kbuild @@ -2301,6 +2301,7 @@ OBJS += $(TXRX3.0_OBJS) endif ccflags-y += $(INCS) +ldflags-y += --lto-O0 # LTO does way too much here ccflags-y += $(DISABLE_LTO) From 1f63660633656d19c587bb86cf3319df17ee8c5c Mon Sep 17 00:00:00 2001 From: kondors1995 Date: Thu, 25 Jan 2024 12:13:42 +0200 Subject: [PATCH 7/9] binder: Fix -Wunused warning ../drivers/android/binder.c:3071:7: warning: unused variable 'err' [-Wunused-variable] 3071 | int err; --- drivers/android/binder.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/android/binder.c b/drivers/android/binder.c index 02e6c7456ba9..93d0d685b9a7 100644 --- a/drivers/android/binder.c +++ b/drivers/android/binder.c @@ -3074,7 +3074,7 @@ static void binder_transaction(struct binder_proc *proc, goto err_binder_alloc_buf_failed; } if (secctx) { - int err; + __maybe_unused
int err; size_t buf_offset = ALIGN(tr->data_size, sizeof(void *)) + ALIGN(tr->offsets_size, sizeof(void *)) + ALIGN(extra_buffers_size, sizeof(void *)) - From 8d94d34622096f1f26cc622fd2c1f89ff43a45cb Mon Sep 17 00:00:00 2001 From: ergdevops Date: Sun, 26 Nov 2023 19:12:56 +0000 Subject: [PATCH 8/9] treewide: fix -Wmisleading-indentation warnings --- drivers/input/fingerprint/goodix_ta/gf_spi.c | 66 +++-- drivers/input/misc/qti-haptics.c | 76 ++--- .../msm/camera/cam_utils/cam_packet_util.c | 206 +++++++------ drivers/platform/msm/ipa/ipa_v3/ipa_eth_i.c | 243 +++++++-------- drivers/platform/msm/usb_bam.c | 278 +++++++++--------- fs/crypto/policy.c | 15 +- 6 files changed, 443 insertions(+), 441 deletions(-) diff --git a/drivers/input/fingerprint/goodix_ta/gf_spi.c b/drivers/input/fingerprint/goodix_ta/gf_spi.c index 2b2568da8bf1..27774b8aef54 100644 --- a/drivers/input/fingerprint/goodix_ta/gf_spi.c +++ b/drivers/input/fingerprint/goodix_ta/gf_spi.c @@ -100,40 +100,44 @@ static inline int gf_power_switch(struct gf_dev *gf_dev, int status) } static inline void gf_setup(struct gf_dev *gf_dev) { - gf_dev->pwr_gpio = of_get_named_gpio(gf_dev->spi->dev.of_node, - "fp-gpio-pwr", 0); - gpio_request(gf_dev->pwr_gpio, "goodix_pwr"); - //gpio_direction_output(gf_dev->pwr_gpio, 1); // will be turned on through ioctl - gf_dev->rst_gpio = of_get_named_gpio(gf_dev->spi->dev.of_node, - "goodix,gpio-reset", 0); - gpio_request(gf_dev->rst_gpio, "gpio-reset"); - gpio_direction_output(gf_dev->rst_gpio, 1); - gf_dev->irq_gpio = of_get_named_gpio(gf_dev->spi->dev.of_node, - "goodix,gpio-irq", 0); - gpio_request(gf_dev->irq_gpio, "gpio-irq"); - gpio_direction_input(gf_dev->irq_gpio); - gf_dev->irq = gpio_to_irq(gf_dev->irq_gpio); - if (!request_threaded_irq(gf_dev->irq, NULL, gf_irq, - IRQF_TRIGGER_RISING | IRQF_ONESHOT, - "gf", gf_dev)) - enable_irq_wake(gf_dev->irq); - gf_dev->irq_enabled = 1; - irq_switch(gf_dev, 0); - return; + gf_dev->pwr_gpio = 
of_get_named_gpio(gf_dev->spi->dev.of_node, + "fp-gpio-pwr", 0); + gpio_request(gf_dev->pwr_gpio, "goodix_pwr"); + //gpio_direction_output(gf_dev->pwr_gpio, 1); // will be turned on through ioctl + gf_dev->rst_gpio = of_get_named_gpio(gf_dev->spi->dev.of_node, + "goodix,gpio-reset", 0); + gpio_request(gf_dev->rst_gpio, "gpio-reset"); + gpio_direction_output(gf_dev->rst_gpio, 1); + gf_dev->irq_gpio = of_get_named_gpio(gf_dev->spi->dev.of_node, + "goodix,gpio-irq", 0); + gpio_request(gf_dev->irq_gpio, "gpio-irq"); + gpio_direction_input(gf_dev->irq_gpio); + gf_dev->irq = gpio_to_irq(gf_dev->irq_gpio); + if (!request_threaded_irq(gf_dev->irq, NULL, gf_irq, + IRQF_TRIGGER_RISING | IRQF_ONESHOT, + "gf", gf_dev)) { + enable_irq_wake(gf_dev->irq); + gf_dev->irq_enabled = 1; + irq_switch(gf_dev, 0); + } + return; } static inline void gf_cleanup(struct gf_dev *gf_dev) { - if (gf_dev->irq_enabled) { - irq_switch(gf_dev, 0); - free_irq(gf_dev->irq, gf_dev); - } - if (gpio_is_valid(gf_dev->irq_gpio)) - gpio_free(gf_dev->irq_gpio); - if (gpio_is_valid(gf_dev->rst_gpio)) - gpio_free(gf_dev->rst_gpio); - if (gpio_is_valid(gf_dev->pwr_gpio)) - gf_power_switch(gf_dev, 0); - gpio_free(gf_dev->pwr_gpio); + if (gf_dev->irq_enabled) { + irq_switch(gf_dev, 0); + free_irq(gf_dev->irq, gf_dev); + } + if (gpio_is_valid(gf_dev->irq_gpio)) { + gpio_free(gf_dev->irq_gpio); + } + if (gpio_is_valid(gf_dev->rst_gpio)) { + gpio_free(gf_dev->rst_gpio); + } + if (gpio_is_valid(gf_dev->pwr_gpio)) { + gf_power_switch(gf_dev, 0); + gpio_free(gf_dev->pwr_gpio); + } } static inline void gpio_reset(struct gf_dev *gf_dev) { diff --git a/drivers/input/misc/qti-haptics.c b/drivers/input/misc/qti-haptics.c index e3f51616a1b9..367ef5d36e71 100644 --- a/drivers/input/misc/qti-haptics.c +++ b/drivers/input/misc/qti-haptics.c @@ -288,51 +288,51 @@ static int qti_haptics_read(struct qti_hap_chip *chip, } static int qti_haptics_write(struct qti_hap_chip *chip, - u8 addr, u8 *val, int len) + u8 addr, u8 *val, int
len) { - int rc = 0, i; - unsigned long flags; + int rc = 0, i; + unsigned long flags; - spin_lock_irqsave(&chip->bus_lock, flags); - if (is_secure(addr)) { - for (i = 0; i < len; i++) { - rc = regmap_write(chip->regmap, - chip->reg_base + REG_HAP_SEC_ACCESS, - 0xA5); - if (rc < 0) { - dev_err(chip->dev, "write SEC_ACCESS failed, rc=%d\n", - rc); - goto unlock; - } + spin_lock_irqsave(&chip->bus_lock, flags); + if (is_secure(addr)) { + for (i = 0; i < len; i++) { + rc = regmap_write(chip->regmap, + chip->reg_base + REG_HAP_SEC_ACCESS, + 0xA5); + if (rc < 0) { + dev_err(chip->dev, "write SEC_ACCESS failed, rc=%d\n", + rc); + goto unlock; + } - rc = regmap_write(chip->regmap, - chip->reg_base + addr + i, val[i]); - if (rc < 0) { - dev_err(chip->dev, "write val 0x%x to addr 0x%x failed, rc=%d\n", - val[i], addr + i, rc); - goto unlock; - } - } - } else { - if (len > 1) - rc = regmap_bulk_write(chip->regmap, - chip->reg_base + addr, val, len); - else - rc = regmap_write(chip->regmap, - chip->reg_base + addr, *val); + rc = regmap_write(chip->regmap, + chip->reg_base + addr + i, val[i]); + if (rc < 0) { + dev_err(chip->dev, "write val 0x%x to addr 0x%x failed, rc=%d\n", + val[i], addr + i, rc); + goto unlock; + } + } + } else { + if (len > 1) + rc = regmap_bulk_write(chip->regmap, + chip->reg_base + addr, val, len); + else + rc = regmap_write(chip->regmap, + chip->reg_base + addr, *val); - if (rc < 0) - dev_err(chip->dev, "write addr 0x%x failed, rc=%d\n", - addr, rc); - } + if (rc < 0) + dev_err(chip->dev, "write addr 0x%x failed, rc=%d\n", + addr, rc); + } - for (i = 0; i < len; i++) - dev_dbg(chip->dev, "Update addr 0x%x to val 0x%x\n", - addr + i, val[i]); + for (i = 0; i < len; i++) + dev_dbg(chip->dev, "Update addr 0x%x to val 0x%x\n", + addr + i, val[i]); unlock: - spin_unlock_irqrestore(&chip->bus_lock, flags); - return rc; + spin_unlock_irqrestore(&chip->bus_lock, flags); + return rc; } static int qti_haptics_masked_write(struct qti_hap_chip *chip, u8 addr, diff 
--git a/drivers/media/platform/msm/camera/cam_utils/cam_packet_util.c b/drivers/media/platform/msm/camera/cam_utils/cam_packet_util.c index 0931580f8ef1..bc28d23db2e2 100644 --- a/drivers/media/platform/msm/camera/cam_utils/cam_packet_util.c +++ b/drivers/media/platform/msm/camera/cam_utils/cam_packet_util.c @@ -395,113 +395,109 @@ rel_cmd_buf: } int32_t cam_packet_validate_plane_size( - struct cam_buf_io_cfg *io_cfg, - int plane_index, - size_t size) + struct cam_buf_io_cfg *io_cfg, + int plane_index, + size_t size) { - int rc = 0; - uint32_t kmd_plane_size = 0; - uint32_t plane_stride = 0; - uint32_t slice_height = 0; - uint32_t metadata_size = 0; - uint32_t format = io_cfg->format; - uint32_t plane_pixel_size = 0; + int rc = 0; + uint32_t kmd_plane_size = 0; + uint32_t plane_stride = 0; + uint32_t slice_height = 0; + uint32_t metadata_size = 0; + uint32_t format = io_cfg->format; + uint32_t plane_pixel_size = 0; - if (plane_index < CAM_PACKET_MAX_PLANES) { - plane_stride = io_cfg->planes[plane_index].plane_stride; - slice_height = io_cfg->planes[plane_index].slice_height; - } + if (plane_index < CAM_PACKET_MAX_PLANES) { + plane_stride = io_cfg->planes[plane_index].plane_stride; + slice_height = io_cfg->planes[plane_index].slice_height; + } - if (!(plane_stride && slice_height)) { - CAM_ERR(CAM_ISP, - "Invalid values from UMD stride %d, slice height %d", - plane_stride, - slice_height); - return -EINVAL; - } + if (!(plane_stride && slice_height)) { + CAM_ERR(CAM_ISP, + "Invalid values from UMD stride %d, slice height %d", + plane_stride, + slice_height); + return -EINVAL; + } - switch (format) { - case CAM_FORMAT_MIPI_RAW_6: - case CAM_FORMAT_MIPI_RAW_8: - kmd_plane_size = ((plane_stride * slice_height) + 16 - 1) - / 16 * 16; - break; - case CAM_FORMAT_MIPI_RAW_10: - if (plane_stride % 4 == 0) - kmd_plane_size = ((plane_stride * slice_height) - + 16 - 1) / 16 * 16; - break; - case CAM_FORMAT_MIPI_RAW_12: - if (plane_stride % 2 == 0) - kmd_plane_size = 
((plane_stride * slice_height) - + 16 - 1) / 16 * 16; - break; - case CAM_FORMAT_MIPI_RAW_14: - if (plane_stride % 4 == 0) - kmd_plane_size = plane_stride * slice_height * 7 / 4; - break; - case CAM_FORMAT_PLAIN16_8: - case CAM_FORMAT_PLAIN16_10: - case CAM_FORMAT_PLAIN16_12: - case CAM_FORMAT_PLAIN16_14: - case CAM_FORMAT_PLAIN16_16: - case CAM_FORMAT_PLAIN64: - kmd_plane_size = plane_stride * slice_height; - break; - case CAM_FORMAT_NV21: - case CAM_FORMAT_NV12: - if (plane_index < CAM_PACKET_MAX_PLANES) - kmd_plane_size = plane_stride * slice_height; - break; - case CAM_FORMAT_PD10: - if (plane_index < CAM_PACKET_MAX_PLANES) - kmd_plane_size = plane_stride * slice_height; - break; - case CAM_FORMAT_UBWC_NV12: - case CAM_FORMAT_UBWC_NV12_4R: - case CAM_FORMAT_UBWC_TP10: - metadata_size = io_cfg->planes[plane_index].meta_size; - plane_pixel_size = ((plane_stride * slice_height) + - (4096 - 1)) & ~((uint32_t) 4096 - 1); - kmd_plane_size = metadata_size + plane_pixel_size; - break; - case CAM_FORMAT_UBWC_P010: - case CAM_FORMAT_PLAIN32_20: - case CAM_FORMAT_TP10: - case CAM_FORMAT_YUV422: - case CAM_FORMAT_PD8: - case CAM_FORMAT_PLAIN128: - case CAM_FORMAT_ARGB: - case CAM_FORMAT_ARGB_10: - case CAM_FORMAT_ARGB_12: - case CAM_FORMAT_ARGB_14: - case CAM_FORMAT_MIPI_RAW_16: - case CAM_FORMAT_MIPI_RAW_20: - case CAM_FORMAT_QTI_RAW_8: - case CAM_FORMAT_QTI_RAW_10: - case CAM_FORMAT_QTI_RAW_12: - case CAM_FORMAT_QTI_RAW_14: - case CAM_FORMAT_PLAIN8: - case CAM_FORMAT_PLAIN8_SWAP: - case CAM_FORMAT_PLAIN8_10: - case CAM_FORMAT_PLAIN8_10_SWAP: - kmd_plane_size = plane_stride * slice_height; - break; - default: - kmd_plane_size = plane_stride * slice_height; - break; - } - if (!kmd_plane_size || - kmd_plane_size > (size - io_cfg->offsets[plane_index])) { - CAM_ERR(CAM_ISP, - "kmd size: %d umd size: %d width: %d height: %d stride: %d sliceheight: %d ", - kmd_plane_size, - size, - io_cfg->planes[plane_index].width, - io_cfg->planes[plane_index].height, - plane_stride, - 
slice_height); - return -EINVAL; - } - return rc; + switch (format) { + case CAM_FORMAT_MIPI_RAW_6: + case CAM_FORMAT_MIPI_RAW_8: + kmd_plane_size = ((plane_stride * slice_height) + 16 - 1) / 16 * 16; + break; + case CAM_FORMAT_MIPI_RAW_10: + if (plane_stride % 4 == 0) + kmd_plane_size = ((plane_stride * slice_height) + 16 - 1) / 16 * 16; + break; + case CAM_FORMAT_MIPI_RAW_12: + if (plane_stride % 2 == 0) + kmd_plane_size = ((plane_stride * slice_height) + 16 - 1) / 16 * 16; + break; + case CAM_FORMAT_MIPI_RAW_14: + if (plane_stride % 4 == 0) + kmd_plane_size = plane_stride * slice_height * 7 / 4; + break; + case CAM_FORMAT_PLAIN16_8: + case CAM_FORMAT_PLAIN16_10: + case CAM_FORMAT_PLAIN16_12: + case CAM_FORMAT_PLAIN16_14: + case CAM_FORMAT_PLAIN16_16: + case CAM_FORMAT_PLAIN64: + kmd_plane_size = plane_stride * slice_height; + break; + case CAM_FORMAT_NV21: + case CAM_FORMAT_NV12: + if (plane_index < CAM_PACKET_MAX_PLANES) + kmd_plane_size = plane_stride * slice_height; + break; + case CAM_FORMAT_PD10: + if (plane_index < CAM_PACKET_MAX_PLANES) + kmd_plane_size = plane_stride * slice_height; + break; + case CAM_FORMAT_UBWC_NV12: + case CAM_FORMAT_UBWC_NV12_4R: + case CAM_FORMAT_UBWC_TP10: + metadata_size = io_cfg->planes[plane_index].meta_size; + plane_pixel_size = ((plane_stride * slice_height) + (4096 - 1)) & ~((uint32_t)4096 - 1); + kmd_plane_size = metadata_size + plane_pixel_size; + break; + case CAM_FORMAT_UBWC_P010: + case CAM_FORMAT_PLAIN32_20: + case CAM_FORMAT_TP10: + case CAM_FORMAT_YUV422: + case CAM_FORMAT_PD8: + case CAM_FORMAT_PLAIN128: + case CAM_FORMAT_ARGB: + case CAM_FORMAT_ARGB_10: + case CAM_FORMAT_ARGB_12: + case CAM_FORMAT_ARGB_14: + case CAM_FORMAT_MIPI_RAW_16: + case CAM_FORMAT_MIPI_RAW_20: + case CAM_FORMAT_QTI_RAW_8: + case CAM_FORMAT_QTI_RAW_10: + case CAM_FORMAT_QTI_RAW_12: + case CAM_FORMAT_QTI_RAW_14: + case CAM_FORMAT_PLAIN8: + case CAM_FORMAT_PLAIN8_SWAP: + case CAM_FORMAT_PLAIN8_10: + case CAM_FORMAT_PLAIN8_10_SWAP: + 
kmd_plane_size = plane_stride * slice_height; + break; + default: + kmd_plane_size = plane_stride * slice_height; + break; + } + if (!kmd_plane_size || + kmd_plane_size > (size - io_cfg->offsets[plane_index])) { + CAM_ERR(CAM_ISP, + "kmd size: %d umd size: %d width: %d height: %d stride: %d sliceheight: %d ", + kmd_plane_size, + size, + io_cfg->planes[plane_index].width, + io_cfg->planes[plane_index].height, + plane_stride, + slice_height); + return -EINVAL; + } + return rc; } diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_eth_i.c b/drivers/platform/msm/ipa/ipa_v3/ipa_eth_i.c index a1d74a714023..9213dd08cdca 100644 --- a/drivers/platform/msm/ipa/ipa_v3/ipa_eth_i.c +++ b/drivers/platform/msm/ipa/ipa_v3/ipa_eth_i.c @@ -199,135 +199,136 @@ static void ipa_eth_gsi_chan_err_cb(struct gsi_chan_err_notify *notify) static int ipa_eth_setup_rtk_gsi_channel( - struct ipa_eth_client_pipe_info *pipe, - struct ipa3_ep_context *ep) + struct ipa_eth_client_pipe_info *pipe, + struct ipa3_ep_context *ep) { - struct gsi_evt_ring_props gsi_evt_ring_props; - struct gsi_chan_props gsi_channel_props; - union __packed gsi_channel_scratch ch_scratch; - union __packed gsi_evt_scratch evt_scratch; - const struct ipa_gsi_ep_config *gsi_ep_info; - int result, len; - int queue_number; - u64 bar_addr; + struct gsi_evt_ring_props gsi_evt_ring_props; + struct gsi_chan_props gsi_channel_props; + union __packed gsi_channel_scratch ch_scratch; + union __packed gsi_evt_scratch evt_scratch; + const struct ipa_gsi_ep_config *gsi_ep_info; + int result, len; + int queue_number; + u64 bar_addr; - if (unlikely(!pipe->info.is_transfer_ring_valid)) { - IPAERR("RTK transfer ring invalid\n"); - ipa_assert(); - return -EFAULT; - } + if (unlikely(!pipe->info.is_transfer_ring_valid)) { + IPAERR("RTK transfer ring invalid\n"); + ipa_assert(); + return -EFAULT; + } - /* setup event ring */ - bar_addr = - IPA_ETH_PCIE_SET(pipe->info.client_info.rtk.bar_addr); - memset(&gsi_evt_ring_props, 0, 
sizeof(gsi_evt_ring_props)); - gsi_evt_ring_props.intf = GSI_EVT_CHTYPE_RTK_EV; - gsi_evt_ring_props.intr = GSI_INTR_MSI; - gsi_evt_ring_props.re_size = GSI_EVT_RING_RE_SIZE_16B; - if (pipe->dir == IPA_ETH_PIPE_DIR_TX) { - gsi_evt_ring_props.int_modt = IPA_ETH_RTK_MODT; - gsi_evt_ring_props.int_modc = IPA_ETH_RTK_MODC; - } - gsi_evt_ring_props.exclusive = true; - gsi_evt_ring_props.err_cb = ipa_eth_gsi_evt_ring_err_cb; - gsi_evt_ring_props.user_data = NULL; - gsi_evt_ring_props.msi_addr = - bar_addr + - pipe->info.client_info.rtk.dest_tail_ptr_offs; - len = pipe->info.transfer_ring_size; - gsi_evt_ring_props.ring_len = len; - gsi_evt_ring_props.ring_base_addr = - (u64)pipe->info.transfer_ring_base; - result = gsi_alloc_evt_ring(&gsi_evt_ring_props, - ipa3_ctx->gsi_dev_hdl, - &ep->gsi_evt_ring_hdl); - if (result != GSI_STATUS_SUCCESS) { - IPAERR("fail to alloc RX event ring\n"); - return -EFAULT; - } - ep->gsi_mem_info.evt_ring_len = - gsi_evt_ring_props.ring_len; - ep->gsi_mem_info.evt_ring_base_addr = - gsi_evt_ring_props.ring_base_addr; + /* setup event ring */ + bar_addr = + IPA_ETH_PCIE_SET(pipe->info.client_info.rtk.bar_addr); + memset(&gsi_evt_ring_props, 0, sizeof(gsi_evt_ring_props)); + gsi_evt_ring_props.intf = GSI_EVT_CHTYPE_RTK_EV; + gsi_evt_ring_props.intr = GSI_INTR_MSI; + gsi_evt_ring_props.re_size = GSI_EVT_RING_RE_SIZE_16B; + if (pipe->dir == IPA_ETH_PIPE_DIR_TX) { + gsi_evt_ring_props.int_modt = IPA_ETH_RTK_MODT; + gsi_evt_ring_props.int_modc = IPA_ETH_RTK_MODC; + } + gsi_evt_ring_props.exclusive = true; + gsi_evt_ring_props.err_cb = ipa_eth_gsi_evt_ring_err_cb; + gsi_evt_ring_props.user_data = NULL; + gsi_evt_ring_props.msi_addr = + bar_addr + + pipe->info.client_info.rtk.dest_tail_ptr_offs; + len = pipe->info.transfer_ring_size; + gsi_evt_ring_props.ring_len = len; + gsi_evt_ring_props.ring_base_addr = + (u64)pipe->info.transfer_ring_base; + result = gsi_alloc_evt_ring(&gsi_evt_ring_props, + ipa3_ctx->gsi_dev_hdl, + &ep->gsi_evt_ring_hdl); + if 
(result != GSI_STATUS_SUCCESS) { + IPAERR("fail to alloc RX event ring\n"); + return -EFAULT; + } + ep->gsi_mem_info.evt_ring_len = + gsi_evt_ring_props.ring_len; + ep->gsi_mem_info.evt_ring_base_addr = + gsi_evt_ring_props.ring_base_addr; - /* setup channel ring */ - memset(&gsi_channel_props, 0, sizeof(gsi_channel_props)); - gsi_channel_props.prot = GSI_CHAN_PROT_RTK; - if (pipe->dir == IPA_ETH_PIPE_DIR_TX) - gsi_channel_props.dir = GSI_CHAN_DIR_FROM_GSI; - else - gsi_channel_props.dir = GSI_CHAN_DIR_TO_GSI; - gsi_ep_info = ipa3_get_gsi_ep_info(ep->client); - if (!gsi_ep_info) { - IPAERR("Failed getting GSI EP info for client=%d\n", - ep->client); - result = -EINVAL; - goto fail_get_gsi_ep_info; - } else - gsi_channel_props.ch_id = gsi_ep_info->ipa_gsi_chan_num; - gsi_channel_props.evt_ring_hdl = ep->gsi_evt_ring_hdl; - gsi_channel_props.re_size = GSI_CHAN_RE_SIZE_16B; - gsi_channel_props.use_db_eng = GSI_CHAN_DB_MODE; - gsi_channel_props.max_prefetch = GSI_ONE_PREFETCH_SEG; - gsi_channel_props.prefetch_mode = - gsi_ep_info->prefetch_mode; - gsi_channel_props.empty_lvl_threshold = - gsi_ep_info->prefetch_threshold; - gsi_channel_props.low_weight = 1; - gsi_channel_props.err_cb = ipa_eth_gsi_chan_err_cb; - gsi_channel_props.ring_len = len; - gsi_channel_props.ring_base_addr = - (u64)pipe->info.transfer_ring_base; - result = gsi_alloc_channel(&gsi_channel_props, ipa3_ctx->gsi_dev_hdl, - &ep->gsi_chan_hdl); - if (result != GSI_STATUS_SUCCESS) - goto fail_get_gsi_ep_info; - ep->gsi_mem_info.chan_ring_len = gsi_channel_props.ring_len; - ep->gsi_mem_info.chan_ring_base_addr = - gsi_channel_props.ring_base_addr; + /* setup channel ring */ + memset(&gsi_channel_props, 0, sizeof(gsi_channel_props)); + gsi_channel_props.prot = GSI_CHAN_PROT_RTK; + if (pipe->dir == IPA_ETH_PIPE_DIR_TX) + gsi_channel_props.dir = GSI_CHAN_DIR_FROM_GSI; + else + gsi_channel_props.dir = GSI_CHAN_DIR_TO_GSI; + gsi_ep_info = ipa3_get_gsi_ep_info(ep->client); + if (!gsi_ep_info) { + IPAERR("Failed 
getting GSI EP info for client=%d\n", + ep->client); + result = -EINVAL; + goto fail_get_gsi_ep_info; + } else + gsi_channel_props.ch_id = gsi_ep_info->ipa_gsi_chan_num; + gsi_channel_props.evt_ring_hdl = ep->gsi_evt_ring_hdl; + gsi_channel_props.re_size = GSI_CHAN_RE_SIZE_16B; + gsi_channel_props.use_db_eng = GSI_CHAN_DB_MODE; + gsi_channel_props.max_prefetch = GSI_ONE_PREFETCH_SEG; + gsi_channel_props.prefetch_mode = + gsi_ep_info->prefetch_mode; + gsi_channel_props.empty_lvl_threshold = + gsi_ep_info->prefetch_threshold; + gsi_channel_props.low_weight = 1; + gsi_channel_props.err_cb = ipa_eth_gsi_chan_err_cb; + gsi_channel_props.ring_len = len; + gsi_channel_props.ring_base_addr = + (u64)pipe->info.transfer_ring_base; + result = gsi_alloc_channel(&gsi_channel_props, ipa3_ctx->gsi_dev_hdl, + &ep->gsi_chan_hdl); + if (result != GSI_STATUS_SUCCESS) + goto fail_get_gsi_ep_info; + ep->gsi_mem_info.chan_ring_len = gsi_channel_props.ring_len; + ep->gsi_mem_info.chan_ring_base_addr = + gsi_channel_props.ring_base_addr; - /* write event scratch */ - memset(&evt_scratch, 0, sizeof(evt_scratch)); - /* nothing is needed for RTK event scratch */ + /* write event scratch */ + memset(&evt_scratch, 0, sizeof(evt_scratch)); + /* nothing is needed for RTK event scratch */ + + /* write ch scratch */ + queue_number = pipe->info.client_info.rtk.queue_number; + memset(&ch_scratch, 0, sizeof(ch_scratch)); + ch_scratch.rtk.rtk_bar_low = + (u32)bar_addr; + ch_scratch.rtk.rtk_bar_high = + (u32)((u64)(bar_addr) >> 32); + /* + * RX: Queue Number will be as is received from RTK + * (Range 0 - 15). + * TX: Queue Number will be configured to be + * either 16 or 18. + * (For TX Queue 0: Configure 16) + * (For TX Queue 1: Configure 18) + */ + ch_scratch.rtk.queue_number = + (pipe->dir == IPA_ETH_PIPE_DIR_RX) ? + pipe->info.client_info.rtk.queue_number : + (queue_number == 0) ? 
16 : 18; + ch_scratch.rtk.fix_buff_size = + ilog2(pipe->info.fix_buffer_size); + ch_scratch.rtk.rtk_buff_addr_low = + (u32)pipe->info.data_buff_list[0].iova; + ch_scratch.rtk.rtk_buff_addr_high = + (u32)((u64)(pipe->info.data_buff_list[0].iova) >> 32); + result = gsi_write_channel_scratch(ep->gsi_chan_hdl, ch_scratch); + if (result != GSI_STATUS_SUCCESS) { + IPAERR("failed to write evt ring scratch\n"); + goto fail_write_scratch; + } + return 0; - /* write ch scratch */ - queue_number = pipe->info.client_info.rtk.queue_number; - memset(&ch_scratch, 0, sizeof(ch_scratch)); - ch_scratch.rtk.rtk_bar_low = - (u32)bar_addr; - ch_scratch.rtk.rtk_bar_high = - (u32)((u64)(bar_addr) >> 32); - /* - * RX: Queue Number will be as is received from RTK - * (Range 0 - 15). - * TX: Queue Number will be configured to be - * either 16 or 18. - * (For TX Queue 0: Configure 16) - * (For TX Queue 1: Configure 18) - */ - ch_scratch.rtk.queue_number = - (pipe->dir == IPA_ETH_PIPE_DIR_RX) ? - pipe->info.client_info.rtk.queue_number : - (queue_number == 0) ? 
16 : 18; - ch_scratch.rtk.fix_buff_size = - ilog2(pipe->info.fix_buffer_size); - ch_scratch.rtk.rtk_buff_addr_low = - (u32)pipe->info.data_buff_list[0].iova; - ch_scratch.rtk.rtk_buff_addr_high = - (u32)((u64)(pipe->info.data_buff_list[0].iova) >> 32); - result = gsi_write_channel_scratch(ep->gsi_chan_hdl, ch_scratch); - if (result != GSI_STATUS_SUCCESS) { - IPAERR("failed to write evt ring scratch\n"); - goto fail_write_scratch; - } - return 0; fail_write_scratch: - gsi_dealloc_channel(ep->gsi_chan_hdl); - ep->gsi_chan_hdl = ~0; + gsi_dealloc_channel(ep->gsi_chan_hdl); + ep->gsi_chan_hdl = ~0; fail_get_gsi_ep_info: - gsi_dealloc_evt_ring(ep->gsi_evt_ring_hdl); - ep->gsi_evt_ring_hdl = ~0; - return result; + gsi_dealloc_evt_ring(ep->gsi_evt_ring_hdl); + ep->gsi_evt_ring_hdl = ~0; + return result; } static int ipa3_smmu_map_rtk_pipes(struct ipa_eth_client_pipe_info *pipe, diff --git a/drivers/platform/msm/usb_bam.c b/drivers/platform/msm/usb_bam.c index b7dcab504a29..f7a5724b8845 100644 --- a/drivers/platform/msm/usb_bam.c +++ b/drivers/platform/msm/usb_bam.c @@ -1146,54 +1146,54 @@ static bool usb_bam_resume_core(enum usb_ctrl bam_type, * Return: 0 in case of success, errno otherwise. 
*/ static int usb_bam_disconnect_ipa_prod( - struct usb_bam_connect_ipa_params *ipa_params, - enum usb_ctrl cur_bam) + struct usb_bam_connect_ipa_params *ipa_params, + enum usb_ctrl cur_bam) { - int ret; - u8 idx = 0; - struct usb_bam_pipe_connect *pipe_connect; - struct usb_bam_ctx_type *ctx = &msm_usb_bam[cur_bam]; + int ret; + u8 idx = 0; + struct usb_bam_pipe_connect *pipe_connect; + struct usb_bam_ctx_type *ctx = &msm_usb_bam[cur_bam]; - idx = ipa_params->dst_idx; - pipe_connect = &ctx->usb_bam_connections[idx]; - pipe_connect->activity_notify = NULL; - pipe_connect->inactivity_notify = NULL; - pipe_connect->priv = NULL; + idx = ipa_params->dst_idx; + pipe_connect = &ctx->usb_bam_connections[idx]; + pipe_connect->activity_notify = NULL; + pipe_connect->inactivity_notify = NULL; + pipe_connect->priv = NULL; - /* close IPA -> USB pipe */ - if (pipe_connect->pipe_type == USB_BAM_PIPE_BAM2BAM) { - ret = ipa_disconnect(ipa_params->prod_clnt_hdl); - if (ret) { - log_event_err("%s: dst pipe disconnection failure\n", - __func__); - return ret; - } + /* close IPA -> USB pipe */ + if (pipe_connect->pipe_type == USB_BAM_PIPE_BAM2BAM) { + ret = ipa_disconnect(ipa_params->prod_clnt_hdl); + if (ret) { + log_event_err("%s: dst pipe disconnection failure\n", + __func__); + return ret; + } - ret = usb_bam_disconnect_pipe(cur_bam, idx); - if (ret) { - log_event_err("%s: failure to disconnect pipe %d\n", - __func__, idx); - return ret; - } - } else { - ret = ipa_teardown_sys_pipe(ipa_params->prod_clnt_hdl); - if (ret) { - log_event_err("%s: dst pipe disconnection failure\n", - __func__); - return ret; - } + ret = usb_bam_disconnect_pipe(cur_bam, idx); + if (ret) { + log_event_err("%s: failure to disconnect pipe %d\n", + __func__, idx); + return ret; + } + } else { + ret = ipa_teardown_sys_pipe(ipa_params->prod_clnt_hdl); + if (ret) { + log_event_err("%s: dst pipe disconnection failure\n", + __func__); + return ret; + } - pipe_connect->enabled = false; - 
spin_lock(&ctx->usb_bam_lock); - if (ctx->pipes_enabled_per_bam == 0) - log_event_err("%s: wrong pipes enabled counter for bam=%d\n", - __func__, pipe_connect->bam_type); - else - ctx->pipes_enabled_per_bam -= 1; - spin_unlock(&ctx->usb_bam_lock); - } + pipe_connect->enabled = false; + spin_lock(&ctx->usb_bam_lock); + if (ctx->pipes_enabled_per_bam == 0) + log_event_err("%s: wrong pipes enabled counter for bam=%d\n", + __func__, pipe_connect->bam_type); + else + ctx->pipes_enabled_per_bam -= 1; + spin_unlock(&ctx->usb_bam_lock); + } - return 0; + return 0; } /** @@ -1208,117 +1208,117 @@ static int usb_bam_disconnect_ipa_prod( * Return: 0 in case of success, errno otherwise. */ static int usb_bam_disconnect_ipa_cons( - struct usb_bam_connect_ipa_params *ipa_params, - enum usb_ctrl cur_bam) + struct usb_bam_connect_ipa_params *ipa_params, + enum usb_ctrl cur_bam) { - int ret; - u8 idx = 0; - struct usb_bam_ctx_type *ctx = &msm_usb_bam[cur_bam]; - struct usb_bam_pipe_connect *pipe_connect; - struct sps_pipe *pipe; - u32 timeout = 10, pipe_empty; - struct usb_bam_sps_type usb_bam_sps = ctx->usb_bam_sps; - struct sps_connect *sps_connection; - bool inject_zlt = true; + int ret; + u8 idx = 0; + struct usb_bam_ctx_type *ctx = &msm_usb_bam[cur_bam]; + struct usb_bam_pipe_connect *pipe_connect; + struct sps_pipe *pipe; + u32 timeout = 10, pipe_empty; + struct usb_bam_sps_type usb_bam_sps = ctx->usb_bam_sps; + struct sps_connect *sps_connection; + bool inject_zlt = true; - idx = ipa_params->src_idx; - pipe = ctx->usb_bam_sps.sps_pipes[idx]; - pipe_connect = &ctx->usb_bam_connections[idx]; - sps_connection = &usb_bam_sps.sps_connections[idx]; + idx = ipa_params->src_idx; + pipe = ctx->usb_bam_sps.sps_pipes[idx]; + pipe_connect = &ctx->usb_bam_connections[idx]; + sps_connection = &usb_bam_sps.sps_connections[idx]; - pipe_connect->activity_notify = NULL; - pipe_connect->inactivity_notify = NULL; - pipe_connect->priv = NULL; + pipe_connect->activity_notify = NULL; + 
pipe_connect->inactivity_notify = NULL; + pipe_connect->priv = NULL; - /* - * On some platforms, there is a chance that flow control - * is disabled from IPA side, due to this IPA core may not - * consume data from USB. Hence notify IPA to enable flow - * control and then check sps pipe is empty or not before - * processing USB->IPA pipes disconnect. - */ - ipa_clear_endpoint_delay(ipa_params->cons_clnt_hdl); + /* + * On some platforms, there is a chance that flow control + * is disabled from IPA side, due to this IPA core may not + * consume data from USB. Hence notify IPA to enable flow + * control and then check sps pipe is empty or not before + * processing USB->IPA pipes disconnect. + */ + ipa_clear_endpoint_delay(ipa_params->cons_clnt_hdl); retry: - /* Make sure pipe is empty before disconnecting it */ - while (1) { - ret = sps_is_pipe_empty(pipe, &pipe_empty); - if (ret) { - log_event_err("%s: sps_is_pipe_empty failed with %d\n", - __func__, ret); - return ret; - } - if (pipe_empty || !--timeout) - break; + /* Make sure pipe is empty before disconnecting it */ + while (1) { + ret = sps_is_pipe_empty(pipe, &pipe_empty); + if (ret) { + log_event_err("%s: sps_is_pipe_empty failed with %d\n", + __func__, ret); + return ret; + } + if (pipe_empty || !--timeout) + break; - /* Check again */ - usleep_range(1000, 2000); - } + /* Check again */ + usleep_range(1000, 2000); + } - if (!pipe_empty) { - if (inject_zlt) { - pr_debug("%s: Inject ZLT\n", __func__); - log_event_dbg("%s: Inject ZLT\n", __func__); - inject_zlt = false; - sps_pipe_inject_zlt(sps_connection->destination, - sps_connection->dest_pipe_index); - timeout = 10; - goto retry; - } - log_event_err("%s: src pipe(USB) not empty, wait timed out!\n", - __func__); - sps_get_bam_debug_info(ctx->h_bam, 93, - (SPS_BAM_PIPE(0) | SPS_BAM_PIPE(1)), 0, 2); - ipa_bam_reg_dump(); - panic("%s:SPS pipe not empty for USB->IPA\n", __func__); - } + if (!pipe_empty) { + if (inject_zlt) { + pr_debug("%s: Inject ZLT\n", 
__func__); + log_event_dbg("%s: Inject ZLT\n", __func__); + inject_zlt = false; + sps_pipe_inject_zlt(sps_connection->destination, + sps_connection->dest_pipe_index); + timeout = 10; + goto retry; + } + log_event_err("%s: src pipe(USB) not empty, wait timed out!\n", + __func__); + sps_get_bam_debug_info(ctx->h_bam, 93, + (SPS_BAM_PIPE(0) | SPS_BAM_PIPE(1)), 0, 2); + ipa_bam_reg_dump(); + panic("%s:SPS pipe not empty for USB->IPA\n", __func__); + } - /* Do the release handshake with the IPA via RM */ - spin_lock(&usb_bam_ipa_handshake_info_lock); - info[cur_bam].connect_complete = 0; - info[cur_bam].disconnected = 1; - spin_unlock(&usb_bam_ipa_handshake_info_lock); + /* Do the release handshake with the IPA via RM */ + spin_lock(&usb_bam_ipa_handshake_info_lock); + info[cur_bam].connect_complete = 0; + info[cur_bam].disconnected = 1; + spin_unlock(&usb_bam_ipa_handshake_info_lock); - /* Start release handshake on the last USB BAM producer pipe */ - if (info[cur_bam].prod_pipes_enabled_per_bam == 1) - wait_for_prod_release(cur_bam); + /* Start release handshake on the last USB BAM producer pipe */ + if (info[cur_bam].prod_pipes_enabled_per_bam == 1) + wait_for_prod_release(cur_bam); - /* close USB -> IPA pipe */ - if (pipe_connect->pipe_type == USB_BAM_PIPE_BAM2BAM) { - ret = ipa_disconnect(ipa_params->cons_clnt_hdl); - if (ret) { - log_event_err("%s: src pipe disconnection failure\n", - __func__); - return ret; - } + /* close USB -> IPA pipe */ + if (pipe_connect->pipe_type == USB_BAM_PIPE_BAM2BAM) { + ret = ipa_disconnect(ipa_params->cons_clnt_hdl); + if (ret) { + log_event_err("%s: src pipe disconnection failure\n", + __func__); + return ret; + } - ret = usb_bam_disconnect_pipe(cur_bam, idx); - if (ret) { - log_event_err("%s: failure to disconnect pipe %d\n", - __func__, idx); - return ret; - } - } else { - ret = ipa_teardown_sys_pipe(ipa_params->cons_clnt_hdl); - if (ret) { - log_event_err("%s: src pipe disconnection failure\n", - __func__); - return ret; - } + 
ret = usb_bam_disconnect_pipe(cur_bam, idx); + if (ret) { + log_event_err("%s: failure to disconnect pipe %d\n", + __func__, idx); + return ret; + } + } else { + ret = ipa_teardown_sys_pipe(ipa_params->cons_clnt_hdl); + if (ret) { + log_event_err("%s: src pipe disconnection failure\n", + __func__); + return ret; + } - pipe_connect->enabled = false; - spin_lock(&ctx->usb_bam_lock); - if (ctx->pipes_enabled_per_bam == 0) - log_event_err("%s: wrong pipes enabled counter for bam=%d\n", - __func__, pipe_connect->bam_type); - else - ctx->pipes_enabled_per_bam -= 1; - spin_unlock(&ctx->usb_bam_lock); - } + pipe_connect->enabled = false; + spin_lock(&ctx->usb_bam_lock); + if (ctx->pipes_enabled_per_bam == 0) + log_event_err("%s: wrong pipes enabled counter for bam=%d\n", + __func__, pipe_connect->bam_type); + else + ctx->pipes_enabled_per_bam -= 1; + spin_unlock(&ctx->usb_bam_lock); + } - pipe_connect->ipa_clnt_hdl = -1; - info[cur_bam].prod_pipes_enabled_per_bam -= 1; + pipe_connect->ipa_clnt_hdl = -1; + info[cur_bam].prod_pipes_enabled_per_bam -= 1; - return 0; + return 0; } static void _msm_bam_wait_for_host_prod_granted(enum usb_ctrl bam_type) diff --git a/fs/crypto/policy.c b/fs/crypto/policy.c index 9c9ab664befc..141b331d8bf7 100644 --- a/fs/crypto/policy.c +++ b/fs/crypto/policy.c @@ -628,14 +628,15 @@ EXPORT_SYMBOL(fscrypt_has_permitted_context); static int fscrypt_update_context(union fscrypt_context *ctx) { - char *boot = "ufs"; + char *boot = "ufs"; - if (!fscrypt_find_storage_type(&boot)) { - if (!strcmp(boot, SDHCI)) - ctx->v1.flags |= FSCRYPT_POLICY_FLAG_IV_INO_LBLK_32; - return 0; - } - return -EINVAL; + if (!fscrypt_find_storage_type(&boot)) { + if (!strcmp(boot, SDHCI)) { + ctx->v1.flags |= FSCRYPT_POLICY_FLAG_IV_INO_LBLK_32; + return 0; + } + } + return -EINVAL; } /** From f8b5088b0dcdf771ce3d80b0c4bf7b3c53d80f76 Mon Sep 17 00:00:00 2001 From: John Galt Date: Wed, 3 Jan 2024 12:04:12 -0500 Subject: [PATCH 9/9] Makefile: utilize unified lto on thinlto 
Small output size increase and build time increase due to additional vectorization and unrolling --- Makefile | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/Makefile b/Makefile index c0f7e2c94078..fa71067b8675 100644 --- a/Makefile +++ b/Makefile @@ -971,7 +971,8 @@ endif ifdef CONFIG_LTO_CLANG ifdef CONFIG_THINLTO -lto-clang-flags := -flto=thin +lto-clang-flags := -funified-lto +lto-clang-flags += -flto=thin LDFLAGS += --thinlto-cache-dir=.thinlto-cache else lto-clang-flags := -flto