binder: always allocate/map first BINDER_MIN_ALLOC pages
Certain use cases like camera are constantly allocating and freeing binder buffers beyond the first 4k, resulting in mmap_sem contention. If we expand the allocated range from 4k to something higher, we can reduce the contention. Tests show that 6 pages are enough to cause very few update_page_range operations and reduce contention.

Bug: 36727951
Change-Id: I28bc3fb9b33c764c257e28487712fce2a3c1078b
Reported-by: Tim Murray <timmurray@google.com>
Signed-off-by: Joel Fernandes <joelaf@google.com>

Pre-allocate 1 page instead of 6 as in the original patch, since this pre-allocated page is used to prevent the first page from getting unpinned after removing the buffer headers, rather than to pin pages to speed up larger transactions.

Change-Id: Id027adcfd61b2d6b37f69a3f6009a068e90e84f0
Signed-off-by: Sherry Yang <sherryy@android.com>
committed by John Dias
parent b487b37b84
commit 2410d72197
@@ -29,6 +29,8 @@
 #include "binder_alloc.h"
 #include "binder_trace.h"
 
+#define BINDER_MIN_ALLOC (1 * PAGE_SIZE)
+
 static DEFINE_MUTEX(binder_alloc_mmap_lock);
 
 enum {
@@ -155,9 +157,9 @@ struct binder_buffer *binder_alloc_prepare_to_free(struct binder_alloc *alloc,
 	return NULL;
 }
 
-static int binder_update_page_range(struct binder_alloc *alloc, int allocate,
-				    void *start, void *end,
-				    struct vm_area_struct *vma)
+static int __binder_update_page_range(struct binder_alloc *alloc, int allocate,
+				      void *start, void *end,
+				      struct vm_area_struct *vma)
 {
 	void *page_addr;
 	unsigned long user_page_addr;
@@ -257,6 +259,20 @@ err_no_vma:
 	return vma ? -ENOMEM : -ESRCH;
 }
 
+static int binder_update_page_range(struct binder_alloc *alloc, int allocate,
+				    void *start, void *end,
+				    struct vm_area_struct *vma)
+{
+	/*
+	 * For regular updates, move up start if needed since MIN_ALLOC pages
+	 * are always mapped
+	 */
+	if (start - alloc->buffer < BINDER_MIN_ALLOC)
+		start = alloc->buffer + BINDER_MIN_ALLOC;
+
+	return __binder_update_page_range(alloc, allocate, start, end, vma);
+}
+
 struct binder_buffer *binder_alloc_new_buf(struct binder_alloc *alloc,
 					   size_t data_size,
 					   size_t offsets_size,
@@ -587,8 +603,8 @@ int binder_alloc_mmap_handler(struct binder_alloc *alloc,
 	}
 	alloc->buffer_size = vma->vm_end - vma->vm_start;
 
-	if (binder_update_page_range(alloc, 1, alloc->buffer,
-				     alloc->buffer + PAGE_SIZE, vma)) {
+	if (__binder_update_page_range(alloc, 1, alloc->buffer,
+				       alloc->buffer + BINDER_MIN_ALLOC, vma)) {
 		ret = -ENOMEM;
 		failure_string = "alloc small buf";
 		goto err_alloc_small_buf_failed;