binder: Reserve caches for small, high-frequency memory allocations

Most of binder's memory allocations are tiny, and they're allocated and
freed extremely frequently. The latency from going through the page
allocator every time for such small allocations ends up being quite high,
especially when the system is low on memory. Binder is
performance-critical, so this is suboptimal.

Instead of using kzalloc to allocate a struct every time, reserve
dedicated caches so that each struct type can be allocated quickly.

Signed-off-by: Sultan Alsawaf <sultan@kerneltoast.com>
Signed-off-by: Danny Lin <danny@kdrag0n.dev>
Signed-off-by: UtsavBalar1231 <utsavbalar1231@gmail.com>
This commit is contained in:
Committed by: UtsavBalar1231
Parent commit: f81e706d42
Commit: 9706a52be6
@@ -203,6 +203,14 @@ static inline void binder_stats_created(enum binder_stat_types type)
|
||||
struct binder_transaction_log binder_transaction_log;
|
||||
struct binder_transaction_log binder_transaction_log_failed;
|
||||
|
||||
static struct kmem_cache *binder_node_pool;
|
||||
static struct kmem_cache *binder_proc_pool;
|
||||
static struct kmem_cache *binder_ref_death_pool;
|
||||
static struct kmem_cache *binder_ref_pool;
|
||||
static struct kmem_cache *binder_thread_pool;
|
||||
static struct kmem_cache *binder_transaction_pool;
|
||||
static struct kmem_cache *binder_work_pool;
|
||||
|
||||
static struct binder_transaction_log_entry *binder_transaction_log_add(
|
||||
struct binder_transaction_log *log)
|
||||
{
|
||||
@@ -1350,9 +1358,9 @@ static struct binder_node *binder_init_node_ilocked(
|
||||
static struct binder_node *binder_new_node(struct binder_proc *proc,
|
||||
struct flat_binder_object *fp)
|
||||
{
|
||||
struct binder_node *node;
|
||||
struct binder_node *new_node = kzalloc(sizeof(*node), GFP_KERNEL);
|
||||
struct binder_node *node, *new_node;
|
||||
|
||||
new_node = kmem_cache_zalloc(binder_node_pool, GFP_KERNEL);
|
||||
if (!new_node)
|
||||
return NULL;
|
||||
binder_inner_proc_lock(proc);
|
||||
@@ -1362,14 +1370,14 @@ static struct binder_node *binder_new_node(struct binder_proc *proc,
|
||||
/*
|
||||
* The node was already added by another thread
|
||||
*/
|
||||
kfree(new_node);
|
||||
kmem_cache_free(binder_node_pool, new_node);
|
||||
|
||||
return node;
|
||||
}
|
||||
|
||||
static void binder_free_node(struct binder_node *node)
|
||||
{
|
||||
kfree(node);
|
||||
kmem_cache_free(binder_node_pool, node);
|
||||
binder_stats_deleted(BINDER_STAT_NODE);
|
||||
}
|
||||
|
||||
@@ -1856,8 +1864,9 @@ static void binder_free_ref(struct binder_ref *ref)
|
||||
{
|
||||
if (ref->node)
|
||||
binder_free_node(ref->node);
|
||||
kfree(ref->death);
|
||||
kfree(ref);
|
||||
if (ref->death)
|
||||
kmem_cache_free(binder_ref_death_pool, ref->death);
|
||||
kmem_cache_free(binder_ref_pool, ref);
|
||||
}
|
||||
|
||||
/**
|
||||
@@ -1950,7 +1959,7 @@ static int binder_inc_ref_for_node(struct binder_proc *proc,
|
||||
ref = binder_get_ref_for_node_olocked(proc, node, NULL);
|
||||
if (!ref) {
|
||||
binder_proc_unlock(proc);
|
||||
new_ref = kzalloc(sizeof(*ref), GFP_KERNEL);
|
||||
new_ref = kmem_cache_zalloc(binder_ref_pool, GFP_KERNEL);
|
||||
if (!new_ref)
|
||||
return -ENOMEM;
|
||||
binder_proc_lock(proc);
|
||||
@@ -1964,7 +1973,7 @@ static int binder_inc_ref_for_node(struct binder_proc *proc,
|
||||
* Another thread created the ref first so
|
||||
* free the one we allocated
|
||||
*/
|
||||
kfree(new_ref);
|
||||
kmem_cache_free(binder_ref_pool, new_ref);
|
||||
return ret;
|
||||
}
|
||||
|
||||
@@ -2099,7 +2108,7 @@ static void binder_free_transaction(struct binder_transaction *t)
|
||||
* If the transaction has no target_proc, then
|
||||
* t->buffer->transaction has already been cleared.
|
||||
*/
|
||||
kfree(t);
|
||||
kmem_cache_free(binder_transaction_pool, t);
|
||||
binder_stats_deleted(BINDER_STAT_TRANSACTION);
|
||||
}
|
||||
|
||||
@@ -3129,7 +3138,7 @@ static void binder_transaction(struct binder_proc *proc,
|
||||
e->to_proc = target_proc->pid;
|
||||
|
||||
/* TODO: reuse incoming transaction for reply */
|
||||
t = kzalloc(sizeof(*t), GFP_KERNEL);
|
||||
t = kmem_cache_zalloc(binder_transaction_pool, GFP_KERNEL);
|
||||
if (t == NULL) {
|
||||
return_error = BR_FAILED_REPLY;
|
||||
return_error_param = -ENOMEM;
|
||||
@@ -3139,7 +3148,7 @@ static void binder_transaction(struct binder_proc *proc,
|
||||
binder_stats_created(BINDER_STAT_TRANSACTION);
|
||||
spin_lock_init(&t->lock);
|
||||
|
||||
tcomplete = kzalloc(sizeof(*tcomplete), GFP_KERNEL);
|
||||
tcomplete = kmem_cache_zalloc(binder_work_pool, GFP_KERNEL);
|
||||
if (tcomplete == NULL) {
|
||||
return_error = BR_FAILED_REPLY;
|
||||
return_error_param = -ENOMEM;
|
||||
@@ -3562,10 +3571,10 @@ err_bad_extra_size:
|
||||
if (secctx)
|
||||
security_release_secctx(secctx, secctx_sz);
|
||||
err_get_secctx_failed:
|
||||
kfree(tcomplete);
|
||||
kmem_cache_free(binder_work_pool, tcomplete);
|
||||
binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
|
||||
err_alloc_tcomplete_failed:
|
||||
kfree(t);
|
||||
kmem_cache_free(binder_transaction_pool, t);
|
||||
binder_stats_deleted(BINDER_STAT_TRANSACTION);
|
||||
err_alloc_t_failed:
|
||||
err_bad_call_stack:
|
||||
@@ -3917,7 +3926,7 @@ static int binder_thread_write(struct binder_proc *proc,
|
||||
* Allocate memory for death notification
|
||||
* before taking lock
|
||||
*/
|
||||
death = kzalloc(sizeof(*death), GFP_KERNEL);
|
||||
death = kmem_cache_zalloc(binder_ref_death_pool, GFP_KERNEL);
|
||||
if (death == NULL) {
|
||||
WARN_ON(thread->return_error.cmd !=
|
||||
BR_OK);
|
||||
@@ -3942,7 +3951,8 @@ static int binder_thread_write(struct binder_proc *proc,
|
||||
"BC_CLEAR_DEATH_NOTIFICATION",
|
||||
target);
|
||||
binder_proc_unlock(proc);
|
||||
kfree(death);
|
||||
if (death)
|
||||
kmem_cache_free(binder_ref_death_pool, death);
|
||||
break;
|
||||
}
|
||||
|
||||
@@ -3963,7 +3973,7 @@ static int binder_thread_write(struct binder_proc *proc,
|
||||
proc->pid, thread->pid);
|
||||
binder_node_unlock(ref->node);
|
||||
binder_proc_unlock(proc);
|
||||
kfree(death);
|
||||
kmem_cache_free(binder_ref_death_pool, death);
|
||||
break;
|
||||
}
|
||||
binder_stats_created(BINDER_STAT_DEATH);
|
||||
@@ -4263,7 +4273,7 @@ retry:
|
||||
case BINDER_WORK_TRANSACTION_COMPLETE: {
|
||||
binder_inner_proc_unlock(proc);
|
||||
cmd = BR_TRANSACTION_COMPLETE;
|
||||
kfree(w);
|
||||
kmem_cache_free(binder_work_pool, w);
|
||||
binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
|
||||
if (put_user(cmd, (uint32_t __user *)ptr))
|
||||
return -EFAULT;
|
||||
@@ -4384,7 +4394,7 @@ retry:
|
||||
(u64)cookie);
|
||||
if (w->type == BINDER_WORK_CLEAR_DEATH_NOTIFICATION) {
|
||||
binder_inner_proc_unlock(proc);
|
||||
kfree(death);
|
||||
kmem_cache_free(binder_ref_death_pool, death);
|
||||
binder_stats_deleted(BINDER_STAT_DEATH);
|
||||
} else {
|
||||
binder_enqueue_work_ilocked(
|
||||
@@ -4554,7 +4564,7 @@ static void binder_release_work(struct binder_proc *proc,
|
||||
case BINDER_WORK_TRANSACTION_COMPLETE: {
|
||||
binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
|
||||
"undelivered TRANSACTION_COMPLETE\n");
|
||||
kfree(w);
|
||||
kmem_cache_free(binder_work_pool, w);
|
||||
binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
|
||||
} break;
|
||||
case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
|
||||
@@ -4565,7 +4575,7 @@ static void binder_release_work(struct binder_proc *proc,
|
||||
binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
|
||||
"undelivered death notification, %016llx\n",
|
||||
(u64)death->cookie);
|
||||
kfree(death);
|
||||
kmem_cache_free(binder_ref_death_pool, death);
|
||||
binder_stats_deleted(BINDER_STAT_DEATH);
|
||||
} break;
|
||||
default:
|
||||
@@ -4626,14 +4636,14 @@ static struct binder_thread *binder_get_thread(struct binder_proc *proc)
|
||||
thread = binder_get_thread_ilocked(proc, NULL);
|
||||
binder_inner_proc_unlock(proc);
|
||||
if (!thread) {
|
||||
new_thread = kzalloc(sizeof(*thread), GFP_KERNEL);
|
||||
new_thread = kmem_cache_zalloc(binder_thread_pool, GFP_KERNEL);
|
||||
if (new_thread == NULL)
|
||||
return NULL;
|
||||
binder_inner_proc_lock(proc);
|
||||
thread = binder_get_thread_ilocked(proc, new_thread);
|
||||
binder_inner_proc_unlock(proc);
|
||||
if (thread != new_thread)
|
||||
kfree(new_thread);
|
||||
kmem_cache_free(binder_thread_pool, new_thread);
|
||||
}
|
||||
return thread;
|
||||
}
|
||||
@@ -4645,7 +4655,7 @@ static void binder_free_proc(struct binder_proc *proc)
|
||||
binder_alloc_deferred_release(&proc->alloc);
|
||||
put_task_struct(proc->tsk);
|
||||
binder_stats_deleted(BINDER_STAT_PROC);
|
||||
kfree(proc);
|
||||
kmem_cache_free(binder_proc_pool, proc);
|
||||
}
|
||||
|
||||
static void binder_free_thread(struct binder_thread *thread)
|
||||
@@ -4654,7 +4664,7 @@ static void binder_free_thread(struct binder_thread *thread)
|
||||
binder_stats_deleted(BINDER_STAT_THREAD);
|
||||
binder_proc_dec_tmpref(thread->proc);
|
||||
put_task_struct(thread->task);
|
||||
kfree(thread);
|
||||
kmem_cache_free(binder_thread_pool, thread);
|
||||
}
|
||||
|
||||
static int binder_thread_release(struct binder_proc *proc,
|
||||
@@ -5165,7 +5175,7 @@ static int binder_open(struct inode *nodp, struct file *filp)
|
||||
binder_debug(BINDER_DEBUG_OPEN_CLOSE, "%s: %d:%d\n", __func__,
|
||||
current->group_leader->pid, current->pid);
|
||||
|
||||
proc = kzalloc(sizeof(*proc), GFP_KERNEL);
|
||||
proc = kmem_cache_zalloc(binder_proc_pool, GFP_KERNEL);
|
||||
if (proc == NULL)
|
||||
return -ENOMEM;
|
||||
spin_lock_init(&proc->inner_lock);
|
||||
@@ -6081,6 +6091,73 @@ static int __init init_binder_device(const char *name)
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int __init binder_create_pools(void)
|
||||
{
|
||||
int ret;
|
||||
|
||||
ret = binder_buffer_pool_create();
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
binder_node_pool = KMEM_CACHE(binder_node, SLAB_HWCACHE_ALIGN);
|
||||
if (!binder_node_pool)
|
||||
goto err_node_pool;
|
||||
|
||||
binder_proc_pool = KMEM_CACHE(binder_proc, SLAB_HWCACHE_ALIGN);
|
||||
if (!binder_proc_pool)
|
||||
goto err_proc_pool;
|
||||
|
||||
binder_ref_death_pool = KMEM_CACHE(binder_ref_death, SLAB_HWCACHE_ALIGN);
|
||||
if (!binder_ref_death_pool)
|
||||
goto err_ref_death_pool;
|
||||
|
||||
binder_ref_pool = KMEM_CACHE(binder_ref, SLAB_HWCACHE_ALIGN);
|
||||
if (!binder_ref_pool)
|
||||
goto err_ref_pool;
|
||||
|
||||
binder_thread_pool = KMEM_CACHE(binder_thread, SLAB_HWCACHE_ALIGN);
|
||||
if (!binder_thread_pool)
|
||||
goto err_thread_pool;
|
||||
|
||||
binder_transaction_pool = KMEM_CACHE(binder_transaction, SLAB_HWCACHE_ALIGN);
|
||||
if (!binder_transaction_pool)
|
||||
goto err_transaction_pool;
|
||||
|
||||
binder_work_pool = KMEM_CACHE(binder_work, SLAB_HWCACHE_ALIGN);
|
||||
if (!binder_work_pool)
|
||||
goto err_work_pool;
|
||||
|
||||
return 0;
|
||||
|
||||
err_work_pool:
|
||||
kmem_cache_destroy(binder_transaction_pool);
|
||||
err_transaction_pool:
|
||||
kmem_cache_destroy(binder_thread_pool);
|
||||
err_thread_pool:
|
||||
kmem_cache_destroy(binder_ref_pool);
|
||||
err_ref_pool:
|
||||
kmem_cache_destroy(binder_ref_death_pool);
|
||||
err_ref_death_pool:
|
||||
kmem_cache_destroy(binder_proc_pool);
|
||||
err_proc_pool:
|
||||
kmem_cache_destroy(binder_node_pool);
|
||||
err_node_pool:
|
||||
binder_buffer_pool_destroy();
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
static void __init binder_destroy_pools(void)
|
||||
{
|
||||
binder_buffer_pool_destroy();
|
||||
kmem_cache_destroy(binder_node_pool);
|
||||
kmem_cache_destroy(binder_proc_pool);
|
||||
kmem_cache_destroy(binder_ref_death_pool);
|
||||
kmem_cache_destroy(binder_ref_pool);
|
||||
kmem_cache_destroy(binder_thread_pool);
|
||||
kmem_cache_destroy(binder_transaction_pool);
|
||||
kmem_cache_destroy(binder_work_pool);
|
||||
}
|
||||
|
||||
static int __init binder_init(void)
|
||||
{
|
||||
int ret;
|
||||
@@ -6089,10 +6166,14 @@ static int __init binder_init(void)
|
||||
struct hlist_node *tmp;
|
||||
char *device_names = NULL;
|
||||
|
||||
ret = binder_alloc_shrinker_init();
|
||||
ret = binder_create_pools();
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
ret = binder_alloc_shrinker_init();
|
||||
if (ret)
|
||||
goto err_alloc_shrinker_failed;
|
||||
|
||||
atomic_set(&binder_transaction_log.cur, ~0U);
|
||||
atomic_set(&binder_transaction_log_failed.cur, ~0U);
|
||||
|
||||
@@ -6167,6 +6248,9 @@ err_init_binder_device_failed:
|
||||
err_alloc_device_names_failed:
|
||||
debugfs_remove_recursive(binder_debugfs_dir_entry_root);
|
||||
|
||||
err_alloc_shrinker_failed:
|
||||
binder_destroy_pools();
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
|
||||
@@ -53,6 +53,22 @@ module_param_named(debug_mask, binder_alloc_debug_mask,
|
||||
pr_info(x); \
|
||||
} while (0)
|
||||
|
||||
static struct kmem_cache *binder_buffer_pool;
|
||||
|
||||
int binder_buffer_pool_create(void)
|
||||
{
|
||||
binder_buffer_pool = KMEM_CACHE(binder_buffer, SLAB_HWCACHE_ALIGN);
|
||||
if (!binder_buffer_pool)
|
||||
return -ENOMEM;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
void binder_buffer_pool_destroy(void)
|
||||
{
|
||||
kmem_cache_destroy(binder_buffer_pool);
|
||||
}
|
||||
|
||||
static struct binder_buffer *binder_buffer_next(struct binder_buffer *buffer)
|
||||
{
|
||||
return list_entry(buffer->entry.next, struct binder_buffer, entry);
|
||||
@@ -464,7 +480,7 @@ static struct binder_buffer *binder_alloc_new_buf_locked(
|
||||
if (buffer_size != size) {
|
||||
struct binder_buffer *new_buffer;
|
||||
|
||||
new_buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
|
||||
new_buffer = kmem_cache_zalloc(binder_buffer_pool, GFP_KERNEL);
|
||||
if (!new_buffer) {
|
||||
pr_err("%s: %d failed to alloc new buffer struct\n",
|
||||
__func__, alloc->pid);
|
||||
@@ -588,7 +604,7 @@ static void binder_delete_free_buffer(struct binder_alloc *alloc,
|
||||
buffer_start_page(buffer) + PAGE_SIZE);
|
||||
}
|
||||
list_del(&buffer->entry);
|
||||
kfree(buffer);
|
||||
kmem_cache_free(binder_buffer_pool, buffer);
|
||||
}
|
||||
|
||||
static void binder_free_buf_locked(struct binder_alloc *alloc,
|
||||
@@ -702,7 +718,7 @@ int binder_alloc_mmap_handler(struct binder_alloc *alloc,
|
||||
}
|
||||
alloc->buffer_size = vma->vm_end - vma->vm_start;
|
||||
|
||||
buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
|
||||
buffer = kmem_cache_zalloc(binder_buffer_pool, GFP_KERNEL);
|
||||
if (!buffer) {
|
||||
ret = -ENOMEM;
|
||||
failure_string = "alloc buffer struct";
|
||||
@@ -760,7 +776,7 @@ void binder_alloc_deferred_release(struct binder_alloc *alloc)
|
||||
|
||||
list_del(&buffer->entry);
|
||||
WARN_ON_ONCE(!list_empty(&alloc->buffers));
|
||||
kfree(buffer);
|
||||
kmem_cache_free(binder_buffer_pool, buffer);
|
||||
}
|
||||
|
||||
page_count = 0;
|
||||
|
||||
@@ -143,6 +143,8 @@ extern void binder_alloc_print_allocated(struct seq_file *m,
|
||||
struct binder_alloc *alloc);
|
||||
void binder_alloc_print_pages(struct seq_file *m,
|
||||
struct binder_alloc *alloc);
|
||||
extern int binder_buffer_pool_create(void);
|
||||
extern void binder_buffer_pool_destroy(void);
|
||||
|
||||
/**
|
||||
* binder_alloc_get_free_async_space() - get free space available for async
|
||||
|
||||
Reference in New Issue
Block a user