Revert "bpf: Defer the free of inner map when necessary"

This reverts commit 90c445799f which is
commit 876673364161da50eed6b472d746ef88242b2368 upstream.

It breaks the Android kernel build and the kernel ABI, and can be brought
back in the future in an ABI-safe way if it is really needed.

Bug: 161946584
Change-Id: I8c7820bcca06e847cdc7c894005d383a2ac828a0
Signed-off-by: Greg Kroah-Hartman <gregkh@google.com>
This commit is contained in:
Greg Kroah-Hartman
2024-05-16 13:13:44 +00:00
parent 38a24db1c2
commit e2ddf25269
3 changed files with 6 additions and 38 deletions

View File

@@ -175,11 +175,7 @@ struct bpf_map {
*/
atomic64_t refcnt ____cacheline_aligned;
atomic64_t usercnt;
/* rcu is used before freeing and work is only used during freeing */
union {
struct work_struct work;
struct rcu_head rcu;
};
struct work_struct work;
struct mutex freeze_mutex;
#ifdef __GENKSYMS__
/* Preserve the CRC change that commit 33fe044f6a9e ("bpf: Fix toctou on
@@ -189,7 +185,6 @@ struct bpf_map {
#else
atomic64_t writecnt;
#endif
bool free_after_mult_rcu_gp;
};
static inline bool map_value_has_spin_lock(const struct bpf_map *map)

View File

@@ -102,15 +102,10 @@ void *bpf_map_fd_get_ptr(struct bpf_map *map,
/* bpf_map_fd_put_ptr - drop the reference on an inner map held by an
 * outer map-in-map slot.
 * @ptr: the struct bpf_map pointer previously obtained via
 *       bpf_map_fd_get_ptr() (see the enclosing hunk's context).
 *
 * Post-revert body: the defect in the extracted span was stripped diff
 * markers superimposing the pre-revert body (an `if (need_defer)` with
 * no such parameter, plus a duplicated comment and a duplicated
 * bpf_map_put() call) onto the kept lines; only the kept lines remain.
 */
void bpf_map_fd_put_ptr(void *ptr)
{
	/* ptr->ops->map_free() has to go through one
	 * rcu grace period by itself.
	 */
	bpf_map_put(ptr);
}
u32 bpf_map_fd_sys_lookup_elem(void *ptr)

View File

@@ -495,25 +495,6 @@ static void bpf_map_put_uref(struct bpf_map *map)
}
}
/* Queue @map for deferred freeing on the system workqueue; the real
 * teardown runs later in bpf_map_free_deferred() because the underlying
 * ops->map_free() might sleep (per the comment preceding __bpf_map_put).
 */
static void bpf_map_free_in_work(struct bpf_map *map)
{
INIT_WORK(&map->work, bpf_map_free_deferred);
schedule_work(&map->work);
}
/* RCU callback: recover the enclosing struct bpf_map from its embedded
 * rcu_head and hand it to the workqueue-based free path.
 */
static void bpf_map_free_rcu_gp(struct rcu_head *rcu)
{
bpf_map_free_in_work(container_of(rcu, struct bpf_map, rcu));
}
/* Callback invoked after a tasks-trace RCU grace period (queued via
 * call_rcu_tasks_trace() in __bpf_map_put below). If that grace period
 * also implies a regular RCU grace period, free the map now; otherwise
 * chain one more call_rcu() so BOTH grace periods have elapsed before
 * the map is freed.
 */
static void bpf_map_free_mult_rcu_gp(struct rcu_head *rcu)
{
if (rcu_trace_implies_rcu_gp())
bpf_map_free_rcu_gp(rcu);
else
call_rcu(rcu, bpf_map_free_rcu_gp);
}
/* decrement map refcnt and schedule it for freeing via workqueue
* (unrelying map implementation ops->map_free() might sleep)
*/
@@ -523,11 +504,8 @@ static void __bpf_map_put(struct bpf_map *map, bool do_idr_lock)
/* bpf_map_free_id() must be called first */
bpf_map_free_id(map, do_idr_lock);
btf_put(map->btf);
if (READ_ONCE(map->free_after_mult_rcu_gp))
call_rcu_tasks_trace(&map->rcu, bpf_map_free_mult_rcu_gp);
else
bpf_map_free_in_work(map);
INIT_WORK(&map->work, bpf_map_free_deferred);
schedule_work(&map->work);
}
}