Revert "mm: page_owner: print largest page user when OOM"

* Reason: Conflicts with upstream changes.

This reverts commit 6780a2f6fd.

Change-Id: I7863d67d35519960d8deb2ba42a3f188e08bf720
This commit is contained in:
bengris32
2023-08-15 17:00:50 +01:00
parent 5e5da950a5
commit 8d6ff44c7b
5 changed files with 1 addition and 148 deletions

View File

@@ -28,9 +28,5 @@ struct stack_trace;
depot_stack_handle_t depot_save_stack(struct stack_trace *trace, gfp_t flags);
void depot_fetch_stack(depot_stack_handle_t handle, struct stack_trace *trace);
#ifdef CONFIG_PAGE_OWNER
void depot_hit_stack(depot_stack_handle_t handle, struct stack_trace *trace);
void show_max_hit_page(void);
#endif
#endif

View File

@@ -69,9 +69,6 @@ struct stack_record {
struct stack_record *next; /* Link in the hashtable */
u32 hash; /* Hash in the hastable */
u32 size; /* Number of frames in the stack */
#ifdef CONFIG_PAGE_OWNER
u32 hit;
#endif
union handle_parts handle;
unsigned long entries[1]; /* Variable-sized array of entries. */
};
@@ -82,11 +79,6 @@ static int depot_index;
static int next_slab_inited;
static size_t depot_offset;
static DEFINE_RAW_SPINLOCK(depot_lock);
#ifdef CONFIG_PAGE_OWNER
static struct stack_record *max_found;
static DEFINE_SPINLOCK(max_found_lock);
#endif
static bool init_stack_slab(void **prealloc)
{
@@ -149,9 +141,6 @@ static struct stack_record *depot_alloc_stack(unsigned long *entries, int size,
stack->hash = hash;
stack->size = size;
#ifdef CONFIG_PAGE_OWNER
stack->hit = 0;
#endif
stack->handle.slabindex = depot_index;
stack->handle.offset = depot_offset >> STACK_ALLOC_ALIGN;
stack->handle.valid = 1;
@@ -222,40 +211,6 @@ void depot_fetch_stack(depot_stack_handle_t handle, struct stack_trace *trace)
}
EXPORT_SYMBOL_GPL(depot_fetch_stack);
#ifdef CONFIG_PAGE_OWNER
/**
 * depot_hit_stack - count one more hit on a saved stack and track the maximum.
 * @handle: stack depot handle previously returned by depot_save_stack().
 * @trace: unused; kept only so existing callers keep compiling.
 *
 * Resolves @handle back to its struct stack_record inside stack_slabs,
 * increments its hit counter and updates the global max_found pointer if
 * this stack now has the highest hit count.
 */
void depot_hit_stack(depot_stack_handle_t handle, struct stack_trace *trace)
{
	union handle_parts parts = { .handle = handle };
	void *slab = stack_slabs[parts.slabindex];
	size_t offset = parts.offset << STACK_ALLOC_ALIGN;
	struct stack_record *stack = slab + offset;
	unsigned long flags;

	/*
	 * Do both the increment and the comparison under max_found_lock:
	 * an unlocked stack->hit++ can lose updates when two CPUs hit the
	 * same stack concurrently, and the comparison against
	 * max_found->hit must see a consistent value.
	 */
	spin_lock_irqsave(&max_found_lock, flags);
	stack->hit++;
	if ((!max_found) || (stack->hit > max_found->hit))
		max_found = stack;
	spin_unlock_irqrestore(&max_found_lock, flags);
}
/**
 * show_max_hit_page - print the stack trace with the most recorded hits.
 *
 * Fetches the stack referenced by the global max_found record and logs its
 * hit count plus its backtrace (up to 16 frames, indented by 2).
 * Safe to call before any hit was recorded: bails out if max_found is NULL,
 * which the original code dereferenced unconditionally (NULL deref).
 */
void show_max_hit_page(void)
{
	unsigned long entries[16];
	unsigned long flags;
	struct stack_trace trace = {
		.nr_entries = 0,
		.entries = entries,
		.max_entries = 16,
		.skip = 0
	};

	spin_lock_irqsave(&max_found_lock, flags);
	/* No stack has been hit yet: nothing to report. */
	if (!max_found) {
		spin_unlock_irqrestore(&max_found_lock, flags);
		return;
	}
	depot_fetch_stack(max_found->handle.handle, &trace);
	/* hit is a u32, so use %u rather than %d. */
	pr_info("max found hit=%u\n", max_found->hit);
	print_stack_trace(&trace, 2);
	spin_unlock_irqrestore(&max_found_lock, flags);
}
#endif
/**
* depot_save_stack - save stack in a stack depot.
* @trace - the stacktrace to save.

View File

@@ -557,8 +557,4 @@ static inline bool is_migrate_highatomic_page(struct page *page)
void setup_zone_pageset(struct zone *zone);
extern struct page *alloc_new_node_page(struct page *page, unsigned long node);
#ifdef CONFIG_PAGE_OWNER
ssize_t print_max_page_owner(void);
#endif
#endif /* __MM_INTERNAL_H */

View File

@@ -1147,12 +1147,8 @@ bool out_of_memory(struct oom_control *oc)
* system level, we cannot survive this and will enter
* an endless loop in the allocator. Bail out now.
*/
if (!is_sysrq_oom(oc) && !is_memcg_oom(oc)) {
#ifdef CONFIG_PAGE_OWNER
print_max_page_owner();
#endif
if (!is_sysrq_oom(oc) && !is_memcg_oom(oc))
panic("System is deadlocked on memory\n");
}
}
if (oc->chosen && oc->chosen != (void *)-1UL)
oom_kill_process(oc, !is_memcg_oom(oc) ? "Out of memory" :

View File

@@ -10,7 +10,6 @@
#include <linux/migrate.h>
#include <linux/stackdepot.h>
#include <linux/seq_file.h>
#include <linux/stackdepot.h>
#include "internal.h"
@@ -640,92 +639,3 @@ static int __init pageowner_init(void)
return PTR_ERR_OR_ZERO(dentry);
}
late_initcall(pageowner_init)
/*
 * Count one hit for the stack that allocated this page.
 *
 * @pfn, @page and @page_owner are currently unused; only @handle is
 * forwarded to depot_hit_stack() together with a scratch trace buffer.
 * Always returns 0.
 */
static ssize_t __update_max_page_owner(unsigned long pfn,
struct page *page, struct page_owner *page_owner,
depot_stack_handle_t handle)
{
	unsigned long stack_entries[PAGE_OWNER_STACK_DEPTH];
	struct stack_trace trace = {
		.entries = stack_entries,
		.max_entries = PAGE_OWNER_STACK_DEPTH,
		.nr_entries = 0,
		.skip = 0,
	};

	depot_hit_stack(handle, &trace);

	return 0;
}
/**
 * print_max_page_owner - scan all allocated pages and report the allocation
 * stack that owns the most of them.
 *
 * Walks every valid PFN from min_low_pfn to max_pfn, counts a hit for each
 * allocated page's saved stack via __update_max_page_owner(), then prints
 * the most frequently seen stack with show_max_hit_page().
 *
 * Returns 0 on success, -EINVAL if page_owner was not enabled at boot.
 */
ssize_t print_max_page_owner(void)
{
unsigned long pfn;
struct page *page;
struct page_ext *page_ext;
struct page_owner *page_owner;
depot_stack_handle_t handle;
/* Without page_owner there is no recorded data to walk. */
if (!static_branch_unlikely(&page_owner_inited))
return -EINVAL;
page = NULL;
pfn = min_low_pfn;
/* Find a valid PFN or the start of a MAX_ORDER_NR_PAGES area */
while (!pfn_valid(pfn) && (pfn & (MAX_ORDER_NR_PAGES - 1)) != 0)
pfn++;
/* Flush per-cpu free lists so fewer in-flight pages are misclassified. */
drain_all_pages(NULL);
/* Find an allocated page */
for (; pfn < max_pfn; pfn++) {
/*
* If the new page is in a new MAX_ORDER_NR_PAGES area,
* validate the area as existing, skip it if not
*/
if ((pfn & (MAX_ORDER_NR_PAGES - 1)) == 0 && !pfn_valid(pfn)) {
pfn += MAX_ORDER_NR_PAGES - 1;
continue;
}
/* Check for holes within a MAX_ORDER area */
if (!pfn_valid_within(pfn))
continue;
page = pfn_to_page(pfn);
/* Free page: skip the whole buddy block in one step. */
if (PageBuddy(page)) {
unsigned long freepage_order = page_order_unsafe(page);
if (freepage_order < MAX_ORDER)
pfn += (1UL << freepage_order) - 1;
continue;
}
page_ext = lookup_page_ext(page);
if (unlikely(!page_ext))
continue;
/*
* Some pages could be missed by concurrent allocation or free,
* because we don't hold the zone lock.
*/
if (!test_bit(PAGE_EXT_OWNER, &page_ext->flags))
continue;
page_owner = get_page_owner(page_ext);
/*
* Access to page_ext->handle isn't synchronous so we should
* be careful to access it.
*/
handle = READ_ONCE(page_owner->handle);
if (!handle)
continue;
/* Count one hit for the stack that allocated this page. */
__update_max_page_owner(pfn, page, page_owner, handle);
}
/* Report the stack with the highest hit count. */
show_max_hit_page();
return 0;
}