Files
UtsavBalar1231 a896c5cfb3 Merge remote-tracking branch 'slmk/linux-4.14' into auto-kernel
* slmk/linux-4.14:
  simple_lmk: Remove unnecessary clean-up when timeout is reached
  simple_lmk: Hold an RCU read lock instead of the tasklist read lock
  mm: Don't stop kswapd on a per-node basis when there are no waiters
  simple_lmk: Consider all positive adjs when finding victims
  mm: vmpressure: Ignore allocation orders above PAGE_ALLOC_COSTLY_ORDER
  mm: Don't warn on page allocation failures for OOM-killed processes
  mm: Adjust tsk_is_oom_victim() for Simple LMK
  mm: vmpressure: Don't cache the window size
  mm: vmpressure: Interpret zero scanned pages as 100% pressure
  mm: vmpressure: Don't exclude any allocation types
  simple_lmk: Update adj targeting for Android 10
  simple_lmk: Use vmpressure notifier to trigger kills
  mm: vmpressure: make vmpressure window variable
  mm: vmpressure: account allocstalls only on higher pressures
  mm: vmpressure: scale pressure based on reclaim context
  mm: vmpressure: allow in-kernel clients to subscribe for events
  mm, vmpressure: int cast vmpressure level/model for -1 comparison
  mm: Stop kswapd early when nothing's waiting for it to free pages
  simple_lmk: Include swap memory usage in the size of victims
  simple_lmk: Relax memory barriers and clean up some styling
  simple_lmk: Place victims onto SCHED_RR
  simple_lmk: Add a timeout to stop waiting for victims to die
  simple_lmk: Ignore tasks that won't free memory
  simple_lmk: Simplify tricks used to speed up the death process
  simple_lmk: Report mm as freed as soon as exit_mmap() finishes
  simple_lmk: Mark victim thread group with TIF_MEMDIE
  simple_lmk: Disable OOM killer when Simple LMK is enabled
  simple_lmk: Print a message when there are no processes to kill
  simple_lmk: Remove compat cruft not specific to 4.14
  simple_lmk: Update copyright to 2020
  simple_lmk: Don't queue up new reclaim requests during reclaim
  simple_lmk: Increase default minfree value
  simple_lmk: Clean up some code style nitpicks
  simple_lmk: Make reclaim deterministic
  simple_lmk: Fix broken multicopy atomicity for victims_to_kill
  simple_lmk: Use proper atomic_* operations where needed
  simple_lmk: Remove kthread_should_stop() exit condition
  simple_lmk: Fix pages_found calculation
  simple_lmk: Introduce Simple Low Memory Killer for Android

Signed-off-by: UtsavBalar1231 <utsavbalar1231@gmail.com>

Conflicts:
	kernel/exit.c
	kernel/fork.c
	mm/Makefile
	mm/vmpressure.c
	mm/vmscan.c
2020-07-08 09:55:52 +05:30

130 lines
3.4 KiB
C

/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __INCLUDE_LINUX_OOM_H
#define __INCLUDE_LINUX_OOM_H
#include <linux/sched/signal.h>
#include <linux/types.h>
#include <linux/nodemask.h>
#include <uapi/linux/oom.h>
#include <linux/sched/coredump.h> /* MMF_* */
#include <linux/mm.h> /* VM_FAULT* */
struct zonelist;
struct notifier_block;
struct mem_cgroup;
struct task_struct;
/*
 * Details of the page allocation that triggered the oom killer that are used to
 * determine what should be killed.
 *
 * The const members are fixed at the call site that invokes the OOM killer;
 * the trailing members are scratch state owned by the OOM implementation.
 */
struct oom_control {
/* Used to determine cpuset */
struct zonelist *zonelist;
/* Used to determine mempolicy */
nodemask_t *nodemask;
/* Memory cgroup in which oom is invoked, or NULL for global oom */
struct mem_cgroup *memcg;
/* Used to determine cpuset and node locality requirement */
const gfp_t gfp_mask;
/*
 * order == -1 means the oom kill is required by sysrq, otherwise only
 * for display purposes.
 */
const int order;
/* Used by oom implementation, do not set */
unsigned long totalpages;
/* Task currently selected as the kill candidate (oom implementation) */
struct task_struct *chosen;
/* Badness score of @chosen (oom implementation) */
unsigned long chosen_points;
};
/* Mutex shared by the OOM killer machinery; defined outside this header. */
extern struct mutex oom_lock;
/*
 * Mark the current thread group as an "OOM origin" by setting
 * signal->oom_flag_origin; the flag is read back via oom_task_origin().
 */
static inline void set_current_oom_origin(void)
{
	current->signal->oom_flag_origin = true;
}
/*
 * Clear the "OOM origin" mark on the current thread group, undoing
 * set_current_oom_origin().
 */
static inline void clear_current_oom_origin(void)
{
	current->signal->oom_flag_origin = false;
}
/*
 * Report whether @p's thread group was flagged with
 * set_current_oom_origin().
 */
static inline bool oom_task_origin(const struct task_struct *p)
{
	return p->signal->oom_flag_origin;
}
/*
 * Report whether @tsk has been chosen as an OOM victim.
 *
 * With Simple LMK enabled, victims are identified by the TIF_MEMDIE
 * thread flag (set per thread by the LMK); otherwise a non-NULL
 * signal->oom_mm is the indicator.
 */
static inline bool tsk_is_oom_victim(struct task_struct *tsk)
{
#ifdef CONFIG_ANDROID_SIMPLE_LMK
	return test_ti_thread_flag(task_thread_info(tsk), TIF_MEMDIE);
#else
	return tsk->signal->oom_mm;
#endif
}
/*
 * mm_is_oom_victim() - check whether @mm belongs to an OOM victim.
 *
 * Use this helper if tsk->mm != mm and the victim mm needs special
 * handling. MMF_OOM_VICTIM is guaranteed to stay true after it is set
 * once, so a true result remains valid.
 */
static inline bool mm_is_oom_victim(struct mm_struct *mm)
{
	return test_bit(MMF_OOM_VICTIM, &mm->flags);
}
/*
 * check_stable_address_space() - test whether page faults on @mm are
 * still reliable.
 *
 * Once the oom reaper starts reaping an address space it sets
 * MMF_UNSTABLE on the mm; from that moment any !shared mapping may have
 * lost its content, and installing new page table entries could expose
 * zero pages in place of the original data (memory corruption).
 *
 * Callers should invoke this before establishing a page table entry for
 * a !shared mapping, under the proper page table lock.
 *
 * Return: 0 when the page fault is safe, VM_FAULT_SIGBUS otherwise.
 */
static inline int check_stable_address_space(struct mm_struct *mm)
{
	return unlikely(test_bit(MMF_UNSTABLE, &mm->flags)) ? VM_FAULT_SIGBUS : 0;
}
/* Reap (tear down) the mappings of @mm; implemented in mm/oom_kill.c. */
void __oom_reap_task_mm(struct mm_struct *mm);
/* Badness score used to rank @p as an OOM-kill candidate. */
extern unsigned long oom_badness(struct task_struct *p,
struct mem_cgroup *memcg, const nodemask_t *nodemask,
unsigned long totalpages);
/* Invoke the OOM killer for the allocation context described by @oc. */
extern bool out_of_memory(struct oom_control *oc);
extern void exit_oom_victim(void);
/* Notifier chain fired on OOM events. */
extern int register_oom_notifier(struct notifier_block *nb);
extern int unregister_oom_notifier(struct notifier_block *nb);
/* Disable/re-enable the OOM killer (e.g. around suspend/freeze paths). */
extern bool oom_killer_disable(signed long timeout);
extern void oom_killer_enable(void);
/* Find a thread of @p that holds an mm, returned with task_lock held. */
extern struct task_struct *find_lock_task_mm(struct task_struct *p);
extern void dump_tasks(struct mem_cgroup *memcg,
const nodemask_t *nodemask);
extern void wake_oom_reaper(struct task_struct *tsk);
/* sysctls */
extern int sysctl_oom_dump_tasks;
extern int sysctl_oom_kill_allocating_task;
extern int sysctl_panic_on_oom;
#endif /* __INCLUDE_LINUX_OOM_H */