Revert "mm: introduce __lru_cache_add_active_or_unevictable"
This reverts commit 34e57df243.
Bug: 128240262
Change-Id: I54aa09970c5967cfa7a93deca52f28773e27c041
Signed-off-by: Minchan Kim <minchan@google.com>
This commit is contained in:
@@ -341,14 +341,8 @@ extern void swap_setup(void);
 extern void add_page_to_unevictable_list(struct page *page);
-extern void __lru_cache_add_active_or_unevictable(struct page *page,
-					unsigned long vma_flags);
-
-static inline void lru_cache_add_active_or_unevictable(struct page *page,
-					struct vm_area_struct *vma)
-{
-	return __lru_cache_add_active_or_unevictable(page, vma->vm_flags);
-}
+extern void lru_cache_add_active_or_unevictable(struct page *page,
+					struct vm_area_struct *vma);
 
 /* linux/mm/vmscan.c */
 extern unsigned long zone_reclaimable_pages(struct zone *zone);
 
@@ -2690,7 +2690,7 @@ static int wp_page_copy(struct vm_fault *vmf)
 		ptep_clear_flush_notify(vma, vmf->address, vmf->pte);
 		page_add_new_anon_rmap(new_page, vma, vmf->address, false);
 		mem_cgroup_commit_charge(new_page, memcg, false, false);
-		__lru_cache_add_active_or_unevictable(new_page, vmf->vma_flags);
+		lru_cache_add_active_or_unevictable(new_page, vma);
 		/*
 		 * We call the notify macro here because, when using secondary
 		 * mmu page tables (such as kvm shadow page tables), we want the
@@ -3208,7 +3208,7 @@ int do_swap_page(struct vm_fault *vmf)
 	if (unlikely(page != swapcache && swapcache)) {
 		page_add_new_anon_rmap(page, vma, vmf->address, false);
 		mem_cgroup_commit_charge(page, memcg, false, false);
-		__lru_cache_add_active_or_unevictable(page, vmf->vma_flags);
+		lru_cache_add_active_or_unevictable(page, vma);
 	} else {
 		do_page_add_anon_rmap(page, vma, vmf->address, exclusive);
 		mem_cgroup_commit_charge(page, memcg, true, false);
@@ -3358,7 +3358,7 @@ static int do_anonymous_page(struct vm_fault *vmf)
 	inc_mm_counter_fast(vma->vm_mm, MM_ANONPAGES);
 	page_add_new_anon_rmap(page, vma, vmf->address, false);
 	mem_cgroup_commit_charge(page, memcg, false, false);
-	__lru_cache_add_active_or_unevictable(page, vmf->vma_flags);
+	lru_cache_add_active_or_unevictable(page, vma);
 setpte:
 	set_pte_at(vma->vm_mm, vmf->address, vmf->pte, entry);
 
@@ -3644,7 +3644,7 @@ int alloc_set_pte(struct vm_fault *vmf, struct mem_cgroup *memcg,
 		inc_mm_counter_fast(vma->vm_mm, MM_ANONPAGES);
 		page_add_new_anon_rmap(page, vma, vmf->address, false);
 		mem_cgroup_commit_charge(page, memcg, false, false);
-		__lru_cache_add_active_or_unevictable(page, vmf->vma_flags);
+		lru_cache_add_active_or_unevictable(page, vma);
 	} else {
 		inc_mm_counter_fast(vma->vm_mm, mm_counter_file(page));
 		page_add_file_rmap(page, false);
 
@@ -479,12 +479,12 @@ void add_page_to_unevictable_list(struct page *page)
  * directly back onto it's zone's unevictable list, it does NOT use a
  * per cpu pagevec.
  */
-void __lru_cache_add_active_or_unevictable(struct page *page,
-					unsigned long vma_flags)
+void lru_cache_add_active_or_unevictable(struct page *page,
+					struct vm_area_struct *vma)
 {
 	VM_BUG_ON_PAGE(PageLRU(page), page);
 
-	if (likely((vma_flags & (VM_LOCKED | VM_SPECIAL)) != VM_LOCKED)) {
+	if (likely((vma->vm_flags & (VM_LOCKED | VM_SPECIAL)) != VM_LOCKED)) {
 		SetPageActive(page);
 		lru_cache_add(page);
 		return;
Reference in New Issue
Block a user