Merge 94b4b41d0c ("mm: hugetlb: independent PMD page table shared count") into android12-5.10-lts

Steps on the way to 5.10.239

Change-Id: Ifa64679e1d32ebe0c43a6abc7954b9ebd5620f33
Signed-off-by: Greg Kroah-Hartman <gregkh@google.com>
This commit is contained in:
Greg Kroah-Hartman
2025-07-11 10:09:28 +00:00
3 changed files with 14 additions and 10 deletions

View File

@@ -2475,6 +2475,9 @@ static inline bool pgtable_pmd_page_ctor(struct page *page)
if (!pmd_ptlock_init(page))
return false;
__SetPageTable(page);
+#ifdef CONFIG_ARCH_WANT_HUGE_PMD_SHARE
+	atomic_set(&page->pt_share_count, 0);
+#endif
inc_zone_page_state(page, NR_PAGETABLE);
return true;
}

View File

@@ -152,6 +152,9 @@ struct page {
union {
struct mm_struct *pt_mm; /* x86 pgds only */
atomic_t pt_frag_refcount; /* powerpc */
+#ifdef CONFIG_ARCH_WANT_HUGE_PMD_SHARE
+	atomic_t pt_share_count;
+#endif
};
#if ALLOC_SPLIT_PTLOCKS
spinlock_t *ptl;

View File

@@ -5478,7 +5478,7 @@ pte_t *huge_pmd_share(struct mm_struct *mm, struct vm_area_struct *vma,
spte = huge_pte_offset(svma->vm_mm, saddr,
vma_mmu_pagesize(svma));
if (spte) {
-			get_page(virt_to_page(spte));
+			atomic_inc(&virt_to_page(spte)->pt_share_count);
break;
}
}
@@ -5493,7 +5493,7 @@ pte_t *huge_pmd_share(struct mm_struct *mm, struct vm_area_struct *vma,
(pmd_t *)((unsigned long)spte & PAGE_MASK));
mm_inc_nr_pmds(mm);
} else {
-		put_page(virt_to_page(spte));
+		atomic_dec(&virt_to_page(spte)->pt_share_count);
}
spin_unlock(ptl);
out:
@@ -5504,11 +5504,7 @@ out:
/*
* unmap huge page backed by shared pte.
*
- * Hugetlb pte page is ref counted at the time of mapping. If pte is shared
- * indicated by page_count > 1, unmap is achieved by clearing pud and
- * decrementing the ref count. If count == 1, the pte page is not shared.
- *
- * Called with page table lock held and i_mmap_rwsem held in write mode.
+ * Called with page table lock held.
*
* returns: 1 successfully unmapped a shared pte page
* 0 the underlying pte page is not shared, or it is the last user
@@ -5516,17 +5512,19 @@ out:
int huge_pmd_unshare(struct mm_struct *mm, struct vm_area_struct *vma,
unsigned long *addr, pte_t *ptep)
{
+	unsigned long sz = huge_page_size(hstate_vma(vma));
 	pgd_t *pgd = pgd_offset(mm, *addr);
 	p4d_t *p4d = p4d_offset(pgd, *addr);
 	pud_t *pud = pud_offset(p4d, *addr);
 	i_mmap_assert_write_locked(vma->vm_file->f_mapping);
-	BUG_ON(page_count(virt_to_page(ptep)) == 0);
-	if (page_count(virt_to_page(ptep)) == 1)
+	if (sz != PMD_SIZE)
+		return 0;
+	if (!atomic_read(&virt_to_page(ptep)->pt_share_count))
 		return 0;
 	pud_clear(pud);
-	put_page(virt_to_page(ptep));
+	atomic_dec(&virt_to_page(ptep)->pt_share_count);
mm_dec_nr_pmds(mm);
/*
* This update of passed address optimizes loops sequentially