Revert "mm: hugetlb: independent PMD page table shared count"

This reverts commit 94b4b41d0c which is
commit 59d9094df3d79443937add8700b2ef1a866b1081 upstream.

It breaks the Android kernel ABI and can be brought back in the future
in an ABI-safe way if it is really needed.

Bug: 161946584
Change-Id: Ia52587cbd71a9c7220e888fd6725327d953da03f
Signed-off-by: Greg Kroah-Hartman <gregkh@google.com>
This commit is contained in:
Greg Kroah-Hartman
2025-07-13 12:34:06 +00:00
parent da18f01fe5
commit 38afe12e2e
3 changed files with 10 additions and 14 deletions

View File

@@ -2475,9 +2475,6 @@ static inline bool pgtable_pmd_page_ctor(struct page *page)
if (!pmd_ptlock_init(page))
return false;
__SetPageTable(page);
#ifdef CONFIG_ARCH_WANT_HUGE_PMD_SHARE
atomic_set(&page->pt_share_count, 0);
#endif
inc_zone_page_state(page, NR_PAGETABLE);
return true;
}

View File

@@ -152,9 +152,6 @@ struct page {
union {
struct mm_struct *pt_mm; /* x86 pgds only */
atomic_t pt_frag_refcount; /* powerpc */
#ifdef CONFIG_ARCH_WANT_HUGE_PMD_SHARE
atomic_t pt_share_count;
#endif
};
#if ALLOC_SPLIT_PTLOCKS
spinlock_t *ptl;

View File

@@ -5478,7 +5478,7 @@ pte_t *huge_pmd_share(struct mm_struct *mm, struct vm_area_struct *vma,
spte = huge_pte_offset(svma->vm_mm, saddr,
vma_mmu_pagesize(svma));
if (spte) {
atomic_inc(&virt_to_page(spte)->pt_share_count);
get_page(virt_to_page(spte));
break;
}
}
@@ -5493,7 +5493,7 @@ pte_t *huge_pmd_share(struct mm_struct *mm, struct vm_area_struct *vma,
(pmd_t *)((unsigned long)spte & PAGE_MASK));
mm_inc_nr_pmds(mm);
} else {
atomic_dec(&virt_to_page(spte)->pt_share_count);
put_page(virt_to_page(spte));
}
spin_unlock(ptl);
out:
@@ -5504,7 +5504,11 @@ out:
/*
* unmap huge page backed by shared pte.
*
* Called with page table lock held.
* Hugetlb pte page is ref counted at the time of mapping. If pte is shared
* indicated by page_count > 1, unmap is achieved by clearing pud and
* decrementing the ref count. If count == 1, the pte page is not shared.
*
* Called with page table lock held and i_mmap_rwsem held in write mode.
*
* returns: 1 successfully unmapped a shared pte page
* 0 the underlying pte page is not shared, or it is the last user
@@ -5512,19 +5516,17 @@ out:
int huge_pmd_unshare(struct mm_struct *mm, struct vm_area_struct *vma,
unsigned long *addr, pte_t *ptep)
{
unsigned long sz = huge_page_size(hstate_vma(vma));
pgd_t *pgd = pgd_offset(mm, *addr);
p4d_t *p4d = p4d_offset(pgd, *addr);
pud_t *pud = pud_offset(p4d, *addr);
i_mmap_assert_write_locked(vma->vm_file->f_mapping);
if (sz != PMD_SIZE)
return 0;
if (!atomic_read(&virt_to_page(ptep)->pt_share_count))
BUG_ON(page_count(virt_to_page(ptep)) == 0);
if (page_count(virt_to_page(ptep)) == 1)
return 0;
pud_clear(pud);
atomic_dec(&virt_to_page(ptep)->pt_share_count);
put_page(virt_to_page(ptep));
mm_dec_nr_pmds(mm);
/*
* This update of passed address optimizes loops sequentially