mm/vmalloc: leave lazy MMU mode on PTE mapping error
commit fea18c686320a53fce7ad62a87a3e1d10ad02f31 upstream.
vmap_pages_pte_range() enters the lazy MMU mode, but fails to leave it in
case an error is encountered.
Link: https://lkml.kernel.org/r/20250623075721.2817094-1-agordeev@linux.ibm.com
Fixes: 2ba3e6947a ("mm/vmalloc: track which page-table levels were modified")
Signed-off-by: Alexander Gordeev <agordeev@linux.ibm.com>
Reported-by: kernel test robot <lkp@intel.com>
Reported-by: Dan Carpenter <dan.carpenter@linaro.org>
Closes: https://lore.kernel.org/r/202506132017.T1l1l6ME-lkp@intel.com/
Reviewed-by: Ryan Roberts <ryan.roberts@arm.com>
Cc: <stable@vger.kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
This commit is contained in: linux-stable
Committed by: Greg Kroah-Hartman
Parent commit: 78f9fca779
This commit:  9cf2d04305
Changed files: 1 — mm/vmalloc.c (16 lines changed)
@@ -193,6 +193,7 @@ static int vmap_pte_range(pmd_t *pmd, unsigned long addr,
 			  unsigned long end, pgprot_t prot, struct page **pages, int *nr,
 			  pgtbl_mod_mask *mask)
 {
+	int err = 0;
 	pte_t *pte;
 
 	/*
@@ -206,15 +207,20 @@ static int vmap_pte_range(pmd_t *pmd, unsigned long addr,
 	do {
 		struct page *page = pages[*nr];
 
-		if (WARN_ON(!pte_none(*pte)))
-			return -EBUSY;
-		if (WARN_ON(!page))
-			return -ENOMEM;
+		if (WARN_ON(!pte_none(*pte))) {
+			err = -EBUSY;
+			break;
+		}
+		if (WARN_ON(!page)) {
+			err = -ENOMEM;
+			break;
+		}
 		set_pte_at(&init_mm, addr, pte, mk_pte(page, prot));
 		(*nr)++;
 	} while (pte++, addr += PAGE_SIZE, addr != end);
 	*mask |= PGTBL_PTE_MODIFIED;
-	return 0;
+
+	return err;
 }
 
 static int vmap_pmd_range(pud_t *pud, unsigned long addr,
Reference in New Issue
Block a user