BACKPORT: FROMGIT: mm: optimization on page allocation when CMA enabled

Let us look at the timeline of scenarios below with WMARK_LOW=25MB and WMARK_MIN=5MB (managed pages 1.9GB). We can see that CMA does not begin to be used until 'C' under the 'fixed 2 times of free CMA over free pages' method, which can leave scenarios 'A' and 'B' in a fault state — that is, free UNMOVABLE & RECLAIMABLE pages fall below the corresponding watermark without reclaiming, which should be deemed a violation of the current memory policy. This commit tries to solve this by checking zone_watermark_ok again with CMA pages removed, which leads to a
proper time point for CMA's utilization.

-- Free_pages
|
|
-- WMARK_LOW
|
-- Free_CMA
|
|
--

Free_CMA/Free_pages(MB)      A(12/30) -->  B(12/25) -->  C(12/20)
fixed 1/2 ratio                 N             N           Y
this commit                     Y             Y           Y

Bug: 286444744
Link: https://lkml.kernel.org/r/1683782550-25799-1-git-send-email-zhaoyang.huang@unisoc.com
[zhaoyang.huang: Resolved some conflict in mm/page_alloc.c]
(cherry picked from commit 82c2bc35f3dca0bb47b21866922cfd3e596c0975
git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm mm-unstable)
Change-Id: I7627efdd0ffd06b74c08694c76fdd1f8bec8b706
Signed-off-by: Zhaoyang Huang <zhaoyang.huang@unisoc.com>
This commit is contained in:
zhaoyang.huang
2023-06-05 14:17:21 +08:00
committed by Treehugger Robot
parent 9ab065d281
commit 087877d515

View File

@@ -3812,6 +3812,44 @@ static inline void zone_statistics(struct zone *preferred_zone, struct zone *z,
#endif
}
#ifdef CONFIG_CMA
/*
 * GFP_MOVABLE allocations can exhaust the UNMOVABLE and RECLAIMABLE
 * pageblocks via CMA's fallback, which in turn can make GFP_KERNEL
 * allocations fail. Re-run the watermark check without ALLOC_CMA to
 * decide whether CMA pageblocks should be drawn from first.
 */
static bool use_cma_first(struct zone *zone, unsigned int order, unsigned int alloc_flags)
{
	unsigned long mark = wmark_pages(zone, alloc_flags & ALLOC_WMARK_MASK);

	if (!zone_watermark_ok(zone, order, mark, 0, alloc_flags & ~ALLOC_CMA)) {
		/*
		 * The watermark only passed with CMA's help: UNMOVABLE &
		 * RECLAIMABLE pages are running short, so allocate from
		 * CMA first to keep them around their watermarks.
		 */
		return true;
	}

	/*
	 * Otherwise balance movable allocations between regular and CMA
	 * areas: prefer CMA once more than half of the zone's free memory
	 * sits in the CMA area.
	 */
	return zone_page_state(zone, NR_FREE_CMA_PAGES) >
	       zone_page_state(zone, NR_FREE_PAGES) / 2;
}
#else
static bool use_cma_first(struct zone *zone, unsigned int order, unsigned int alloc_flags)
{
	/* Without CONFIG_CMA there are no CMA pageblocks to prefer. */
	return false;
}
#endif
static __always_inline
struct page *rmqueue_buddy(struct zone *preferred_zone, struct zone *zone,
unsigned int order, unsigned int alloc_flags,
@@ -3835,9 +3873,15 @@ struct page *rmqueue_buddy(struct zone *preferred_zone, struct zone *zone,
trace_mm_page_alloc_zone_locked(page, order, migratetype);
}
if (!page) {
if (alloc_flags & ALLOC_CMA && migratetype == MIGRATE_MOVABLE)
/*
* Balance movable allocations between regular and CMA areas by
* allocating from CMA base on judging zone_watermark_ok again
* to see if the latest check got pass via the help of CMA
*/
if (alloc_flags & ALLOC_CMA &&
use_cma_first(zone, order, alloc_flags))
page = __rmqueue_cma(zone, order, migratetype,
alloc_flags);
alloc_flags);
if (!page)
page = __rmqueue(zone, order, migratetype,
alloc_flags);