mm: reduce the time spent by killed tasks in alloc path
There are reported issues where a task killed by LMK, while holding a huge amount of memory, loops for seconds in the reclaim path, thus causing OOMs to happen from other contexts or a panic when the oom path finds that there are no killable tasks. This patch brings back a change from older kernel versions to avoid reclaim when a fatal signal is pending. This is more important in our case than upstream, as we loop almost forever in the reclaim path when there are LMK-killable tasks (see lmk_kill_possible). Another change made by this patch is to return without sleeping in the too_many_isolated case for tasks with a fatal signal pending. Change-Id: Icd2bb7a9602ea6566425f7918e34c218bbed21cb Signed-off-by: Vinayak Menon <vinmenon@codeaurora.org>
This commit is contained in:
@@ -4118,6 +4118,9 @@ retry:
|
||||
if (current->flags & PF_MEMALLOC)
|
||||
goto nopage;
|
||||
|
||||
if (fatal_signal_pending(current) && !(gfp_mask & __GFP_NOFAIL))
|
||||
goto nopage;
|
||||
|
||||
/* Try direct reclaim and then allocating */
|
||||
page = __alloc_pages_direct_reclaim(gfp_mask, order, alloc_flags, ac,
|
||||
&did_some_progress);
|
||||
|
||||
@@ -1857,13 +1857,13 @@ shrink_inactive_list(unsigned long nr_to_scan, struct lruvec *lruvec,
|
||||
if (stalled)
|
||||
return 0;
|
||||
|
||||
/* wait a bit for the reclaimer. */
|
||||
msleep(100);
|
||||
stalled = true;
|
||||
|
||||
/* We are about to die and free our memory. Return now. */
|
||||
if (fatal_signal_pending(current))
|
||||
return SWAP_CLUSTER_MAX;
|
||||
|
||||
/* wait a bit for the reclaimer. */
|
||||
msleep(100);
|
||||
stalled = true;
|
||||
}
|
||||
|
||||
lru_add_drain();
|
||||
|
||||
Reference in New Issue
Block a user