Backport new vmalloc for "large performance benefits"
This is a backport from Linux 5.2-rc1 of a patch series to greatly enhance vmalloc's performance, especially on embedded systems, plus all of its dependencies that were missing in kernel 4.9. For all the information, refer to LKML: https://lkml.org/lkml/2018/10/19/786 Brief summary: Currently an allocation of a new VA area is done by iterating the busy list until a suitable hole is found between two busy areas. Therefore each new allocation causes the list to grow. Due to the long list and differing permissive parameters, an allocation can take a long time on embedded devices (milliseconds). This patch organizes the vmalloc memory layout into free areas of the VMALLOC_START-VMALLOC_END range. It uses a red-black tree that keeps blocks sorted by their offsets, paired with a linked list keeping the free space in order of increasing addresses. Quoting Phoronix: With this patch from Uladzislau Rezki, calling vmalloc() can take up to 67% less time compared to the behavior on Linux 5.1 and prior, at least with tests done by the developer under QEMU. Personal tests show that the device is more responsive when memory pressure is high and when huge allocations are to be done; it is also noticeably faster in this case, such as when starting Chrome with more than 100 opened tabs after a system reboot (i.e., an uncached complete load of it). Shamelessly kanged from: https://github.com/sonyxperiadev/kernel / Pull Request 2016
This commit is contained in:
@@ -150,6 +150,7 @@ See the include/linux/kmemleak.h header for the functions prototype.
|
||||
- ``kmemleak_init`` - initialize kmemleak
|
||||
- ``kmemleak_alloc`` - notify of a memory block allocation
|
||||
- ``kmemleak_alloc_percpu`` - notify of a percpu memory block allocation
|
||||
- ``kmemleak_vmalloc`` - notify of a vmalloc() memory allocation
|
||||
- ``kmemleak_free`` - notify of a memory block freeing
|
||||
- ``kmemleak_free_part`` - notify of a partial memory block freeing
|
||||
- ``kmemleak_free_percpu`` - notify of a percpu memory block freeing
|
||||
|
||||
@@ -227,6 +227,9 @@ config ARCH_HAS_FORTIFY_SOURCE
|
||||
An architecture should select this when it can successfully
|
||||
build and run with CONFIG_FORTIFY_SOURCE.
|
||||
|
||||
config ARCH_HAS_SET_MEMORY
|
||||
bool
|
||||
|
||||
config FORTIFY_COMPILE_CHECK
|
||||
depends on ARCH_HAS_FORTIFY_SOURCE
|
||||
bool
|
||||
|
||||
@@ -16,6 +16,7 @@
|
||||
#include <asm/shmparam.h>
|
||||
#include <asm/cachetype.h>
|
||||
#include <asm/outercache.h>
|
||||
#include <asm/set_memory.h>
|
||||
|
||||
#define CACHE_COLOUR(vaddr) ((vaddr & (SHMLBA - 1)) >> PAGE_SHIFT)
|
||||
|
||||
|
||||
32
arch/arm/include/asm/set_memory.h
Normal file
32
arch/arm/include/asm/set_memory.h
Normal file
@@ -0,0 +1,32 @@
|
||||
/*
|
||||
* Copyright (C) 1999-2002 Russell King
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify
|
||||
* it under the terms of the GNU General Public License version 2 as
|
||||
* published by the Free Software Foundation.
|
||||
*/
|
||||
|
||||
#ifndef _ASMARM_SET_MEMORY_H
|
||||
#define _ASMARM_SET_MEMORY_H
|
||||
|
||||
#ifdef CONFIG_MMU
|
||||
int set_memory_ro(unsigned long addr, int numpages);
|
||||
int set_memory_rw(unsigned long addr, int numpages);
|
||||
int set_memory_x(unsigned long addr, int numpages);
|
||||
int set_memory_nx(unsigned long addr, int numpages);
|
||||
#else
|
||||
static inline int set_memory_ro(unsigned long addr, int numpages) { return 0; }
|
||||
static inline int set_memory_rw(unsigned long addr, int numpages) { return 0; }
|
||||
static inline int set_memory_x(unsigned long addr, int numpages) { return 0; }
|
||||
static inline int set_memory_nx(unsigned long addr, int numpages) { return 0; }
|
||||
#endif
|
||||
|
||||
#ifdef CONFIG_STRICT_KERNEL_RWX
|
||||
void set_kernel_text_rw(void);
|
||||
void set_kernel_text_ro(void);
|
||||
#else
|
||||
static inline void set_kernel_text_rw(void) { }
|
||||
static inline void set_kernel_text_ro(void) { }
|
||||
#endif
|
||||
|
||||
#endif
|
||||
@@ -92,8 +92,7 @@ void __init add_static_vm_early(struct static_vm *svm)
|
||||
void *vaddr;
|
||||
|
||||
vm = &svm->vm;
|
||||
if (!vm_area_check_early(vm))
|
||||
vm_area_add_early(vm);
|
||||
vm_area_add_early(vm);
|
||||
vaddr = vm->addr;
|
||||
|
||||
list_for_each_entry(curr_svm, &static_vmlist, list) {
|
||||
|
||||
@@ -1462,21 +1462,12 @@ static void __init map_lowmem(void)
|
||||
phys_addr_t kernel_x_start = round_down(__pa(_stext), SECTION_SIZE);
|
||||
#endif
|
||||
phys_addr_t kernel_x_end = round_up(__pa(__init_end), SECTION_SIZE);
|
||||
struct static_vm *svm;
|
||||
phys_addr_t start;
|
||||
phys_addr_t end;
|
||||
unsigned long vaddr;
|
||||
unsigned long pfn;
|
||||
unsigned long length;
|
||||
unsigned int type;
|
||||
int nr = 0;
|
||||
|
||||
/* Map all the lowmem memory banks. */
|
||||
for_each_memblock(memory, reg) {
|
||||
phys_addr_t start = reg->base;
|
||||
phys_addr_t end = start + reg->size;
|
||||
struct map_desc map;
|
||||
start = reg->base;
|
||||
end = start + reg->size;
|
||||
nr++;
|
||||
|
||||
if (memblock_is_nomap(reg))
|
||||
continue;
|
||||
@@ -1528,34 +1519,6 @@ static void __init map_lowmem(void)
|
||||
}
|
||||
}
|
||||
}
|
||||
svm = early_alloc_aligned(sizeof(*svm) * nr, __alignof__(*svm));
|
||||
|
||||
for_each_memblock(memory, reg) {
|
||||
struct vm_struct *vm;
|
||||
|
||||
start = reg->base;
|
||||
end = start + reg->size;
|
||||
|
||||
if (end > arm_lowmem_limit)
|
||||
end = arm_lowmem_limit;
|
||||
if (start >= end)
|
||||
break;
|
||||
|
||||
vm = &svm->vm;
|
||||
pfn = __phys_to_pfn(start);
|
||||
vaddr = __phys_to_virt(start);
|
||||
length = end - start;
|
||||
type = MT_MEMORY_RW;
|
||||
|
||||
vm->addr = (void *)(vaddr & PAGE_MASK);
|
||||
vm->size = PAGE_ALIGN(length + (vaddr & ~PAGE_MASK));
|
||||
vm->phys_addr = __pfn_to_phys(pfn);
|
||||
vm->flags = VM_LOWMEM;
|
||||
vm->flags |= VM_ARM_MTYPE(type);
|
||||
vm->caller = map_lowmem;
|
||||
add_static_vm_early(svm++);
|
||||
mark_vmalloc_reserved_area(vm->addr, vm->size);
|
||||
}
|
||||
}
|
||||
|
||||
#ifdef CONFIG_ARM_PV_FIXUP
|
||||
|
||||
@@ -23,6 +23,7 @@ config ARM64
|
||||
select ARCH_WANT_COMPAT_IPC_PARSE_VERSION
|
||||
select ARCH_WANT_FRAME_POINTERS
|
||||
select ARCH_HAS_UBSAN_SANITIZE_ALL
|
||||
select ARCH_HAS_SET_MEMORY
|
||||
select ARM_AMBA
|
||||
select ARM_ARCH_TIMER
|
||||
select HAVE_KERNEL_GZIP
|
||||
|
||||
@@ -31,6 +31,7 @@ generic-y += rwsem.h
|
||||
generic-y += segment.h
|
||||
generic-y += sembuf.h
|
||||
generic-y += serial.h
|
||||
generic-y += set_memory.h
|
||||
generic-y += shmbuf.h
|
||||
generic-y += simd.h
|
||||
generic-y += sizes.h
|
||||
|
||||
@@ -20,6 +20,7 @@
|
||||
#define __ASM_CACHEFLUSH_H
|
||||
|
||||
#include <linux/mm.h>
|
||||
#include <asm/set_memory.h>
|
||||
|
||||
/*
|
||||
* This flag is used to indicate that the page pointed to by a pte is clean
|
||||
|
||||
@@ -43,7 +43,7 @@ void free_init_pages(const char *what, unsigned long begin, unsigned long end)
|
||||
ClearPageReserved(virt_to_page(addr));
|
||||
init_page_count(virt_to_page(addr));
|
||||
free_page(addr);
|
||||
totalram_pages++;
|
||||
totalram_pages_inc();
|
||||
}
|
||||
|
||||
printk(KERN_INFO "Freeing %s: %ldk freed\n", what, (end - begin) >> 10);
|
||||
|
||||
@@ -218,7 +218,7 @@ void *module_alloc(unsigned long size)
|
||||
* easier than trying to map the text, data, init_text and
|
||||
* init_data correctly */
|
||||
return __vmalloc_node_range(size, 1, VMALLOC_START, VMALLOC_END,
|
||||
GFP_KERNEL | __GFP_HIGHMEM,
|
||||
GFP_KERNEL,
|
||||
PAGE_KERNEL_RWX, 0, NUMA_NO_NODE,
|
||||
__builtin_return_address(0));
|
||||
}
|
||||
|
||||
@@ -174,7 +174,7 @@ static long cmm_alloc_pages(long nr)
|
||||
|
||||
pa->page[pa->index++] = addr;
|
||||
loaned_pages++;
|
||||
totalram_pages--;
|
||||
totalram_pages_dec();
|
||||
spin_unlock(&cmm_lock);
|
||||
nr--;
|
||||
}
|
||||
@@ -213,7 +213,7 @@ static long cmm_free_pages(long nr)
|
||||
free_page(addr);
|
||||
loaned_pages--;
|
||||
nr--;
|
||||
totalram_pages++;
|
||||
totalram_pages_inc();
|
||||
}
|
||||
spin_unlock(&cmm_lock);
|
||||
cmm_dbg("End request with %ld pages unfulfilled\n", nr);
|
||||
@@ -257,7 +257,7 @@ static void cmm_get_mpp(void)
|
||||
int rc;
|
||||
struct hvcall_mpp_data mpp_data;
|
||||
signed long active_pages_target, page_loan_request, target;
|
||||
signed long total_pages = totalram_pages + loaned_pages;
|
||||
signed long total_pages = totalram_pages() + loaned_pages;
|
||||
signed long min_mem_pages = (min_mem_mb * 1024 * 1024) / PAGE_SIZE;
|
||||
|
||||
rc = h_get_mpp(&mpp_data);
|
||||
@@ -288,7 +288,7 @@ static void cmm_get_mpp(void)
|
||||
|
||||
cmm_dbg("delta = %ld, loaned = %lu, target = %lu, oom = %lu, totalram = %lu\n",
|
||||
page_loan_request, loaned_pages, loaned_pages_target,
|
||||
oom_freed_pages, totalram_pages);
|
||||
oom_freed_pages, totalram_pages());
|
||||
}
|
||||
|
||||
static struct notifier_block cmm_oom_nb = {
|
||||
@@ -552,7 +552,7 @@ static int cmm_mem_going_offline(void *arg)
|
||||
free_page(pa_curr->page[idx]);
|
||||
freed++;
|
||||
loaned_pages--;
|
||||
totalram_pages++;
|
||||
totalram_pages_inc();
|
||||
pa_curr->page[idx] = pa_last->page[--pa_last->index];
|
||||
if (pa_last->index == 0) {
|
||||
if (pa_curr == pa_last)
|
||||
|
||||
31
arch/s390/include/asm/set_memory.h
Normal file
31
arch/s390/include/asm/set_memory.h
Normal file
@@ -0,0 +1,31 @@
|
||||
#ifndef _ASMS390_SET_MEMORY_H
|
||||
#define _ASMS390_SET_MEMORY_H
|
||||
|
||||
#define SET_MEMORY_RO 1UL
|
||||
#define SET_MEMORY_RW 2UL
|
||||
#define SET_MEMORY_NX 4UL
|
||||
#define SET_MEMORY_X 8UL
|
||||
|
||||
int __set_memory(unsigned long addr, int numpages, unsigned long flags);
|
||||
|
||||
static inline int set_memory_ro(unsigned long addr, int numpages)
|
||||
{
|
||||
return __set_memory(addr, numpages, SET_MEMORY_RO);
|
||||
}
|
||||
|
||||
static inline int set_memory_rw(unsigned long addr, int numpages)
|
||||
{
|
||||
return __set_memory(addr, numpages, SET_MEMORY_RW);
|
||||
}
|
||||
|
||||
static inline int set_memory_nx(unsigned long addr, int numpages)
|
||||
{
|
||||
return __set_memory(addr, numpages, SET_MEMORY_NX);
|
||||
}
|
||||
|
||||
static inline int set_memory_x(unsigned long addr, int numpages)
|
||||
{
|
||||
return __set_memory(addr, numpages, SET_MEMORY_X);
|
||||
}
|
||||
|
||||
#endif
|
||||
@@ -56,7 +56,7 @@ static void __init setup_zero_pages(void)
|
||||
order = 7;
|
||||
|
||||
/* Limit number of empty zero pages for small memory sizes */
|
||||
while (order > 2 && (totalram_pages >> 10) < (1UL << order))
|
||||
while (order > 2 && (totalram_pages() >> 10) < (1UL << order))
|
||||
order--;
|
||||
|
||||
empty_zero_page = __get_free_pages(GFP_KERNEL | __GFP_ZERO, order);
|
||||
|
||||
@@ -61,7 +61,7 @@ void show_mem(unsigned int filter)
|
||||
show_free_areas(filter);
|
||||
printk("Free swap: %6ldkB\n",
|
||||
get_nr_swap_pages() << (PAGE_SHIFT-10));
|
||||
printk("%ld pages of RAM\n", totalram_pages);
|
||||
printk("%ld pages of RAM\n", totalram_pages());
|
||||
printk("%ld free pages\n", nr_free_pages());
|
||||
}
|
||||
|
||||
|
||||
@@ -708,7 +708,7 @@ static void __init set_non_bootmem_pages_init(void)
|
||||
|
||||
#ifdef CONFIG_HIGHMEM
|
||||
if (idx == ZONE_HIGHMEM)
|
||||
totalhigh_pages += z->spanned_pages;
|
||||
totalhigh_pages_add(z->spanned_pages);
|
||||
#endif
|
||||
if (kdata_huge) {
|
||||
unsigned long percpu_pfn = node_percpu_pfn[nid];
|
||||
|
||||
@@ -53,8 +53,8 @@ void __init mem_init(void)
|
||||
|
||||
/* this will put all low memory onto the freelists */
|
||||
free_all_bootmem();
|
||||
max_low_pfn = totalram_pages;
|
||||
max_pfn = totalram_pages;
|
||||
max_low_pfn = totalram_pages();
|
||||
max_pfn = totalram_pages();
|
||||
mem_init_print_info(NULL);
|
||||
kmalloc_ok = 1;
|
||||
}
|
||||
|
||||
87
arch/x86/include/asm/set_memory.h
Normal file
87
arch/x86/include/asm/set_memory.h
Normal file
@@ -0,0 +1,87 @@
|
||||
#ifndef _ASM_X86_SET_MEMORY_H
|
||||
#define _ASM_X86_SET_MEMORY_H
|
||||
|
||||
#include <asm/page.h>
|
||||
#include <asm-generic/set_memory.h>
|
||||
|
||||
/*
|
||||
* The set_memory_* API can be used to change various attributes of a virtual
|
||||
* address range. The attributes include:
|
||||
* Cachability : UnCached, WriteCombining, WriteThrough, WriteBack
|
||||
* Executability : eXeutable, NoteXecutable
|
||||
* Read/Write : ReadOnly, ReadWrite
|
||||
* Presence : NotPresent
|
||||
*
|
||||
* Within a category, the attributes are mutually exclusive.
|
||||
*
|
||||
* The implementation of this API will take care of various aspects that
|
||||
* are associated with changing such attributes, such as:
|
||||
* - Flushing TLBs
|
||||
* - Flushing CPU caches
|
||||
* - Making sure aliases of the memory behind the mapping don't violate
|
||||
* coherency rules as defined by the CPU in the system.
|
||||
*
|
||||
* What this API does not do:
|
||||
* - Provide exclusion between various callers - including callers that
|
||||
* operation on other mappings of the same physical page
|
||||
* - Restore default attributes when a page is freed
|
||||
* - Guarantee that mappings other than the requested one are
|
||||
* in any state, other than that these do not violate rules for
|
||||
* the CPU you have. Do not depend on any effects on other mappings,
|
||||
* CPUs other than the one you have may have more relaxed rules.
|
||||
* The caller is required to take care of these.
|
||||
*/
|
||||
|
||||
int _set_memory_uc(unsigned long addr, int numpages);
|
||||
int _set_memory_wc(unsigned long addr, int numpages);
|
||||
int _set_memory_wt(unsigned long addr, int numpages);
|
||||
int _set_memory_wb(unsigned long addr, int numpages);
|
||||
int set_memory_uc(unsigned long addr, int numpages);
|
||||
int set_memory_wc(unsigned long addr, int numpages);
|
||||
int set_memory_wt(unsigned long addr, int numpages);
|
||||
int set_memory_wb(unsigned long addr, int numpages);
|
||||
int set_memory_np(unsigned long addr, int numpages);
|
||||
int set_memory_4k(unsigned long addr, int numpages);
|
||||
|
||||
int set_memory_array_uc(unsigned long *addr, int addrinarray);
|
||||
int set_memory_array_wc(unsigned long *addr, int addrinarray);
|
||||
int set_memory_array_wt(unsigned long *addr, int addrinarray);
|
||||
int set_memory_array_wb(unsigned long *addr, int addrinarray);
|
||||
|
||||
int set_pages_array_uc(struct page **pages, int addrinarray);
|
||||
int set_pages_array_wc(struct page **pages, int addrinarray);
|
||||
int set_pages_array_wt(struct page **pages, int addrinarray);
|
||||
int set_pages_array_wb(struct page **pages, int addrinarray);
|
||||
|
||||
/*
|
||||
* For legacy compatibility with the old APIs, a few functions
|
||||
* are provided that work on a "struct page".
|
||||
* These functions operate ONLY on the 1:1 kernel mapping of the
|
||||
* memory that the struct page represents, and internally just
|
||||
* call the set_memory_* function. See the description of the
|
||||
* set_memory_* function for more details on conventions.
|
||||
*
|
||||
* These APIs should be considered *deprecated* and are likely going to
|
||||
* be removed in the future.
|
||||
* The reason for this is the implicit operation on the 1:1 mapping only,
|
||||
* making this not a generally useful API.
|
||||
*
|
||||
* Specifically, many users of the old APIs had a virtual address,
|
||||
* called virt_to_page() or vmalloc_to_page() on that address to
|
||||
* get a struct page* that the old API required.
|
||||
* To convert these cases, use set_memory_*() on the original
|
||||
* virtual address, do not use these functions.
|
||||
*/
|
||||
|
||||
int set_pages_uc(struct page *page, int numpages);
|
||||
int set_pages_wb(struct page *page, int numpages);
|
||||
int set_pages_x(struct page *page, int numpages);
|
||||
int set_pages_nx(struct page *page, int numpages);
|
||||
int set_pages_ro(struct page *page, int numpages);
|
||||
int set_pages_rw(struct page *page, int numpages);
|
||||
|
||||
extern int kernel_set_to_readonly;
|
||||
void set_kernel_text_rw(void);
|
||||
void set_kernel_text_ro(void);
|
||||
|
||||
#endif /* _ASM_X86_SET_MEMORY_H */
|
||||
@@ -325,8 +325,8 @@ static ssize_t microcode_write(struct file *file, const char __user *buf,
|
||||
{
|
||||
ssize_t ret = -EINVAL;
|
||||
|
||||
if ((len >> PAGE_SHIFT) > totalram_pages) {
|
||||
pr_err("too much data (max %ld pages)\n", totalram_pages);
|
||||
if ((len >> PAGE_SHIFT) > totalram_pages()) {
|
||||
pr_err("too much data (max %ld pages)\n", totalram_pages());
|
||||
return ret;
|
||||
}
|
||||
|
||||
|
||||
@@ -85,7 +85,7 @@ void *module_alloc(unsigned long size)
|
||||
|
||||
p = __vmalloc_node_range(size, MODULE_ALIGN,
|
||||
MODULES_VADDR + get_module_load_offset(),
|
||||
MODULES_END, GFP_KERNEL | __GFP_HIGHMEM,
|
||||
MODULES_END, GFP_KERNEL,
|
||||
PAGE_KERNEL_EXEC, 0, NUMA_NO_NODE,
|
||||
__builtin_return_address(0));
|
||||
if (p && (kasan_module_alloc(p, size) < 0)) {
|
||||
|
||||
@@ -168,8 +168,8 @@ static void recalculate_apic_map(struct kvm *kvm)
|
||||
if (kvm_apic_present(vcpu))
|
||||
max_id = max(max_id, kvm_apic_id(vcpu->arch.apic));
|
||||
|
||||
new = kvm_kvzalloc(sizeof(struct kvm_apic_map) +
|
||||
sizeof(struct kvm_lapic *) * ((u64)max_id + 1));
|
||||
new = kvzalloc(sizeof(struct kvm_apic_map) +
|
||||
sizeof(struct kvm_lapic *) * ((u64)max_id + 1), GFP_KERNEL);
|
||||
|
||||
if (!new)
|
||||
goto out;
|
||||
|
||||
@@ -38,8 +38,8 @@ int kvm_page_track_create_memslot(struct kvm_memory_slot *slot,
|
||||
int i;
|
||||
|
||||
for (i = 0; i < KVM_PAGE_TRACK_MAX; i++) {
|
||||
slot->arch.gfn_track[i] = kvm_kvzalloc(npages *
|
||||
sizeof(*slot->arch.gfn_track[i]));
|
||||
slot->arch.gfn_track[i] = kvzalloc(npages *
|
||||
sizeof(*slot->arch.gfn_track[i]), GFP_KERNEL);
|
||||
if (!slot->arch.gfn_track[i])
|
||||
goto track_free;
|
||||
}
|
||||
|
||||
@@ -8358,13 +8358,13 @@ int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
|
||||
slot->base_gfn, level) + 1;
|
||||
|
||||
slot->arch.rmap[i] =
|
||||
kvm_kvzalloc(lpages * sizeof(*slot->arch.rmap[i]));
|
||||
kvzalloc(lpages * sizeof(*slot->arch.rmap[i]), GFP_KERNEL);
|
||||
if (!slot->arch.rmap[i])
|
||||
goto out_free;
|
||||
if (i == 0)
|
||||
continue;
|
||||
|
||||
linfo = kvm_kvzalloc(lpages * sizeof(*linfo));
|
||||
linfo = kvzalloc(lpages * sizeof(*linfo), GFP_KERNEL);
|
||||
if (!linfo)
|
||||
goto out_free;
|
||||
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
#include <linux/highmem.h>
|
||||
#include <linux/export.h>
|
||||
#include <linux/swap.h> /* for totalram_pages */
|
||||
#include <linux/swap.h> /* for totalram_pages() */
|
||||
#include <linux/bootmem.h>
|
||||
|
||||
void *kmap(struct page *page)
|
||||
|
||||
@@ -1897,8 +1897,6 @@ int set_pages_rw(struct page *page, int numpages)
|
||||
return set_memory_rw(addr, numpages);
|
||||
}
|
||||
|
||||
#ifdef CONFIG_DEBUG_PAGEALLOC
|
||||
|
||||
static int __set_pages_p(struct page *page, int numpages)
|
||||
{
|
||||
unsigned long tempaddr = (unsigned long) page_address(page);
|
||||
@@ -1937,6 +1935,17 @@ static int __set_pages_np(struct page *page, int numpages)
|
||||
return __change_page_attr_set_clr(&cpa, 0);
|
||||
}
|
||||
|
||||
int set_direct_map_invalid_noflush(struct page *page)
|
||||
{
|
||||
return __set_pages_np(page, 1);
|
||||
}
|
||||
|
||||
int set_direct_map_default_noflush(struct page *page)
|
||||
{
|
||||
return __set_pages_p(page, 1);
|
||||
}
|
||||
|
||||
#ifdef CONFIG_DEBUG_PAGEALLOC
|
||||
void __kernel_map_pages(struct page *page, int numpages, int enable)
|
||||
{
|
||||
if (PageHighMem(page))
|
||||
@@ -1966,7 +1975,6 @@ void __kernel_map_pages(struct page *page, int numpages, int enable)
|
||||
}
|
||||
|
||||
#ifdef CONFIG_HIBERNATION
|
||||
|
||||
bool kernel_page_present(struct page *page)
|
||||
{
|
||||
unsigned int level;
|
||||
|
||||
@@ -409,7 +409,7 @@ static struct page **bm_realloc_pages(struct drbd_bitmap *b, unsigned long want)
|
||||
new_pages = kzalloc(bytes, GFP_NOIO | __GFP_NOWARN);
|
||||
if (!new_pages) {
|
||||
new_pages = __vmalloc(bytes,
|
||||
GFP_NOIO | __GFP_HIGHMEM | __GFP_ZERO,
|
||||
GFP_NOIO | __GFP_ZERO,
|
||||
PAGE_KERNEL);
|
||||
if (!new_pages)
|
||||
return NULL;
|
||||
|
||||
@@ -115,9 +115,9 @@ static int agp_find_max(void)
|
||||
long memory, index, result;
|
||||
|
||||
#if PAGE_SHIFT < 20
|
||||
memory = totalram_pages >> (20 - PAGE_SHIFT);
|
||||
memory = totalram_pages() >> (20 - PAGE_SHIFT);
|
||||
#else
|
||||
memory = totalram_pages << (PAGE_SHIFT - 20);
|
||||
memory = totalram_pages() << (PAGE_SHIFT - 20);
|
||||
#endif
|
||||
index = 1;
|
||||
|
||||
|
||||
@@ -1048,16 +1048,16 @@ static unsigned long compute_balloon_floor(void)
|
||||
* 8192 744 (1/16)
|
||||
* 32768 1512 (1/32)
|
||||
*/
|
||||
if (totalram_pages < MB2PAGES(128))
|
||||
min_pages = MB2PAGES(8) + (totalram_pages >> 1);
|
||||
else if (totalram_pages < MB2PAGES(512))
|
||||
min_pages = MB2PAGES(40) + (totalram_pages >> 2);
|
||||
else if (totalram_pages < MB2PAGES(2048))
|
||||
min_pages = MB2PAGES(104) + (totalram_pages >> 3);
|
||||
else if (totalram_pages < MB2PAGES(8192))
|
||||
min_pages = MB2PAGES(232) + (totalram_pages >> 4);
|
||||
if (totalram_pages() < MB2PAGES(128))
|
||||
min_pages = MB2PAGES(8) + (totalram_pages() >> 1);
|
||||
else if (totalram_pages() < MB2PAGES(512))
|
||||
min_pages = MB2PAGES(40) + (totalram_pages() >> 2);
|
||||
else if (totalram_pages() < MB2PAGES(2048))
|
||||
min_pages = MB2PAGES(104) + (totalram_pages() >> 3);
|
||||
else if (totalram_pages() < MB2PAGES(8192))
|
||||
min_pages = MB2PAGES(232) + (totalram_pages() >> 4);
|
||||
else
|
||||
min_pages = MB2PAGES(488) + (totalram_pages >> 5);
|
||||
min_pages = MB2PAGES(488) + (totalram_pages() >> 5);
|
||||
#undef MB2PAGES
|
||||
return min_pages;
|
||||
}
|
||||
|
||||
@@ -421,14 +421,13 @@ static void *alloc_buffer_data(struct dm_bufio_client *c, gfp_t gfp_mask,
|
||||
*/
|
||||
if (gfp_mask & __GFP_NORETRY) {
|
||||
unsigned noio_flag = memalloc_noio_save();
|
||||
void *ptr = __vmalloc(c->block_size, gfp_mask | __GFP_HIGHMEM,
|
||||
PAGE_KERNEL);
|
||||
void *ptr = __vmalloc(c->block_size, gfp_mask, PAGE_KERNEL);
|
||||
|
||||
memalloc_noio_restore(noio_flag);
|
||||
return ptr;
|
||||
}
|
||||
|
||||
return __vmalloc(c->block_size, gfp_mask | __GFP_HIGHMEM, PAGE_KERNEL);
|
||||
return __vmalloc(c->block_size, gfp_mask, PAGE_KERNEL);
|
||||
}
|
||||
|
||||
/*
|
||||
@@ -1907,7 +1906,7 @@ static int __init dm_bufio_init(void)
|
||||
memset(&dm_bufio_caches, 0, sizeof dm_bufio_caches);
|
||||
memset(&dm_bufio_cache_names, 0, sizeof dm_bufio_cache_names);
|
||||
|
||||
mem = (__u64)mult_frac(totalram_pages - totalhigh_pages,
|
||||
mem = (__u64)mult_frac(totalram_pages() - totalhigh_pages(),
|
||||
DM_BUFIO_MEMORY_PERCENT, 100) << PAGE_SHIFT;
|
||||
|
||||
if (mem > ULONG_MAX)
|
||||
|
||||
@@ -84,7 +84,7 @@ static bool __check_shared_memory(size_t alloc_size)
|
||||
a = shared_memory_amount + alloc_size;
|
||||
if (a < shared_memory_amount)
|
||||
return false;
|
||||
if (a >> PAGE_SHIFT > totalram_pages / DM_STATS_MEMORY_FACTOR)
|
||||
if (a >> PAGE_SHIFT > totalram_pages() / DM_STATS_MEMORY_FACTOR)
|
||||
return false;
|
||||
#ifdef CONFIG_MMU
|
||||
if (a > (VMALLOC_END - VMALLOC_START) / DM_STATS_VMALLOC_FACTOR)
|
||||
@@ -146,12 +146,7 @@ static void *dm_kvzalloc(size_t alloc_size, int node)
|
||||
if (!claim_shared_memory(alloc_size))
|
||||
return NULL;
|
||||
|
||||
if (alloc_size <= KMALLOC_MAX_SIZE) {
|
||||
p = kzalloc_node(alloc_size, GFP_KERNEL | __GFP_NORETRY | __GFP_NOMEMALLOC | __GFP_NOWARN, node);
|
||||
if (p)
|
||||
return p;
|
||||
}
|
||||
p = vzalloc_node(alloc_size, node);
|
||||
p = kvzalloc_node(alloc_size, GFP_KERNEL | __GFP_NOMEMALLOC, node);
|
||||
if (p)
|
||||
return p;
|
||||
|
||||
|
||||
@@ -841,7 +841,7 @@ static int mtk_vpu_probe(struct platform_device *pdev)
|
||||
/* Set PTCM to 96K and DTCM to 32K */
|
||||
vpu_cfg_writel(vpu, 0x2, VPU_TCM_CFG);
|
||||
|
||||
vpu->enable_4GB = !!(totalram_pages > (SZ_2G >> PAGE_SHIFT));
|
||||
vpu->enable_4GB = !!(totalram_pages() > (SZ_2G >> PAGE_SHIFT));
|
||||
dev_info(dev, "4GB mode %u\n", vpu->enable_4GB);
|
||||
|
||||
if (vpu->enable_4GB) {
|
||||
|
||||
@@ -1272,7 +1272,7 @@ ccio_ioc_init(struct ioc *ioc)
|
||||
** Hot-Plug/Removal of PCI cards. (aka PCI OLARD).
|
||||
*/
|
||||
|
||||
iova_space_size = (u32) (totalram_pages / count_parisc_driver(&ccio_driver));
|
||||
iova_space_size = (u32) (totalram_pages() / count_parisc_driver(&ccio_driver));
|
||||
|
||||
/* limit IOVA space size to 1MB-1GB */
|
||||
|
||||
@@ -1311,7 +1311,7 @@ ccio_ioc_init(struct ioc *ioc)
|
||||
|
||||
DBG_INIT("%s() hpa 0x%p mem %luMB IOV %dMB (%d bits)\n",
|
||||
__func__, ioc->ioc_regs,
|
||||
(unsigned long) totalram_pages >> (20 - PAGE_SHIFT),
|
||||
(unsigned long) totalram_pages() >> (20 - PAGE_SHIFT),
|
||||
iova_space_size>>20,
|
||||
iov_order + PAGE_SHIFT);
|
||||
|
||||
|
||||
@@ -1436,7 +1436,7 @@ sba_ioc_init(struct parisc_device *sba, struct ioc *ioc, int ioc_num)
|
||||
DBG_INIT("%s() hpa 0x%lx mem %ldMB IOV %dMB (%d bits)\n",
|
||||
__func__,
|
||||
ioc->ioc_hpa,
|
||||
(unsigned long) totalram_pages >> (20 - PAGE_SHIFT),
|
||||
(unsigned long) totalram_pages() >> (20 - PAGE_SHIFT),
|
||||
iova_space_size>>20,
|
||||
iov_order + PAGE_SHIFT);
|
||||
|
||||
|
||||
@@ -379,7 +379,7 @@ static int ion_system_heap_allocate(struct ion_heap *heap,
|
||||
if (align > PAGE_SIZE)
|
||||
return -EINVAL;
|
||||
|
||||
if (size / PAGE_SIZE > totalram_pages / 2)
|
||||
if (size / PAGE_SIZE > totalram_pages() / 2)
|
||||
return -ENOMEM;
|
||||
|
||||
data.size = 0;
|
||||
|
||||
@@ -42,7 +42,7 @@
|
||||
#if BITS_PER_LONG == 32
|
||||
/* limit to lowmem on 32-bit systems */
|
||||
#define NUM_CACHEPAGES \
|
||||
min(totalram_pages, 1UL << (30 - PAGE_SHIFT) * 3 / 4)
|
||||
min(totalram_pages(), 1UL << (30 - PAGE_SHIFT) * 3 / 4)
|
||||
#else
|
||||
#define NUM_CACHEPAGES totalram_pages
|
||||
#endif
|
||||
|
||||
@@ -249,7 +249,7 @@ void cfs_print_to_console(struct ptldebug_header *hdr, int mask,
|
||||
|
||||
int cfs_trace_max_debug_mb(void)
|
||||
{
|
||||
int total_mb = (totalram_pages >> (20 - PAGE_SHIFT));
|
||||
int total_mb = (totalram_pages() >> (20 - PAGE_SHIFT));
|
||||
|
||||
return max(512, (total_mb * 80) / 100);
|
||||
}
|
||||
|
||||
@@ -1217,8 +1217,8 @@ static inline void client_adjust_max_dirty(struct client_obd *cli)
|
||||
cli->cl_dirty_max_pages = dirty_max;
|
||||
}
|
||||
|
||||
if (cli->cl_dirty_max_pages > totalram_pages / 8)
|
||||
cli->cl_dirty_max_pages = totalram_pages / 8;
|
||||
if (cli->cl_dirty_max_pages > totalram_pages() / 8)
|
||||
cli->cl_dirty_max_pages = totalram_pages() / 8;
|
||||
}
|
||||
|
||||
#endif /* __OBD_H */
|
||||
|
||||
@@ -363,11 +363,11 @@ int client_obd_setup(struct obd_device *obddev, struct lustre_cfg *lcfg)
|
||||
|
||||
if (!strcmp(name, LUSTRE_MDC_NAME)) {
|
||||
cli->cl_max_rpcs_in_flight = OBD_MAX_RIF_DEFAULT;
|
||||
} else if (totalram_pages >> (20 - PAGE_SHIFT) <= 128 /* MB */) {
|
||||
} else if (totalram_pages() >> (20 - PAGE_SHIFT) <= 128 /* MB */) {
|
||||
cli->cl_max_rpcs_in_flight = 2;
|
||||
} else if (totalram_pages >> (20 - PAGE_SHIFT) <= 256 /* MB */) {
|
||||
} else if (totalram_pages() >> (20 - PAGE_SHIFT) <= 256 /* MB */) {
|
||||
cli->cl_max_rpcs_in_flight = 3;
|
||||
} else if (totalram_pages >> (20 - PAGE_SHIFT) <= 512 /* MB */) {
|
||||
} else if (totalram_pages() >> (20 - PAGE_SHIFT) <= 512 /* MB */) {
|
||||
cli->cl_max_rpcs_in_flight = 4;
|
||||
} else {
|
||||
cli->cl_max_rpcs_in_flight = OBD_MAX_RIF_DEFAULT;
|
||||
|
||||
@@ -244,9 +244,9 @@ static ssize_t max_read_ahead_mb_store(struct kobject *kobj,
|
||||
|
||||
pages_number *= 1 << (20 - PAGE_SHIFT); /* MB -> pages */
|
||||
|
||||
if (pages_number > totalram_pages / 2) {
|
||||
if (pages_number > totalram_pages() / 2) {
|
||||
CERROR("can't set file readahead more than %lu MB\n",
|
||||
totalram_pages >> (20 - PAGE_SHIFT + 1)); /*1/2 of RAM*/
|
||||
totalram_pages() >> (20 - PAGE_SHIFT + 1)); /*1/2 of RAM*/
|
||||
return -ERANGE;
|
||||
}
|
||||
|
||||
@@ -411,10 +411,10 @@ static ssize_t ll_max_cached_mb_seq_write(struct file *file,
|
||||
return -ERANGE;
|
||||
pages_number = (long)val;
|
||||
|
||||
if (pages_number < 0 || pages_number > totalram_pages) {
|
||||
if (pages_number < 0 || pages_number > totalram_pages()) {
|
||||
CERROR("%s: can't set max cache more than %lu MB\n",
|
||||
ll_get_fsname(sb, NULL, 0),
|
||||
totalram_pages >> (20 - PAGE_SHIFT));
|
||||
totalram_pages() >> (20 - PAGE_SHIFT));
|
||||
return -ERANGE;
|
||||
}
|
||||
|
||||
|
||||
@@ -477,10 +477,10 @@ static int __init obdclass_init(void)
|
||||
* For clients with less memory, a larger fraction is needed
|
||||
* for other purposes (mostly for BGL).
|
||||
*/
|
||||
if (totalram_pages <= 512 << (20 - PAGE_SHIFT))
|
||||
obd_max_dirty_pages = totalram_pages / 4;
|
||||
if (totalram_pages() <= 512 << (20 - PAGE_SHIFT))
|
||||
obd_max_dirty_pages = totalram_pages() / 4;
|
||||
else
|
||||
obd_max_dirty_pages = totalram_pages / 2;
|
||||
obd_max_dirty_pages = totalram_pages() / 2;
|
||||
|
||||
err = obd_init_caches();
|
||||
if (err)
|
||||
|
||||
@@ -112,7 +112,7 @@ static ssize_t max_dirty_mb_store(struct kobject *kobj, struct attribute *attr,
|
||||
|
||||
val *= 1 << (20 - PAGE_SHIFT); /* convert to pages */
|
||||
|
||||
if (val > ((totalram_pages / 10) * 9)) {
|
||||
if (val > ((totalram_pages() / 10) * 9)) {
|
||||
/* Somebody wants to assign too much memory to dirty pages */
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
@@ -884,7 +884,7 @@ static unsigned long lu_htable_order(struct lu_device *top)
|
||||
*
|
||||
* Size of lu_object is (arbitrary) taken as 1K (together with inode).
|
||||
*/
|
||||
cache_size = totalram_pages;
|
||||
cache_size = totalram_pages();
|
||||
|
||||
#if BITS_PER_LONG == 32
|
||||
/* limit hashtable size for lowmem systems to low RAM */
|
||||
|
||||
@@ -163,7 +163,7 @@ static ssize_t max_dirty_mb_store(struct kobject *kobj,
|
||||
|
||||
if (pages_number <= 0 ||
|
||||
pages_number > OSC_MAX_DIRTY_MB_MAX << (20 - PAGE_SHIFT) ||
|
||||
pages_number > totalram_pages / 4) /* 1/4 of RAM */
|
||||
pages_number > totalram_pages() / 4) /* 1/4 of RAM */
|
||||
return -ERANGE;
|
||||
|
||||
spin_lock(&cli->cl_loi_list_lock);
|
||||
|
||||
@@ -306,7 +306,7 @@ ptlrpc_lprocfs_req_history_max_seq_write(struct file *file,
|
||||
* far.
|
||||
*/
|
||||
bufpages = (svc->srv_buf_size + PAGE_SIZE - 1) >> PAGE_SHIFT;
|
||||
if (val > totalram_pages / (2 * bufpages))
|
||||
if (val > totalram_pages() / (2 * bufpages))
|
||||
return -ERANGE;
|
||||
|
||||
spin_lock(&svc->srv_lock);
|
||||
|
||||
@@ -140,7 +140,7 @@ int sptlrpc_proc_enc_pool_seq_show(struct seq_file *m, void *v)
|
||||
"low free mark: %lu\n"
|
||||
"max waitqueue depth: %u\n"
|
||||
"max wait time: %ld/%lu\n",
|
||||
totalram_pages,
|
||||
totalram_pages(),
|
||||
PAGES_PER_POOL,
|
||||
page_pools.epp_max_pages,
|
||||
page_pools.epp_max_pools,
|
||||
@@ -378,7 +378,7 @@ int sptlrpc_enc_pool_init(void)
|
||||
* maximum capacity is 1/8 of total physical memory.
|
||||
* is the 1/8 a good number?
|
||||
*/
|
||||
page_pools.epp_max_pages = totalram_pages / 8;
|
||||
page_pools.epp_max_pages = totalram_pages() / 8;
|
||||
page_pools.epp_max_pools = npages_to_npools(page_pools.epp_max_pages);
|
||||
|
||||
init_waitqueue_head(&page_pools.epp_waitq);
|
||||
|
||||
@@ -747,7 +747,7 @@ static void __init balloon_add_region(unsigned long start_pfn,
|
||||
|
||||
for (pfn = start_pfn; pfn < extra_pfn_end; pfn++) {
|
||||
page = pfn_to_page(pfn);
|
||||
/* totalram_pages and totalhigh_pages do not
|
||||
/* totalram_pages() and totalhigh_pages() do not
|
||||
include the boot-time balloon extension, so
|
||||
don't subtract from it. */
|
||||
__balloon_append(page);
|
||||
|
||||
@@ -188,7 +188,7 @@ static void selfballoon_process(struct work_struct *work)
|
||||
bool reset_timer = false;
|
||||
|
||||
if (xen_selfballooning_enabled) {
|
||||
cur_pages = totalram_pages;
|
||||
cur_pages = totalram_pages();
|
||||
tgt_pages = cur_pages; /* default is no change */
|
||||
goal_pages = vm_memory_committed() +
|
||||
totalreserve_pages +
|
||||
@@ -226,7 +226,7 @@ static void selfballoon_process(struct work_struct *work)
|
||||
if (tgt_pages < floor_pages)
|
||||
tgt_pages = floor_pages;
|
||||
balloon_set_new_target(tgt_pages +
|
||||
balloon_stats.current_pages - totalram_pages);
|
||||
balloon_stats.current_pages - totalram_pages());
|
||||
reset_timer = true;
|
||||
}
|
||||
#ifdef CONFIG_FRONTSWAP
|
||||
@@ -568,7 +568,7 @@ int xen_selfballoon_init(bool use_selfballooning, bool use_frontswap_selfshrink)
|
||||
* much more reliably and response faster in some cases.
|
||||
*/
|
||||
if (!selfballoon_reserved_mb) {
|
||||
reserve_pages = totalram_pages / 10;
|
||||
reserve_pages = totalram_pages() / 10;
|
||||
selfballoon_reserved_mb = PAGES2MB(reserve_pages);
|
||||
}
|
||||
schedule_delayed_work(&selfballoon_worker, selfballoon_interval * HZ);
|
||||
|
||||
@@ -167,8 +167,7 @@ static u8 *alloc_bitmap(u32 bitmap_size)
|
||||
if (mem)
|
||||
return mem;
|
||||
|
||||
return __vmalloc(bitmap_size, GFP_NOFS | __GFP_HIGHMEM | __GFP_ZERO,
|
||||
PAGE_KERNEL);
|
||||
return __vmalloc(bitmap_size, GFP_NOFS | __GFP_ZERO, PAGE_KERNEL);
|
||||
}
|
||||
|
||||
int convert_free_space_to_bitmaps(struct btrfs_trans_handle *trans,
|
||||
|
||||
@@ -2387,7 +2387,7 @@ int ext4_mb_alloc_groupinfo(struct super_block *sb, ext4_group_t ngroups)
|
||||
return 0;
|
||||
|
||||
size = roundup_pow_of_two(sizeof(*sbi->s_group_info) * size);
|
||||
new_groupinfo = ext4_kvzalloc(size, GFP_KERNEL);
|
||||
new_groupinfo = kvzalloc(size, GFP_KERNEL);
|
||||
if (!new_groupinfo) {
|
||||
ext4_msg(sb, KERN_ERR, "can't allocate buddy meta group");
|
||||
return -ENOMEM;
|
||||
|
||||
@@ -2130,8 +2130,8 @@ int ext4_alloc_flex_bg_array(struct super_block *sb, ext4_group_t ngroup)
|
||||
if (size <= sbi->s_flex_groups_allocated)
|
||||
return 0;
|
||||
|
||||
new_groups = ext4_kvzalloc(roundup_pow_of_two(size *
|
||||
sizeof(*sbi->s_flex_groups)), GFP_KERNEL);
|
||||
new_groups = kvzalloc(roundup_pow_of_two(size *
|
||||
sizeof(*sbi->s_flex_groups)), GFP_KERNEL);
|
||||
if (!new_groups) {
|
||||
ext4_msg(sb, KERN_ERR,
|
||||
"not enough memory for %d flex group pointers", size);
|
||||
@@ -3993,7 +3993,7 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
|
||||
}
|
||||
}
|
||||
rcu_assign_pointer(sbi->s_group_desc,
|
||||
ext4_kvmalloc(db_count *
|
||||
kvmalloc(db_count *
|
||||
sizeof(struct buffer_head *),
|
||||
GFP_KERNEL));
|
||||
if (sbi->s_group_desc == NULL) {
|
||||
|
||||
@@ -42,7 +42,7 @@ static void *alloc_fdmem(size_t size)
|
||||
if (data != NULL)
|
||||
return data;
|
||||
}
|
||||
return __vmalloc(size, GFP_KERNEL_ACCOUNT | __GFP_HIGHMEM, PAGE_KERNEL);
|
||||
return __vmalloc(size, GFP_KERNEL_ACCOUNT, PAGE_KERNEL);
|
||||
}
|
||||
|
||||
static void __free_fdtable(struct fdtable *fdt)
|
||||
|
||||
@@ -334,10 +334,10 @@ void __init files_init(void)
|
||||
void __init files_maxfiles_init(void)
|
||||
{
|
||||
unsigned long n;
|
||||
unsigned long memreserve = (totalram_pages - nr_free_pages()) * 3/2;
|
||||
unsigned long memreserve = (totalram_pages() - nr_free_pages()) * 3/2;
|
||||
|
||||
memreserve = min(memreserve, totalram_pages - 1);
|
||||
n = ((totalram_pages - memreserve) * (PAGE_SIZE / 1024)) / 10;
|
||||
memreserve = min(memreserve, totalram_pages() - 1);
|
||||
n = ((totalram_pages() - memreserve) * (PAGE_SIZE / 1024)) / 10;
|
||||
|
||||
files_stat.max_files = max_t(unsigned long, n, NR_FILE);
|
||||
}
|
||||
|
||||
@@ -829,7 +829,7 @@ static const struct super_operations fuse_super_operations = {
|
||||
static void sanitize_global_limit(unsigned *limit)
|
||||
{
|
||||
if (*limit == 0)
|
||||
*limit = ((totalram_pages << PAGE_SHIFT) >> 13) /
|
||||
*limit = ((totalram_pages() << PAGE_SHIFT) >> 13) /
|
||||
sizeof(struct fuse_req);
|
||||
|
||||
if (*limit >= 1 << 16)
|
||||
|
||||
@@ -576,7 +576,7 @@ static const struct file_operations proc_lstats_operations = {
|
||||
static int proc_oom_score(struct seq_file *m, struct pid_namespace *ns,
|
||||
struct pid *pid, struct task_struct *task)
|
||||
{
|
||||
unsigned long totalpages = totalram_pages + total_swap_pages;
|
||||
unsigned long totalpages = totalram_pages() + total_swap_pages;
|
||||
unsigned long points = 0;
|
||||
|
||||
points = oom_badness(task, NULL, NULL, totalpages) *
|
||||
|
||||
@@ -25,24 +25,7 @@ static void seq_set_overflow(struct seq_file *m)
|
||||
|
||||
static void *seq_buf_alloc(unsigned long size)
|
||||
{
|
||||
void *buf;
|
||||
gfp_t gfp = GFP_KERNEL;
|
||||
|
||||
if (unlikely(size > MAX_RW_COUNT))
|
||||
return NULL;
|
||||
|
||||
/*
|
||||
* For high order allocations, use __GFP_NORETRY to avoid oom-killing -
|
||||
* it's better to fall back to vmalloc() than to kill things. For small
|
||||
* allocations, just use GFP_KERNEL which will oom kill, thus no need
|
||||
* for vmalloc fallback.
|
||||
*/
|
||||
if (size > PAGE_SIZE)
|
||||
gfp |= __GFP_NORETRY | __GFP_NOWARN;
|
||||
buf = kmalloc(size, gfp);
|
||||
if (!buf && size > PAGE_SIZE)
|
||||
buf = vmalloc(size);
|
||||
return buf;
|
||||
return kvmalloc(size, GFP_KERNEL);
|
||||
}
|
||||
|
||||
/**
|
||||
|
||||
@@ -66,7 +66,7 @@ kmem_zalloc_large(size_t size, xfs_km_flags_t flags)
|
||||
noio_flag = memalloc_noio_save();
|
||||
|
||||
lflags = kmem_flags_convert(flags);
|
||||
ptr = __vmalloc(size, lflags | __GFP_HIGHMEM | __GFP_ZERO, PAGE_KERNEL);
|
||||
ptr = __vmalloc(size, lflags | __GFP_ZERO, PAGE_KERNEL);
|
||||
|
||||
if ((current->flags & PF_FSTRANS) || (flags & KM_NOFS))
|
||||
memalloc_noio_restore(noio_flag);
|
||||
|
||||
@@ -841,6 +841,10 @@ static inline bool arch_has_pfn_modify_check(void)
|
||||
}
|
||||
#endif /* !_HAVE_ARCH_PFN_MODIFY_ALLOWED */
|
||||
|
||||
#ifndef PAGE_KERNEL_EXEC
|
||||
# define PAGE_KERNEL_EXEC PAGE_KERNEL
|
||||
#endif
|
||||
|
||||
#endif /* !__ASSEMBLY__ */
|
||||
|
||||
#ifndef io_remap_pfn_range
|
||||
|
||||
12
include/asm-generic/set_memory.h
Normal file
12
include/asm-generic/set_memory.h
Normal file
@@ -0,0 +1,12 @@
|
||||
#ifndef __ASM_SET_MEMORY_H
|
||||
#define __ASM_SET_MEMORY_H
|
||||
|
||||
/*
|
||||
* Functions to change memory attributes.
|
||||
*/
|
||||
int set_memory_ro(unsigned long addr, int numpages);
|
||||
int set_memory_rw(unsigned long addr, int numpages);
|
||||
int set_memory_x(unsigned long addr, int numpages);
|
||||
int set_memory_nx(unsigned long addr, int numpages);
|
||||
|
||||
#endif
|
||||
@@ -37,8 +37,7 @@ static __inline__ void *drm_calloc_large(size_t nmemb, size_t size)
|
||||
if (size * nmemb <= PAGE_SIZE)
|
||||
return kcalloc(nmemb, size, GFP_KERNEL);
|
||||
|
||||
return __vmalloc(size * nmemb,
|
||||
GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, PAGE_KERNEL);
|
||||
return vzalloc(size * nmemb);
|
||||
}
|
||||
|
||||
/* Modeled after cairo's malloc_ab, it's like calloc but without the zeroing. */
|
||||
@@ -50,8 +49,7 @@ static __inline__ void *drm_malloc_ab(size_t nmemb, size_t size)
|
||||
if (size * nmemb <= PAGE_SIZE)
|
||||
return kmalloc(nmemb * size, GFP_KERNEL);
|
||||
|
||||
return __vmalloc(size * nmemb,
|
||||
GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL);
|
||||
return vmalloc(size * nmemb);
|
||||
}
|
||||
|
||||
static __inline__ void *drm_malloc_gfp(size_t nmemb, size_t size, gfp_t gfp)
|
||||
@@ -69,8 +67,7 @@ static __inline__ void *drm_malloc_gfp(size_t nmemb, size_t size, gfp_t gfp)
|
||||
return ptr;
|
||||
}
|
||||
|
||||
return __vmalloc(size * nmemb,
|
||||
gfp | __GFP_HIGHMEM, PAGE_KERNEL);
|
||||
return __vmalloc(size * nmemb, gfp, PAGE_KERNEL);
|
||||
}
|
||||
|
||||
static __inline void drm_free_large(void *ptr)
|
||||
|
||||
@@ -35,7 +35,31 @@ static inline void invalidate_kernel_vmap_range(void *vaddr, int size)
|
||||
|
||||
/* declarations for linux/mm/highmem.c */
|
||||
unsigned int nr_free_highpages(void);
|
||||
extern unsigned long totalhigh_pages;
|
||||
extern atomic_long_t _totalhigh_pages;
|
||||
static inline unsigned long totalhigh_pages(void)
|
||||
{
|
||||
return (unsigned long)atomic_long_read(&_totalhigh_pages);
|
||||
}
|
||||
|
||||
static inline void totalhigh_pages_inc(void)
|
||||
{
|
||||
atomic_long_inc(&_totalhigh_pages);
|
||||
}
|
||||
|
||||
static inline void totalhigh_pages_dec(void)
|
||||
{
|
||||
atomic_long_dec(&_totalhigh_pages);
|
||||
}
|
||||
|
||||
static inline void totalhigh_pages_add(long count)
|
||||
{
|
||||
atomic_long_add(count, &_totalhigh_pages);
|
||||
}
|
||||
|
||||
static inline void totalhigh_pages_set(long val)
|
||||
{
|
||||
atomic_long_set(&_totalhigh_pages, val);
|
||||
}
|
||||
|
||||
void kmap_flush_unused(void);
|
||||
|
||||
@@ -57,7 +81,7 @@ static inline struct page *kmap_to_page(void *addr)
|
||||
return virt_to_page(addr);
|
||||
}
|
||||
|
||||
#define totalhigh_pages 0UL
|
||||
static inline unsigned long totalhigh_pages(void) { return 0UL; }
|
||||
|
||||
#ifndef ARCH_HAS_KMAP
|
||||
static inline void *kmap(struct page *page)
|
||||
|
||||
@@ -22,6 +22,7 @@
|
||||
#define __KMEMLEAK_H
|
||||
|
||||
#include <linux/slab.h>
|
||||
#include <linux/vmalloc.h>
|
||||
|
||||
#ifdef CONFIG_DEBUG_KMEMLEAK
|
||||
|
||||
@@ -30,6 +31,8 @@ extern void kmemleak_alloc(const void *ptr, size_t size, int min_count,
|
||||
gfp_t gfp) __ref;
|
||||
extern void kmemleak_alloc_percpu(const void __percpu *ptr, size_t size,
|
||||
gfp_t gfp) __ref;
|
||||
extern void kmemleak_vmalloc(const struct vm_struct *area, size_t size,
|
||||
gfp_t gfp) __ref;
|
||||
extern void kmemleak_free(const void *ptr) __ref;
|
||||
extern void kmemleak_free_part(const void *ptr, size_t size) __ref;
|
||||
extern void kmemleak_free_percpu(const void __percpu *ptr) __ref;
|
||||
@@ -81,6 +84,10 @@ static inline void kmemleak_alloc_percpu(const void __percpu *ptr, size_t size,
|
||||
gfp_t gfp)
|
||||
{
|
||||
}
|
||||
static inline void kmemleak_vmalloc(const struct vm_struct *area, size_t size,
|
||||
gfp_t gfp)
|
||||
{
|
||||
}
|
||||
static inline void kmemleak_free(const void *ptr)
|
||||
{
|
||||
}
|
||||
|
||||
@@ -762,8 +762,6 @@ void kvm_arch_check_processor_compat(void *rtn);
|
||||
int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu);
|
||||
int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu);
|
||||
|
||||
void *kvm_kvzalloc(unsigned long size);
|
||||
|
||||
#ifndef __KVM_HAVE_ARCH_VM_ALLOC
|
||||
static inline struct kvm *kvm_arch_alloc_vm(void)
|
||||
{
|
||||
|
||||
@@ -556,6 +556,19 @@ static inline void list_splice_tail_init(struct list_head *list,
|
||||
for (; &pos->member != (head); \
|
||||
pos = list_next_entry(pos, member))
|
||||
|
||||
/**
|
||||
* list_for_each_entry_from_reverse - iterate backwards over list of given type
|
||||
* from the current point
|
||||
* @pos: the type * to use as a loop cursor.
|
||||
* @head: the head for your list.
|
||||
* @member: the name of the list_head within the struct.
|
||||
*
|
||||
* Iterate backwards over list of given type, continuing from current position.
|
||||
*/
|
||||
#define list_for_each_entry_from_reverse(pos, head, member) \
|
||||
for (; &pos->member != (head); \
|
||||
pos = list_prev_entry(pos, member))
|
||||
|
||||
/**
|
||||
* list_for_each_entry_safe - iterate over list of given type safe against removal of list entry
|
||||
* @pos: the type * to use as a loop cursor.
|
||||
|
||||
@@ -121,6 +121,25 @@ static inline void init_llist_head(struct llist_head *list)
|
||||
#define llist_for_each(pos, node) \
|
||||
for ((pos) = (node); pos; (pos) = (pos)->next)
|
||||
|
||||
/**
|
||||
* llist_for_each_safe - iterate over some deleted entries of a lock-less list
|
||||
* safe against removal of list entry
|
||||
* @pos: the &struct llist_node to use as a loop cursor
|
||||
* @n: another &struct llist_node to use as temporary storage
|
||||
* @node: the first entry of deleted list entries
|
||||
*
|
||||
* In general, some entries of the lock-less list can be traversed
|
||||
* safely only after being deleted from list, so start with an entry
|
||||
* instead of list head.
|
||||
*
|
||||
* If being used on entries deleted from lock-less list directly, the
|
||||
* traverse order is from the newest to the oldest added entry. If
|
||||
* you want to traverse from the oldest to the newest, you must
|
||||
* reverse the order by yourself before traversing.
|
||||
*/
|
||||
#define llist_for_each_safe(pos, n, node) \
|
||||
for ((pos) = (node); (pos) && ((n) = (pos)->next, true); (pos) = (n))
|
||||
|
||||
/**
|
||||
* llist_for_each_entry - iterate over some deleted entries of lock-less list of given type
|
||||
* @pos: the type * to use as a loop cursor.
|
||||
|
||||
@@ -44,7 +44,32 @@ static inline void set_max_mapnr(unsigned long limit)
|
||||
static inline void set_max_mapnr(unsigned long limit) { }
|
||||
#endif
|
||||
|
||||
extern unsigned long totalram_pages;
|
||||
extern atomic_long_t _totalram_pages;
|
||||
static inline unsigned long totalram_pages(void)
|
||||
{
|
||||
return (unsigned long)atomic_long_read(&_totalram_pages);
|
||||
}
|
||||
|
||||
static inline void totalram_pages_inc(void)
|
||||
{
|
||||
atomic_long_inc(&_totalram_pages);
|
||||
}
|
||||
|
||||
static inline void totalram_pages_dec(void)
|
||||
{
|
||||
atomic_long_dec(&_totalram_pages);
|
||||
}
|
||||
|
||||
static inline void totalram_pages_add(long count)
|
||||
{
|
||||
atomic_long_add(count, &_totalram_pages);
|
||||
}
|
||||
|
||||
static inline void totalram_pages_set(long val)
|
||||
{
|
||||
atomic_long_set(&_totalram_pages, val);
|
||||
}
|
||||
|
||||
extern void * high_memory;
|
||||
extern int page_cluster;
|
||||
|
||||
@@ -484,16 +509,16 @@ unsigned long vmalloc_to_pfn(const void *addr);
|
||||
* On nommu, vmalloc/vfree wrap through kmalloc/kfree directly, so there
|
||||
* is no special casing required.
|
||||
*/
|
||||
|
||||
#ifdef CONFIG_MMU
|
||||
extern int is_vmalloc_addr(const void *x);
|
||||
#else
|
||||
static inline int is_vmalloc_addr(const void *x)
|
||||
static inline bool is_vmalloc_addr(const void *x)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
#endif
|
||||
#ifdef CONFIG_MMU
|
||||
unsigned long addr = (unsigned long)x;
|
||||
|
||||
return addr >= VMALLOC_START && addr < VMALLOC_END;
|
||||
#else
|
||||
return false;
|
||||
#endif
|
||||
}
|
||||
#ifdef CONFIG_MMU
|
||||
extern int is_vmalloc_or_module_addr(const void *x);
|
||||
#else
|
||||
@@ -516,6 +541,7 @@ static inline void *kvzalloc(size_t size, gfp_t flags)
|
||||
{
|
||||
return kvmalloc(size, flags | __GFP_ZERO);
|
||||
}
|
||||
|
||||
extern void kvfree(const void *addr);
|
||||
|
||||
/*
|
||||
|
||||
57
include/linux/set_memory.h
Normal file
57
include/linux/set_memory.h
Normal file
@@ -0,0 +1,57 @@
|
||||
/*
|
||||
* Copyright 2017, Michael Ellerman, IBM Corporation.
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or
|
||||
* modify it under the terms of the GNU General Public License version
|
||||
* 2 as published by the Free Software Foundation;
|
||||
*/
|
||||
#ifndef _LINUX_SET_MEMORY_H_
|
||||
#define _LINUX_SET_MEMORY_H_
|
||||
|
||||
#ifdef CONFIG_ARCH_HAS_SET_MEMORY
|
||||
#include <asm/set_memory.h>
|
||||
#else
|
||||
static inline int set_memory_ro(unsigned long addr, int numpages) { return 0; }
|
||||
static inline int set_memory_rw(unsigned long addr, int numpages) { return 0; }
|
||||
static inline int set_memory_x(unsigned long addr, int numpages) { return 0; }
|
||||
static inline int set_memory_nx(unsigned long addr, int numpages) { return 0; }
|
||||
#endif
|
||||
|
||||
#ifndef CONFIG_ARCH_HAS_SET_DIRECT_MAP
|
||||
static inline int set_direct_map_invalid_noflush(struct page *page)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
static inline int set_direct_map_default_noflush(struct page *page)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
#endif
|
||||
|
||||
#ifndef set_mce_nospec
|
||||
static inline int set_mce_nospec(unsigned long pfn)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
#endif
|
||||
|
||||
#ifndef clear_mce_nospec
|
||||
static inline int clear_mce_nospec(unsigned long pfn)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
#endif
|
||||
|
||||
#ifndef CONFIG_ARCH_HAS_MEM_ENCRYPT
|
||||
static inline int set_memory_encrypted(unsigned long addr, int numpages)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
|
||||
static inline int set_memory_decrypted(unsigned long addr, int numpages)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
#endif /* CONFIG_ARCH_HAS_MEM_ENCRYPT */
|
||||
|
||||
#endif /* _LINUX_SET_MEMORY_H_ */
|
||||
@@ -286,7 +286,6 @@ static inline void workingset_node_shadows_dec(struct radix_tree_node *node)
|
||||
}
|
||||
|
||||
/* linux/mm/page_alloc.c */
|
||||
extern unsigned long totalram_pages;
|
||||
extern unsigned long totalreserve_pages;
|
||||
extern unsigned long nr_free_buffer_pages(void);
|
||||
extern unsigned long nr_free_pagecache_pages(void);
|
||||
|
||||
@@ -19,8 +19,11 @@ struct notifier_block; /* in notifier.h */
|
||||
#define VM_UNINITIALIZED 0x00000020 /* vm_struct is not fully initialized */
|
||||
#define VM_NO_GUARD 0x00000040 /* don't add guard page */
|
||||
#define VM_KASAN 0x00000080 /* has allocated kasan shadow memory */
|
||||
#define VM_LOWMEM 0x00000100 /* Tracking of direct mapped lowmem */
|
||||
|
||||
/*
|
||||
* Memory with VM_FLUSH_RESET_PERMS cannot be freed in an interrupt or with
|
||||
* vfree_atomic().
|
||||
*/
|
||||
#define VM_FLUSH_RESET_PERMS 0x00000100 /* Reset direct map and flush TLB on unmap */
|
||||
/* bits [20..32] reserved for arch specific ioremap internals */
|
||||
|
||||
/*
|
||||
@@ -45,12 +48,16 @@ struct vm_struct {
|
||||
struct vmap_area {
|
||||
unsigned long va_start;
|
||||
unsigned long va_end;
|
||||
|
||||
/*
|
||||
* Largest available free size in subtree.
|
||||
*/
|
||||
unsigned long subtree_max_size;
|
||||
unsigned long flags;
|
||||
struct rb_node rb_node; /* address sorted rbtree */
|
||||
struct list_head list; /* address sorted list */
|
||||
struct llist_node purge_list; /* "lazy purge" list */
|
||||
struct vm_struct *vm;
|
||||
struct rcu_head rcu_head;
|
||||
};
|
||||
|
||||
/*
|
||||
@@ -85,6 +92,17 @@ extern void *__vmalloc_node_range(unsigned long size, unsigned long align,
|
||||
unsigned long start, unsigned long end, gfp_t gfp_mask,
|
||||
pgprot_t prot, unsigned long vm_flags, int node,
|
||||
const void *caller);
|
||||
#ifndef CONFIG_MMU
|
||||
extern void *__vmalloc_node_flags(unsigned long size, int node, gfp_t flags);
|
||||
static inline void *__vmalloc_node_flags_caller(unsigned long size, int node,
|
||||
gfp_t flags, void *caller)
|
||||
{
|
||||
return __vmalloc_node_flags(size, node, flags);
|
||||
}
|
||||
#else
|
||||
extern void *__vmalloc_node_flags_caller(unsigned long size,
|
||||
int node, gfp_t flags, void *caller);
|
||||
#endif
|
||||
|
||||
extern void vfree(const void *addr);
|
||||
extern void vfree_atomic(const void *addr);
|
||||
@@ -145,6 +163,13 @@ extern int map_kernel_range_noflush(unsigned long start, unsigned long size,
|
||||
pgprot_t prot, struct page **pages);
|
||||
extern void unmap_kernel_range_noflush(unsigned long addr, unsigned long size);
|
||||
extern void unmap_kernel_range(unsigned long addr, unsigned long size);
|
||||
static inline void set_vm_flush_reset_perms(void *addr)
|
||||
{
|
||||
struct vm_struct *vm = find_vm_area(addr);
|
||||
|
||||
if (vm)
|
||||
vm->flags |= VM_FLUSH_RESET_PERMS;
|
||||
}
|
||||
#else
|
||||
static inline int
|
||||
map_kernel_range_noflush(unsigned long start, unsigned long size,
|
||||
@@ -160,6 +185,9 @@ static inline void
|
||||
unmap_kernel_range(unsigned long addr, unsigned long size)
|
||||
{
|
||||
}
|
||||
static inline void set_vm_flush_reset_perms(void *addr)
|
||||
{
|
||||
}
|
||||
#endif
|
||||
|
||||
/* Allocate/destroy a 'vmalloc' VM area. */
|
||||
@@ -176,13 +204,6 @@ extern long vwrite(char *buf, char *addr, unsigned long count);
|
||||
extern struct list_head vmap_area_list;
|
||||
extern __init void vm_area_add_early(struct vm_struct *vm);
|
||||
extern __init void vm_area_register_early(struct vm_struct *vm, size_t align);
|
||||
extern __init int vm_area_check_early(struct vm_struct *vm);
|
||||
#ifdef CONFIG_ENABLE_VMALLOC_SAVING
|
||||
extern void mark_vmalloc_reserved_area(void *addr, unsigned long size);
|
||||
#else
|
||||
static inline void mark_vmalloc_reserved_area(void *addr, unsigned long size)
|
||||
{ };
|
||||
#endif
|
||||
|
||||
#ifdef CONFIG_SMP
|
||||
# ifdef CONFIG_MMU
|
||||
@@ -208,12 +229,7 @@ pcpu_free_vm_areas(struct vm_struct **vms, int nr_vms)
|
||||
#endif
|
||||
|
||||
#ifdef CONFIG_MMU
|
||||
#ifdef CONFIG_ENABLE_VMALLOC_SAVING
|
||||
extern unsigned long total_vmalloc_size;
|
||||
#define VMALLOC_TOTAL total_vmalloc_size
|
||||
#else
|
||||
#define VMALLOC_TOTAL (VMALLOC_END - VMALLOC_START)
|
||||
#endif
|
||||
#else
|
||||
#define VMALLOC_TOTAL 0UL
|
||||
#endif
|
||||
|
||||
@@ -403,12 +403,7 @@ void ipc_rmid(struct ipc_ids *ids, struct kern_ipc_perm *ipcp)
|
||||
*/
|
||||
void *ipc_alloc(int size)
|
||||
{
|
||||
void *out;
|
||||
if (size > PAGE_SIZE)
|
||||
out = vmalloc(size);
|
||||
else
|
||||
out = kmalloc(size, GFP_KERNEL);
|
||||
return out;
|
||||
return kvmalloc(size, GFP_KERNEL);
|
||||
}
|
||||
|
||||
/**
|
||||
|
||||
@@ -81,8 +81,7 @@ void *bpf_internal_load_pointer_neg_helper(const struct sk_buff *skb, int k, uns
|
||||
|
||||
struct bpf_prog *bpf_prog_alloc(unsigned int size, gfp_t gfp_extra_flags)
|
||||
{
|
||||
gfp_t gfp_flags = GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO |
|
||||
gfp_extra_flags;
|
||||
gfp_t gfp_flags = GFP_KERNEL | __GFP_ZERO | gfp_extra_flags;
|
||||
struct bpf_prog_aux *aux;
|
||||
struct bpf_prog *fp;
|
||||
|
||||
@@ -198,8 +197,7 @@ void bpf_prog_free_linfo(struct bpf_prog *prog)
|
||||
struct bpf_prog *bpf_prog_realloc(struct bpf_prog *fp_old, unsigned int size,
|
||||
gfp_t gfp_extra_flags)
|
||||
{
|
||||
gfp_t gfp_flags = GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO |
|
||||
gfp_extra_flags;
|
||||
gfp_t gfp_flags = GFP_KERNEL | __GFP_ZERO | gfp_extra_flags;
|
||||
struct bpf_prog *fp;
|
||||
u32 pages, delta;
|
||||
int ret;
|
||||
@@ -937,8 +935,7 @@ out:
|
||||
static struct bpf_prog *bpf_prog_clone_create(struct bpf_prog *fp_other,
|
||||
gfp_t gfp_extra_flags)
|
||||
{
|
||||
gfp_t gfp_flags = GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO |
|
||||
gfp_extra_flags;
|
||||
gfp_t gfp_flags = GFP_KERNEL | __GFP_ZERO | gfp_extra_flags;
|
||||
struct bpf_prog *fp;
|
||||
|
||||
fp = __vmalloc(fp_other->pages * PAGE_SIZE, gfp_flags, PAGE_KERNEL);
|
||||
|
||||
@@ -152,6 +152,7 @@ static void *__bpf_map_area_alloc(size_t size, int numa_node, bool mmapable)
|
||||
}
|
||||
return __vmalloc_node_flags_caller(size, numa_node, GFP_KERNEL | flags,
|
||||
__builtin_return_address(0));
|
||||
/*return __vmalloc(size, GFP_KERNEL | flags, PAGE_KERNEL);*/
|
||||
}
|
||||
|
||||
void *bpf_map_area_alloc(size_t size, int numa_node)
|
||||
|
||||
@@ -199,7 +199,7 @@ static unsigned long *alloc_thread_stack_node(struct task_struct *tsk, int node)
|
||||
|
||||
stack = __vmalloc_node_range(THREAD_SIZE, THREAD_SIZE,
|
||||
VMALLOC_START, VMALLOC_END,
|
||||
THREADINFO_GFP | __GFP_HIGHMEM,
|
||||
THREADINFO_GFP,
|
||||
PAGE_KERNEL,
|
||||
0, node, __builtin_return_address(0));
|
||||
|
||||
@@ -420,10 +420,10 @@ static void set_max_threads(unsigned int max_threads_suggested)
|
||||
* The number of threads shall be limited such that the thread
|
||||
* structures may only consume a small part of the available memory.
|
||||
*/
|
||||
if (fls64(totalram_pages) + fls64(PAGE_SIZE) > 64)
|
||||
if (fls64(totalram_pages()) + fls64(PAGE_SIZE) > 64)
|
||||
threads = MAX_THREADS;
|
||||
else
|
||||
threads = div64_u64((u64) totalram_pages * (u64) PAGE_SIZE,
|
||||
threads = div64_u64((u64) totalram_pages() * (u64) PAGE_SIZE,
|
||||
(u64) THREAD_SIZE * 8UL);
|
||||
|
||||
if (threads > max_threads_suggested)
|
||||
|
||||
@@ -18,7 +18,7 @@ struct group_info *groups_alloc(int gidsetsize)
|
||||
len = sizeof(struct group_info) + sizeof(kgid_t) * gidsetsize;
|
||||
gi = kmalloc(len, GFP_KERNEL_ACCOUNT|__GFP_NOWARN|__GFP_NORETRY);
|
||||
if (!gi)
|
||||
gi = __vmalloc(len, GFP_KERNEL_ACCOUNT|__GFP_HIGHMEM, PAGE_KERNEL);
|
||||
gi = __vmalloc(len, GFP_KERNEL_ACCOUNT, PAGE_KERNEL);
|
||||
if (!gi)
|
||||
return NULL;
|
||||
|
||||
|
||||
@@ -223,13 +223,13 @@ int sanity_check_segment_list(struct kimage *image)
|
||||
* wasted allocating pages, which can cause a soft lockup.
|
||||
*/
|
||||
for (i = 0; i < nr_segments; i++) {
|
||||
if (PAGE_COUNT(image->segment[i].memsz) > totalram_pages / 2)
|
||||
if (PAGE_COUNT(image->segment[i].memsz) > totalram_pages() / 2)
|
||||
return -EINVAL;
|
||||
|
||||
total_pages += PAGE_COUNT(image->segment[i].memsz);
|
||||
}
|
||||
|
||||
if (total_pages > totalram_pages / 2)
|
||||
if (total_pages > totalram_pages() / 2)
|
||||
return -EINVAL;
|
||||
|
||||
/*
|
||||
|
||||
@@ -2879,7 +2879,7 @@ static int copy_module_from_user(const void __user *umod, unsigned long len,
|
||||
|
||||
/* Suck in entire file: we'll want most of it. */
|
||||
info->hdr = __vmalloc(info->len,
|
||||
GFP_KERNEL | __GFP_HIGHMEM | __GFP_NOWARN, PAGE_KERNEL);
|
||||
GFP_KERNEL | __GFP_NOWARN, PAGE_KERNEL);
|
||||
if (!info->hdr)
|
||||
return -ENOMEM;
|
||||
|
||||
|
||||
@@ -101,7 +101,7 @@ unsigned long image_size;
|
||||
|
||||
void __init hibernate_image_size_init(void)
|
||||
{
|
||||
image_size = ((totalram_pages * 2) / 5) * PAGE_SIZE;
|
||||
image_size = ((totalram_pages() * 2) / 5) * PAGE_SIZE;
|
||||
}
|
||||
|
||||
/*
|
||||
|
||||
@@ -162,7 +162,7 @@ void free_bootmem_late(unsigned long physaddr, unsigned long size)
|
||||
|
||||
for (; cursor < end; cursor++) {
|
||||
__free_pages_bootmem(pfn_to_page(cursor), cursor, 0);
|
||||
totalram_pages++;
|
||||
totalram_pages_inc();
|
||||
}
|
||||
}
|
||||
|
||||
@@ -278,7 +278,7 @@ unsigned long __init free_all_bootmem(void)
|
||||
list_for_each_entry(bdata, &bdata_list, list)
|
||||
total_pages += free_all_bootmem_core(bdata);
|
||||
|
||||
totalram_pages += total_pages;
|
||||
totalram_pages_add(total_pages);
|
||||
|
||||
return total_pages;
|
||||
}
|
||||
|
||||
@@ -104,9 +104,8 @@ static inline wait_queue_head_t *get_pkmap_wait_queue_head(unsigned int color)
|
||||
}
|
||||
#endif
|
||||
|
||||
unsigned long totalhigh_pages __read_mostly;
|
||||
EXPORT_SYMBOL(totalhigh_pages);
|
||||
|
||||
atomic_long_t _totalhigh_pages __read_mostly;
|
||||
EXPORT_SYMBOL(_totalhigh_pages);
|
||||
|
||||
EXPORT_PER_CPU_SYMBOL(__kmap_atomic_idx);
|
||||
|
||||
|
||||
@@ -409,7 +409,7 @@ static int __init hugepage_init(void)
|
||||
* where the extra memory used could hurt more than TLB overhead
|
||||
* is likely to save. The admin can still enable it through /sys.
|
||||
*/
|
||||
if (totalram_pages < (512 << (20 - PAGE_SHIFT))) {
|
||||
if (totalram_pages() < (512 << (20 - PAGE_SHIFT))) {
|
||||
transparent_hugepage_flags = 0;
|
||||
return 0;
|
||||
}
|
||||
|
||||
@@ -2220,7 +2220,7 @@ static void __init gather_bootmem_prealloc(void)
|
||||
prep_new_huge_page(h, page, page_to_nid(page));
|
||||
/*
|
||||
* If we had gigantic hugepages allocated at boot time, we need
|
||||
* to restore the 'stolen' pages to totalram_pages in order to
|
||||
* to restore the 'stolen' pages to totalram_pages() in order to
|
||||
* fix confusing memory reports from free(1) and another
|
||||
* side-effects, like CommitLimit going negative.
|
||||
*/
|
||||
|
||||
@@ -691,7 +691,7 @@ int kasan_module_alloc(void *addr, size_t size)
|
||||
|
||||
ret = __vmalloc_node_range(shadow_size, 1, shadow_start,
|
||||
shadow_start + shadow_size,
|
||||
GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO,
|
||||
GFP_KERNEL | __GFP_ZERO,
|
||||
PAGE_KERNEL, VM_NO_GUARD, NUMA_NO_NODE,
|
||||
__builtin_return_address(0));
|
||||
|
||||
|
||||
@@ -236,7 +236,7 @@ void quarantine_reduce(void)
|
||||
* Update quarantine size in case of hotplug. Allocate a fraction of
|
||||
* the installed memory to quarantine minus per-cpu queue limits.
|
||||
*/
|
||||
total_size = (READ_ONCE(totalram_pages) << PAGE_SHIFT) /
|
||||
total_size = (totalram_pages() << PAGE_SHIFT) /
|
||||
QUARANTINE_FRACTION;
|
||||
percpu_quarantines = QUARANTINE_PERCPU_SIZE * num_online_cpus();
|
||||
new_quarantine_size = (total_size < percpu_quarantines) ?
|
||||
|
||||
136
mm/kmemleak.c
136
mm/kmemleak.c
@@ -148,7 +148,7 @@ struct kmemleak_scan_area {
|
||||
*/
|
||||
struct kmemleak_object {
|
||||
spinlock_t lock;
|
||||
unsigned long flags; /* object status flags */
|
||||
unsigned int flags; /* object status flags */
|
||||
struct list_head object_list;
|
||||
struct list_head gray_list;
|
||||
struct rb_node rb_node;
|
||||
@@ -157,6 +157,8 @@ struct kmemleak_object {
|
||||
atomic_t use_count;
|
||||
unsigned long pointer;
|
||||
size_t size;
|
||||
/* pass surplus references to this pointer */
|
||||
unsigned long excess_ref;
|
||||
/* minimum number of a pointers found before it is considered leak */
|
||||
int min_count;
|
||||
/* the total number of pointers found pointing to this object */
|
||||
@@ -263,7 +265,8 @@ enum {
|
||||
KMEMLEAK_NOT_LEAK,
|
||||
KMEMLEAK_IGNORE,
|
||||
KMEMLEAK_SCAN_AREA,
|
||||
KMEMLEAK_NO_SCAN
|
||||
KMEMLEAK_NO_SCAN,
|
||||
KMEMLEAK_SET_EXCESS_REF
|
||||
};
|
||||
|
||||
/*
|
||||
@@ -272,9 +275,12 @@ enum {
|
||||
*/
|
||||
struct early_log {
|
||||
int op_type; /* kmemleak operation type */
|
||||
const void *ptr; /* allocated/freed memory block */
|
||||
size_t size; /* memory block size */
|
||||
int min_count; /* minimum reference count */
|
||||
const void *ptr; /* allocated/freed memory block */
|
||||
union {
|
||||
size_t size; /* memory block size */
|
||||
unsigned long excess_ref; /* surplus reference passing */
|
||||
};
|
||||
unsigned long trace[MAX_TRACE]; /* stack trace */
|
||||
unsigned int trace_len; /* stack trace length */
|
||||
};
|
||||
@@ -403,7 +409,7 @@ static void dump_object_info(struct kmemleak_object *object)
|
||||
object->comm, object->pid, object->jiffies);
|
||||
pr_notice(" min_count = %d\n", object->min_count);
|
||||
pr_notice(" count = %d\n", object->count);
|
||||
pr_notice(" flags = 0x%lx\n", object->flags);
|
||||
pr_notice(" flags = 0x%x\n", object->flags);
|
||||
pr_notice(" checksum = %u\n", object->checksum);
|
||||
pr_notice(" backtrace:\n");
|
||||
print_stack_trace(&trace, 4);
|
||||
@@ -572,6 +578,7 @@ static struct kmemleak_object *create_object(unsigned long ptr, size_t size,
|
||||
object->flags = OBJECT_ALLOCATED;
|
||||
object->pointer = ptr;
|
||||
object->size = size;
|
||||
object->excess_ref = 0;
|
||||
object->min_count = min_count;
|
||||
object->count = 0; /* white color initially */
|
||||
object->jiffies = jiffies;
|
||||
@@ -804,6 +811,30 @@ out:
|
||||
put_object(object);
|
||||
}
|
||||
|
||||
/*
|
||||
* Any surplus references (object already gray) to 'ptr' are passed to
|
||||
* 'excess_ref'. This is used in the vmalloc() case where a pointer to
|
||||
* vm_struct may be used as an alternative reference to the vmalloc'ed object
|
||||
* (see free_thread_stack()).
|
||||
*/
|
||||
static void object_set_excess_ref(unsigned long ptr, unsigned long excess_ref)
|
||||
{
|
||||
unsigned long flags;
|
||||
struct kmemleak_object *object;
|
||||
|
||||
object = find_and_get_object(ptr, 0);
|
||||
if (!object) {
|
||||
kmemleak_warn("Setting excess_ref on unknown object at 0x%08lx\n",
|
||||
ptr);
|
||||
return;
|
||||
}
|
||||
|
||||
spin_lock_irqsave(&object->lock, flags);
|
||||
object->excess_ref = excess_ref;
|
||||
spin_unlock_irqrestore(&object->lock, flags);
|
||||
put_object(object);
|
||||
}
|
||||
|
||||
/*
|
||||
* Set the OBJECT_NO_SCAN flag for the object corresponding to the give
|
||||
* pointer. Such object will not be scanned by kmemleak but references to it
|
||||
@@ -918,7 +949,7 @@ static void early_alloc_percpu(struct early_log *log)
|
||||
* @gfp: kmalloc() flags used for kmemleak internal memory allocations
|
||||
*
|
||||
* This function is called from the kernel allocators when a new object
|
||||
* (memory block) is allocated (kmem_cache_alloc, kmalloc, vmalloc etc.).
|
||||
* (memory block) is allocated (kmem_cache_alloc, kmalloc etc.).
|
||||
*/
|
||||
void __ref kmemleak_alloc(const void *ptr, size_t size, int min_count,
|
||||
gfp_t gfp)
|
||||
@@ -961,6 +992,36 @@ void __ref kmemleak_alloc_percpu(const void __percpu *ptr, size_t size,
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(kmemleak_alloc_percpu);
|
||||
|
||||
/**
|
||||
* kmemleak_vmalloc - register a newly vmalloc'ed object
|
||||
* @area: pointer to vm_struct
|
||||
* @size: size of the object
|
||||
* @gfp: __vmalloc() flags used for kmemleak internal memory allocations
|
||||
*
|
||||
* This function is called from the vmalloc() kernel allocator when a new
|
||||
* object (memory block) is allocated.
|
||||
*/
|
||||
void __ref kmemleak_vmalloc(const struct vm_struct *area, size_t size, gfp_t gfp)
|
||||
{
|
||||
pr_debug("%s(0x%p, %zu)\n", __func__, area, size);
|
||||
|
||||
/*
|
||||
* A min_count = 2 is needed because vm_struct contains a reference to
|
||||
* the virtual address of the vmalloc'ed block.
|
||||
*/
|
||||
if (kmemleak_enabled) {
|
||||
create_object((unsigned long)area->addr, size, 2, gfp);
|
||||
object_set_excess_ref((unsigned long)area,
|
||||
(unsigned long)area->addr);
|
||||
} else if (kmemleak_early_log) {
|
||||
log_early(KMEMLEAK_ALLOC, area->addr, size, 2);
|
||||
/* reusing early_log.size for storing area->addr */
|
||||
log_early(KMEMLEAK_SET_EXCESS_REF,
|
||||
area, (unsigned long)area->addr, 0);
|
||||
}
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(kmemleak_vmalloc);
|
||||
|
||||
/**
|
||||
* kmemleak_free - unregister a previously registered object
|
||||
* @ptr: pointer to beginning of the object
|
||||
@@ -1197,6 +1258,30 @@ static bool update_checksum(struct kmemleak_object *object)
|
||||
return object->checksum != old_csum;
|
||||
}
|
||||
|
||||
/*
|
||||
* Update an object's references. object->lock must be held by the caller.
|
||||
*/
|
||||
static void update_refs(struct kmemleak_object *object)
|
||||
{
|
||||
if (!color_white(object)) {
|
||||
/* non-orphan, ignored or new */
|
||||
return;
|
||||
}
|
||||
|
||||
/*
|
||||
* Increase the object's reference count (number of pointers to the
|
||||
* memory block). If this count reaches the required minimum, the
|
||||
* object's color will become gray and it will be added to the
|
||||
* gray_list.
|
||||
*/
|
||||
object->count++;
|
||||
if (color_gray(object)) {
|
||||
/* put_object() called when removing from gray_list */
|
||||
WARN_ON(!get_object(object));
|
||||
list_add_tail(&object->gray_list, &gray_list);
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
* Memory scanning is a long process and it needs to be interruptable. This
|
||||
* function checks whether such interrupt condition occurred.
|
||||
@@ -1234,6 +1319,7 @@ static void scan_block(void *_start, void *_end,
|
||||
for (ptr = start; ptr < end; ptr++) {
|
||||
struct kmemleak_object *object;
|
||||
unsigned long pointer;
|
||||
unsigned long excess_ref;
|
||||
|
||||
if (scan_should_stop())
|
||||
break;
|
||||
@@ -1269,25 +1355,27 @@ static void scan_block(void *_start, void *_end,
|
||||
* enclosed by scan_mutex.
|
||||
*/
|
||||
spin_lock_nested(&object->lock, SINGLE_DEPTH_NESTING);
|
||||
if (!color_white(object)) {
|
||||
/* non-orphan, ignored or new */
|
||||
spin_unlock(&object->lock);
|
||||
continue;
|
||||
}
|
||||
|
||||
/*
|
||||
* Increase the object's reference count (number of pointers
|
||||
* to the memory block). If this count reaches the required
|
||||
* minimum, the object's color will become gray and it will be
|
||||
* added to the gray_list.
|
||||
*/
|
||||
object->count++;
|
||||
/* only pass surplus references (object already gray) */
|
||||
if (color_gray(object)) {
|
||||
/* put_object() called when removing from gray_list */
|
||||
WARN_ON(!get_object(object));
|
||||
list_add_tail(&object->gray_list, &gray_list);
|
||||
excess_ref = object->excess_ref;
|
||||
/* no need for update_refs() if object already gray */
|
||||
} else {
|
||||
excess_ref = 0;
|
||||
update_refs(object);
|
||||
}
|
||||
spin_unlock(&object->lock);
|
||||
|
||||
if (excess_ref) {
|
||||
object = lookup_object(excess_ref, 0);
|
||||
if (!object)
|
||||
continue;
|
||||
if (object == scanned)
|
||||
/* circular reference, ignore */
|
||||
continue;
|
||||
spin_lock_nested(&object->lock, SINGLE_DEPTH_NESTING);
|
||||
update_refs(object);
|
||||
spin_unlock(&object->lock);
|
||||
}
|
||||
}
|
||||
read_unlock_irqrestore(&kmemleak_lock, flags);
|
||||
}
|
||||
@@ -1994,6 +2082,10 @@ void __init kmemleak_init(void)
|
||||
case KMEMLEAK_NO_SCAN:
|
||||
kmemleak_no_scan(log->ptr);
|
||||
break;
|
||||
case KMEMLEAK_SET_EXCESS_REF:
|
||||
object_set_excess_ref((unsigned long)log->ptr,
|
||||
log->excess_ref);
|
||||
break;
|
||||
default:
|
||||
kmemleak_warn("Unknown early log operation: %d\n",
|
||||
log->op_type);
|
||||
|
||||
@@ -1397,7 +1397,7 @@ void __init __memblock_free_late(phys_addr_t base, phys_addr_t size)
|
||||
|
||||
for (; cursor < end; cursor++) {
|
||||
__free_pages_bootmem(pfn_to_page(cursor), cursor, 0);
|
||||
totalram_pages++;
|
||||
totalram_pages_inc();
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -151,7 +151,7 @@ static void __meminit mm_compute_batch(void)
|
||||
s32 batch = max_t(s32, nr*2, 32);
|
||||
|
||||
/* batch size set to 0.4% of (total memory/#cpus), or max int32 */
|
||||
memsized_batch = min_t(u64, (totalram_pages/nr)/256, 0x7fffffff);
|
||||
memsized_batch = min_t(u64, (totalram_pages()/nr)/256, 0x7fffffff);
|
||||
|
||||
vm_committed_as_batch = max_t(s32, memsized_batch, batch);
|
||||
}
|
||||
|
||||
@@ -91,7 +91,7 @@ void free_bootmem_late(unsigned long addr, unsigned long size)
|
||||
|
||||
for (; cursor < end; cursor++) {
|
||||
__free_pages_bootmem(pfn_to_page(cursor), cursor, 0);
|
||||
totalram_pages++;
|
||||
totalram_pages_inc();
|
||||
}
|
||||
}
|
||||
|
||||
@@ -184,7 +184,7 @@ unsigned long __init free_all_bootmem(void)
|
||||
reset_all_zones_managed_pages();
|
||||
|
||||
pages = free_low_memory_core_early();
|
||||
totalram_pages += pages;
|
||||
totalram_pages_add(pages);
|
||||
|
||||
return pages;
|
||||
}
|
||||
|
||||
12
mm/nommu.c
12
mm/nommu.c
@@ -236,12 +236,16 @@ void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot)
|
||||
}
|
||||
EXPORT_SYMBOL(__vmalloc);
|
||||
|
||||
void *__vmalloc_node_flags(unsigned long size, int node, gfp_t flags)
|
||||
{
|
||||
return __vmalloc(size, flags, PAGE_KERNEL);
|
||||
}
|
||||
|
||||
void *vmalloc_user(unsigned long size)
|
||||
{
|
||||
void *ret;
|
||||
|
||||
ret = __vmalloc(size, GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO,
|
||||
PAGE_KERNEL);
|
||||
ret = __vmalloc(size, GFP_KERNEL | __GFP_ZERO, PAGE_KERNEL);
|
||||
if (ret) {
|
||||
struct vm_area_struct *vma;
|
||||
|
||||
@@ -359,10 +363,6 @@ void *vzalloc_node(unsigned long size, int node)
|
||||
}
|
||||
EXPORT_SYMBOL(vzalloc_node);
|
||||
|
||||
#ifndef PAGE_KERNEL_EXEC
|
||||
# define PAGE_KERNEL_EXEC PAGE_KERNEL
|
||||
#endif
|
||||
|
||||
/**
|
||||
* vmalloc_exec - allocate virtually contiguous, executable memory
|
||||
* @size: allocation size
|
||||
|
||||
@@ -237,7 +237,7 @@ static enum oom_constraint constrained_alloc(struct oom_control *oc)
|
||||
}
|
||||
|
||||
/* Default to all available memory */
|
||||
oc->totalpages = totalram_pages + total_swap_pages;
|
||||
oc->totalpages = totalram_pages() + total_swap_pages;
|
||||
|
||||
if (!IS_ENABLED(CONFIG_NUMA))
|
||||
return CONSTRAINT_NONE;
|
||||
|
||||
@@ -2066,7 +2066,7 @@ static int page_writeback_cpu_online(unsigned int cpu)
|
||||
* However, that was when we used "dirty_ratio" to scale with
|
||||
* all memory, and we don't do that any more. "dirty_ratio"
|
||||
* is now applied to total non-HIGHPAGE memory (by subtracting
|
||||
* totalhigh_pages from vm_total_pages), and as such we can't
|
||||
* totalhigh_pages() from vm_total_pages), and as such we can't
|
||||
* get into the old insane situation any more where we had
|
||||
* large amounts of dirty pages compared to a small amount of
|
||||
* non-HIGHMEM memory.
|
||||
|
||||
@@ -16,6 +16,7 @@
|
||||
|
||||
#include <linux/stddef.h>
|
||||
#include <linux/mm.h>
|
||||
#include <linux/highmem.h>
|
||||
#include <linux/swap.h>
|
||||
#include <linux/interrupt.h>
|
||||
#include <linux/pagemap.h>
|
||||
@@ -121,7 +122,8 @@ EXPORT_SYMBOL(node_states);
|
||||
/* Protect totalram_pages and zone->managed_pages */
|
||||
static DEFINE_SPINLOCK(managed_page_count_lock);
|
||||
|
||||
unsigned long totalram_pages __read_mostly;
|
||||
atomic_long_t _totalram_pages __read_mostly;
|
||||
EXPORT_SYMBOL(_totalram_pages);
|
||||
unsigned long totalreserve_pages __read_mostly;
|
||||
unsigned long totalcma_pages __read_mostly;
|
||||
|
||||
@@ -4375,11 +4377,11 @@ EXPORT_SYMBOL_GPL(si_mem_available);
|
||||
|
||||
void si_meminfo(struct sysinfo *val)
|
||||
{
|
||||
val->totalram = totalram_pages;
|
||||
val->totalram = totalram_pages();
|
||||
val->sharedram = global_node_page_state(NR_SHMEM);
|
||||
val->freeram = global_page_state(NR_FREE_PAGES);
|
||||
val->bufferram = nr_blockdev_pages();
|
||||
val->totalhigh = totalhigh_pages;
|
||||
val->totalhigh = totalhigh_pages();
|
||||
val->freehigh = nr_free_highpages();
|
||||
val->mem_unit = PAGE_SIZE;
|
||||
}
|
||||
@@ -6641,10 +6643,10 @@ void adjust_managed_page_count(struct page *page, long count)
|
||||
{
|
||||
spin_lock(&managed_page_count_lock);
|
||||
page_zone(page)->managed_pages += count;
|
||||
totalram_pages += count;
|
||||
totalram_pages_add(count);
|
||||
#ifdef CONFIG_HIGHMEM
|
||||
if (PageHighMem(page))
|
||||
totalhigh_pages += count;
|
||||
totalhigh_pages_add(count);
|
||||
#endif
|
||||
spin_unlock(&managed_page_count_lock);
|
||||
}
|
||||
@@ -6675,9 +6677,9 @@ EXPORT_SYMBOL(free_reserved_area);
|
||||
void free_highmem_page(struct page *page)
|
||||
{
|
||||
__free_reserved_page(page);
|
||||
totalram_pages++;
|
||||
totalram_pages_inc();
|
||||
page_zone(page)->managed_pages++;
|
||||
totalhigh_pages++;
|
||||
totalhigh_pages_inc();
|
||||
}
|
||||
#endif
|
||||
|
||||
@@ -6726,10 +6728,10 @@ void __init mem_init_print_info(const char *str)
|
||||
physpages << (PAGE_SHIFT - 10),
|
||||
codesize >> 10, datasize >> 10, rosize >> 10,
|
||||
(init_data_size + init_code_size) >> 10, bss_size >> 10,
|
||||
(physpages - totalram_pages - totalcma_pages) << (PAGE_SHIFT - 10),
|
||||
(physpages - totalram_pages() - totalcma_pages) << (PAGE_SHIFT - 10),
|
||||
totalcma_pages << (PAGE_SHIFT - 10),
|
||||
#ifdef CONFIG_HIGHMEM
|
||||
totalhigh_pages << (PAGE_SHIFT - 10),
|
||||
totalhigh_pages() << (PAGE_SHIFT - 10),
|
||||
#endif
|
||||
str ? ", " : "", str ? str : "");
|
||||
}
|
||||
|
||||
@@ -101,12 +101,12 @@ struct shmem_falloc {
|
||||
#ifdef CONFIG_TMPFS
|
||||
static unsigned long shmem_default_max_blocks(void)
|
||||
{
|
||||
return totalram_pages / 2;
|
||||
return totalram_pages() / 2;
|
||||
}
|
||||
|
||||
static unsigned long shmem_default_max_inodes(void)
|
||||
{
|
||||
return min(totalram_pages - totalhigh_pages, totalram_pages / 2);
|
||||
return min(totalram_pages() - totalhigh_pages(), totalram_pages() / 2);
|
||||
}
|
||||
#endif
|
||||
|
||||
@@ -3380,7 +3380,7 @@ static int shmem_parse_options(char *options, struct shmem_sb_info *sbinfo,
|
||||
size = memparse(value,&rest);
|
||||
if (*rest == '%') {
|
||||
size <<= PAGE_SHIFT;
|
||||
size *= totalram_pages;
|
||||
size *= totalram_pages();
|
||||
do_div(size, 100);
|
||||
rest++;
|
||||
}
|
||||
|
||||
@@ -1256,7 +1256,7 @@ void __init kmem_cache_init(void)
|
||||
* page orders on machines with more than 32MB of memory if
|
||||
* not overridden on the command line.
|
||||
*/
|
||||
if (!slab_max_order_set && totalram_pages > (32 << 20) >> PAGE_SHIFT)
|
||||
if (!slab_max_order_set && totalram_pages() > (32 << 20) >> PAGE_SHIFT)
|
||||
slab_max_order = SLAB_MAX_ORDER_HI;
|
||||
|
||||
/* Bootstrap is tricky, because several objects are allocated
|
||||
|
||||
@@ -981,7 +981,7 @@ EXPORT_SYMBOL(pagevec_lookup_range_nr_tag);
|
||||
*/
|
||||
void __init swap_setup(void)
|
||||
{
|
||||
unsigned long megs = totalram_pages >> (20 - PAGE_SHIFT);
|
||||
unsigned long megs = totalram_pages() >> (20 - PAGE_SHIFT);
|
||||
#ifdef CONFIG_SWAP
|
||||
int i;
|
||||
|
||||
|
||||
48
mm/util.c
48
mm/util.c
@@ -381,6 +381,52 @@ unsigned long vm_mmap(struct file *file, unsigned long addr,
|
||||
}
|
||||
EXPORT_SYMBOL(vm_mmap);
|
||||
|
||||
/**
|
||||
* kvmalloc_node - attempt to allocate physically contiguous memory, but upon
|
||||
* failure, fall back to non-contiguous (vmalloc) allocation.
|
||||
* @size: size of the request.
|
||||
* @flags: gfp mask for the allocation - must be compatible (superset) with GFP_KERNEL.
|
||||
* @node: numa node to allocate from
|
||||
*
|
||||
* Uses kmalloc to get the memory but if the allocation fails then falls back
|
||||
* to the vmalloc allocator. Use kvfree for freeing the memory.
|
||||
*
|
||||
* Reclaim modifiers - __GFP_NORETRY, __GFP_REPEAT and __GFP_NOFAIL are not supported
|
||||
*
|
||||
* Any use of gfp flags outside of GFP_KERNEL should be consulted with mm people.
|
||||
*/
|
||||
void *kvmalloc_node(size_t size, gfp_t flags, int node)
|
||||
{
|
||||
gfp_t kmalloc_flags = flags;
|
||||
void *ret;
|
||||
|
||||
/*
|
||||
* vmalloc uses GFP_KERNEL for some internal allocations (e.g page tables)
|
||||
* so the given set of flags has to be compatible.
|
||||
*/
|
||||
WARN_ON_ONCE((flags & GFP_KERNEL) != GFP_KERNEL);
|
||||
|
||||
/*
|
||||
* Make sure that larger requests are not too disruptive - no OOM
|
||||
* killer and no allocation failure warnings as we have a fallback
|
||||
*/
|
||||
if (size > PAGE_SIZE)
|
||||
kmalloc_flags |= __GFP_NORETRY | __GFP_NOWARN;
|
||||
|
||||
ret = kmalloc_node(size, kmalloc_flags, node);
|
||||
|
||||
/*
|
||||
* It doesn't really make sense to fallback to vmalloc for sub page
|
||||
* requests
|
||||
*/
|
||||
if (ret || size <= PAGE_SIZE)
|
||||
return ret;
|
||||
|
||||
return __vmalloc_node_flags_caller(size, node, flags,
|
||||
__builtin_return_address(0));
|
||||
}
|
||||
EXPORT_SYMBOL(kvmalloc_node);
|
||||
|
||||
void kvfree(const void *addr)
|
||||
{
|
||||
if (is_vmalloc_addr(addr))
|
||||
@@ -527,7 +573,7 @@ unsigned long vm_commit_limit(void)
|
||||
if (sysctl_overcommit_kbytes)
|
||||
allowed = sysctl_overcommit_kbytes >> (PAGE_SHIFT - 10);
|
||||
else
|
||||
allowed = ((totalram_pages - hugetlb_total_pages())
|
||||
allowed = ((totalram_pages() - hugetlb_total_pages())
|
||||
* sysctl_overcommit_ratio / 100);
|
||||
allowed += total_swap_pages;
|
||||
|
||||
|
||||
1534
mm/vmalloc.c
1534
mm/vmalloc.c
File diff suppressed because it is too large
Load Diff
@@ -511,10 +511,10 @@ static int __init workingset_init(void)
|
||||
* actionable refault distance, which is currently half of
|
||||
* memory (totalram_pages/2). However, memory hotplug may add
|
||||
* some more pages at runtime, so keep working with up to
|
||||
* double the initial memory by using totalram_pages as-is.
|
||||
* double the initial memory by using totalram_pages() as-is.
|
||||
*/
|
||||
timestamp_bits = BITS_PER_LONG - EVICTION_SHIFT;
|
||||
max_order = fls_long(totalram_pages - 1);
|
||||
max_order = fls_long(totalram_pages() - 1);
|
||||
if (max_order > timestamp_bits)
|
||||
bucket_order = max_order - timestamp_bits;
|
||||
pr_info("workingset: timestamp_bits=%d max_order=%d bucket_order=%u\n",
|
||||
|
||||
@@ -174,7 +174,7 @@ void *ceph_kvmalloc(size_t size, gfp_t flags)
|
||||
return ptr;
|
||||
}
|
||||
|
||||
return __vmalloc(size, flags | __GFP_HIGHMEM, PAGE_KERNEL);
|
||||
return __vmalloc(size, flags, PAGE_KERNEL);
|
||||
}
|
||||
|
||||
|
||||
|
||||
@@ -1149,10 +1149,10 @@ static int __init dccp_init(void)
|
||||
*
|
||||
* The methodology is similar to that of the buffer cache.
|
||||
*/
|
||||
if (totalram_pages >= (128 * 1024))
|
||||
goal = totalram_pages >> (21 - PAGE_SHIFT);
|
||||
if (totalram_pages() >= (128 * 1024))
|
||||
goal = totalram_pages() >> (21 - PAGE_SHIFT);
|
||||
else
|
||||
goal = totalram_pages >> (23 - PAGE_SHIFT);
|
||||
goal = totalram_pages() >> (23 - PAGE_SHIFT);
|
||||
|
||||
if (thash_entries)
|
||||
goal = (thash_entries *
|
||||
|
||||
@@ -1877,7 +1877,7 @@ void __init dn_route_init(void)
|
||||
dn_route_timer.expires = jiffies + decnet_dst_gc_interval * HZ;
|
||||
add_timer(&dn_route_timer);
|
||||
|
||||
goal = totalram_pages >> (26 - PAGE_SHIFT);
|
||||
goal = totalram_pages() >> (26 - PAGE_SHIFT);
|
||||
|
||||
for(order = 0; (1UL << order) < goal; order++)
|
||||
/* NOTHING */;
|
||||
|
||||
@@ -1142,7 +1142,7 @@ static int __net_init tcp_net_metrics_init(struct net *net)
|
||||
|
||||
slots = tcpmhash_entries;
|
||||
if (!slots) {
|
||||
if (totalram_pages >= 128 * 1024)
|
||||
if (totalram_pages() >= 128 * 1024)
|
||||
slots = 16 * 1024;
|
||||
else
|
||||
slots = 8 * 1024;
|
||||
|
||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user