[Detail] Porting mm module. 1. memcfg: record kernel memory info for memory-footprint accounting. 2. memory-amms: add the amms driver to support secure communication between the AP and the modem. 3. memory-lowpower: this driver cooperates with CMA for memory-lowpower features; it provides a basic framework that connects kernel power management (PM) with the memory-lowpower features. 4. memory-ssmr: this driver cooperates with secure memory or TEE via CMA. Supported features: svp, iris-recognition, 2d_fr, tui, wfd, prot-sharedmem. 5. zone_movable_cma (ZMC 2.0): allocating pages from the movable zone is too restrictive, so we introduce a middle-level page-allocation policy zone — easier to allocate from, yet not restricted to movable pages. Memory-lowpower CMA is moved out to the NORMAL zone while SSVP stays in the movable zone. In future zone management, hard-limited CMA memory stays in the movable zone, while CMA memory whose allocation may acceptably fail stays in the normal zone. MTK-Commit-Id: 388556ea2de62ee9830347408a5fe2f3e4c43dfc Change-Id: Ie738ffa4b0c4b1f647056dd85e9876abd0eb35e5 Signed-off-by: James Hsu <james.hsu@mediatek.com> CR-Id: ALPS04020096 Feature: [Module]Kernel Memory Management
45 lines
1.4 KiB
C
45 lines
1.4 KiB
C
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __CMA_H__
#define __CMA_H__

#include <linux/init.h>
#include <linux/types.h>

/*
 * There is always at least global CMA area and a few optional
 * areas configured in kernel .config.
 */
#ifdef CONFIG_CMA_AREAS
#define MAX_CMA_AREAS	(1 + CONFIG_CMA_AREAS)
#else
#define MAX_CMA_AREAS	(0)
#endif

/* Opaque handle for a contiguous memory area; defined in mm/cma.c. */
struct cma;

/* Total number of pages reserved across all CMA areas. */
extern unsigned long totalcma_pages;

/* Accessors for an area's physical base address, size, and name. */
extern phys_addr_t cma_get_base(const struct cma *cma);
extern unsigned long cma_get_size(const struct cma *cma);
extern const char *cma_get_name(const struct cma *cma);
/*
 * Report a CMA physical range through the out-parameters.
 * NOTE(review): which area's range this covers (single area vs. the span
 * of all areas) is defined by the implementation in mm/cma.c — confirm.
 */
extern void cma_get_range(phys_addr_t *base, phys_addr_t *size);

/*
 * Reserve a contiguous region at early boot (__init: init-only text).
 * @fixed selects an exact @base rather than letting the allocator choose;
 * the created area is returned through @res_cma.  Returns 0 or a negative
 * errno-style code.
 */
extern int __init cma_declare_contiguous(phys_addr_t base,
			phys_addr_t size, phys_addr_t limit,
			phys_addr_t alignment, unsigned int order_per_bit,
			bool fixed, const char *name, struct cma **res_cma);
/*
 * Register an already-reserved memory range as a CMA area; the new area is
 * returned through @res_cma.
 */
extern int cma_init_reserved_mem(phys_addr_t base, phys_addr_t size,
					unsigned int order_per_bit,
					const char *name,
					struct cma **res_cma);
/*
 * Allocate @count contiguous pages from @cma with the given @align order;
 * returns the first page, or NULL on failure.
 */
extern struct page *cma_alloc(struct cma *cma, size_t count, unsigned int align,
			      gfp_t gfp_mask);
/*
 * Release @count pages previously obtained from cma_alloc(); returns true
 * when the pages belonged to @cma and were freed.
 */
extern bool cma_release(struct cma *cma, const struct page *pages, unsigned int count);

/* Invoke @it for every registered CMA area until it returns non-zero. */
extern int cma_for_each_area(int (*it)(struct cma *cma, void *data), void *data);

#ifdef CONFIG_ZONE_MOVABLE_CMA
/*
 * MTK ZMC extension.  Presumably a non-allocating check that a range of
 * @count pages at @align could be satisfied from @cma — verify against
 * the driver implementation.
 */
extern int cma_alloc_range_ok(struct cma *cma, int count, int align);
#endif

#endif /* __CMA_H__ */