Files
msm-5.15/mm/memremap.c
Greg Kroah-Hartman ac2a7a141f Merge 5.15.79 into android13-5.15-lts
Changes in 5.15.79
	thunderbolt: Tear down existing tunnels when resuming from hibernate
	thunderbolt: Add DP OUT resource when DP tunnel is discovered
	fuse: fix readdir cache race
	drm/amdkfd: avoid recursive lock in migrations back to RAM
	drm/amdkfd: handle CPU fault on COW mapping
	drm/amdkfd: Fix NULL pointer dereference in svm_migrate_to_ram()
	hwspinlock: qcom: correct MMIO max register for newer SoCs
	phy: stm32: fix an error code in probe
	wifi: cfg80211: silence a sparse RCU warning
	wifi: cfg80211: fix memory leak in query_regdb_file()
	soundwire: qcom: reinit broadcast completion
	soundwire: qcom: check for outstanding writes before doing a read
	bpf, verifier: Fix memory leak in array reallocation for stack state
	bpf, sockmap: Fix the sk->sk_forward_alloc warning of sk_stream_kill_queues
	wifi: mac80211: Set TWT Information Frame Disabled bit as 1
	bpftool: Fix NULL pointer dereference when pin {PROG, MAP, LINK} without FILE
	HID: hyperv: fix possible memory leak in mousevsc_probe()
	bpf, sockmap: Fix sk->sk_forward_alloc warn_on in sk_stream_kill_queues
	bpf: Fix sockmap calling sleepable function in teardown path
	bpf, sock_map: Move cancel_work_sync() out of sock lock
	bpf: Add helper macro bpf_for_each_reg_in_vstate
	bpf: Fix wrong reg type conversion in release_reference()
	net: gso: fix panic on frag_list with mixed head alloc types
	macsec: delete new rxsc when offload fails
	macsec: fix secy->n_rx_sc accounting
	macsec: fix detection of RXSCs when toggling offloading
	macsec: clear encryption keys from the stack after setting up offload
	octeontx2-pf: Use hardware register for CQE count
	octeontx2-pf: NIX TX overwrites SQ_CTX_HW_S[SQ_INT]
	net: tun: Fix memory leaks of napi_get_frags
	bnxt_en: Fix possible crash in bnxt_hwrm_set_coal()
	bnxt_en: fix potentially incorrect return value for ndo_rx_flow_steer
	net: fman: Unregister ethernet device on removal
	capabilities: fix undefined behavior in bit shift for CAP_TO_MASK
	phy: ralink: mt7621-pci: add sentinel to quirks table
	KVM: s390: pv: don't allow userspace to set the clock under PV
	net: lapbether: fix issue of dev reference count leakage in lapbeth_device_event()
	hamradio: fix issue of dev reference count leakage in bpq_device_event()
	net: wwan: iosm: fix memory leak in ipc_wwan_dellink
	net: wwan: mhi: fix memory leak in mhi_mbim_dellink
	drm/vc4: Fix missing platform_unregister_drivers() call in vc4_drm_register()
	tcp: prohibit TCP_REPAIR_OPTIONS if data was already sent
	ipv6: addrlabel: fix infoleak when sending struct ifaddrlblmsg to network
	can: af_can: fix NULL pointer dereference in can_rx_register()
	net: stmmac: dwmac-meson8b: fix meson8b_devm_clk_prepare_enable()
	net: broadcom: Fix BCMGENET Kconfig
	tipc: fix the msg->req tlv len check in tipc_nl_compat_name_table_dump_header
	dmaengine: pxa_dma: use platform_get_irq_optional
	dmaengine: mv_xor_v2: Fix a resource leak in mv_xor_v2_remove()
	dmaengine: ti: k3-udma-glue: fix memory leak when register device fail
	net: lapbether: fix issue of invalid opcode in lapbeth_open()
	drivers: net: xgene: disable napi when register irq failed in xgene_enet_open()
	perf stat: Fix printing os->prefix in CSV metrics output
	perf tools: Add the include/perf/ directory to .gitignore
	netfilter: nfnetlink: fix potential dead lock in nfnetlink_rcv_msg()
	netfilter: Cleanup nft_net->module_list from nf_tables_exit_net()
	net: marvell: prestera: fix memory leak in prestera_rxtx_switch_init()
	net: nixge: disable napi when enable interrupts failed in nixge_open()
	net: wwan: iosm: fix memory leak in ipc_pcie_read_bios_cfg
	net/mlx5: Bridge, verify LAG state when adding bond to bridge
	net/mlx5: Allow async trigger completion execution on single CPU systems
	net/mlx5e: E-Switch, Fix comparing termination table instance
	net: cpsw: disable napi in cpsw_ndo_open()
	net: cxgb3_main: disable napi when bind qsets failed in cxgb_up()
	stmmac: intel: Enable 2.5Gbps for Intel AlderLake-S
	stmmac: intel: Update PCH PTP clock rate from 200MHz to 204.8MHz
	mctp: Fix an error handling path in mctp_init()
	cxgb4vf: shut down the adapter when t4vf_update_port_info() failed in cxgb4vf_open()
	stmmac: dwmac-loongson: fix missing pci_disable_msi() while module exiting
	stmmac: dwmac-loongson: fix missing pci_disable_device() in loongson_dwmac_probe()
	stmmac: dwmac-loongson: fix missing of_node_put() while module exiting
	net: phy: mscc: macsec: clear encryption keys when freeing a flow
	net: atlantic: macsec: clear encryption keys from the stack
	ethernet: s2io: disable napi when start nic failed in s2io_card_up()
	net: mv643xx_eth: disable napi when init rxq or txq failed in mv643xx_eth_open()
	ethernet: tundra: free irq when alloc ring failed in tsi108_open()
	net: macvlan: fix memory leaks of macvlan_common_newlink
	riscv: process: fix kernel info leakage
	riscv: vdso: fix build with llvm
	riscv: fix reserved memory setup
	arm64: efi: Fix handling of misaligned runtime regions and drop warning
	MIPS: jump_label: Fix compat branch range check
	mmc: cqhci: Provide helper for resetting both SDHCI and CQHCI
	mmc: sdhci-of-arasan: Fix SDHCI_RESET_ALL for CQHCI
	mmc: sdhci_am654: Fix SDHCI_RESET_ALL for CQHCI
	mmc: sdhci-tegra: Fix SDHCI_RESET_ALL for CQHCI
	mmc: sdhci-esdhc-imx: use the correct host caps for MMC_CAP_8_BIT_DATA
	ALSA: hda/hdmi - enable runtime pm for more AMD display audio
	ALSA: hda/ca0132: add quirk for EVGA Z390 DARK
	ALSA: hda: fix potential memleak in 'add_widget_node'
	ALSA: hda/realtek: Add Positivo C6300 model quirk
	ALSA: usb-audio: Yet more regression for the delayed card registration
	ALSA: usb-audio: Add quirk entry for M-Audio Micro
	ALSA: usb-audio: Add DSD support for Accuphase DAC-60
	vmlinux.lds.h: Fix placement of '.data..decrypted' section
	ata: libata-scsi: fix SYNCHRONIZE CACHE (16) command failure
	nilfs2: fix deadlock in nilfs_count_free_blocks()
	nilfs2: fix use-after-free bug of ns_writer on remount
	drm/i915/dmabuf: fix sg_table handling in map_dma_buf
	drm/amdgpu: disable BACO on special BEIGE_GOBY card
	platform/x86: hp_wmi: Fix rfkill causing soft blocked wifi
	wifi: ath11k: avoid deadlock during regulatory update in ath11k_regd_update()
	btrfs: fix match incorrectly in dev_args_match_device
	btrfs: selftests: fix wrong error check in btrfs_free_dummy_root()
	btrfs: zoned: initialize device's zone info for seeding
	mmc: sdhci-esdhc-imx: Fix SDHCI_RESET_ALL for CQHCI
	udf: Fix a slab-out-of-bounds write bug in udf_find_entry()
	mm/damon/dbgfs: check if rm_contexts input is for a real context
	mm/memremap.c: map FS_DAX device memory as decrypted
	mm/shmem: use page_mapping() to detect page cache for uffd continue
	can: j1939: j1939_send_one(): fix missing CAN header initialization
	cert host tools: Stop complaining about deprecated OpenSSL functions
	dmaengine: at_hdmac: Fix at_lli struct definition
	dmaengine: at_hdmac: Don't start transactions at tx_submit level
	dmaengine: at_hdmac: Start transfer for cyclic channels in issue_pending
	dmaengine: at_hdmac: Fix premature completion of desc in issue_pending
	dmaengine: at_hdmac: Do not call the complete callback on device_terminate_all
	dmaengine: at_hdmac: Protect atchan->status with the channel lock
	dmaengine: at_hdmac: Fix concurrency problems by removing atc_complete_all()
	dmaengine: at_hdmac: Fix concurrency over descriptor
	dmaengine: at_hdmac: Free the memset buf without holding the chan lock
	dmaengine: at_hdmac: Fix concurrency over the active list
	dmaengine: at_hdmac: Fix descriptor handling when issuing it to hardware
	dmaengine: at_hdmac: Fix completion of unissued descriptor in case of errors
	dmaengine: at_hdmac: Don't allow CPU to reorder channel enable
	dmaengine: at_hdmac: Fix impossible condition
	dmaengine: at_hdmac: Check return code of dma_async_device_register
	marvell: octeontx2: build error: unknown type name 'u64'
	drm/amdkfd: Migrate in CPU page fault use current mm
	net: tun: call napi_schedule_prep() to ensure we own a napi
	x86/cpu: Restore AMD's DE_CFG MSR after resume
	Linux 5.15.79

Change-Id: I6f77aa724b7aa43abcef3444af951c7c62d46303
Signed-off-by: Greg Kroah-Hartman <gregkh@google.com>
2022-12-15 07:25:07 +00:00

// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2015 Intel Corporation. All rights reserved. */
#include <linux/device.h>
#include <linux/io.h>
#include <linux/kasan.h>
#include <linux/memory_hotplug.h>
#include <linux/mm.h>
#include <linux/pfn_t.h>
#include <linux/swap.h>
#include <linux/mmzone.h>
#include <linux/swapops.h>
#include <linux/types.h>
#include <linux/wait_bit.h>
#include <linux/xarray.h>
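
/* All dev_pagemap ranges, indexed by pfn, for get_dev_pagemap() lookups */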
static DEFINE_XARRAY(pgmap_array);

/*
 * The memremap() and memremap_pages() interfaces are alternately used
 * to map persistent memory namespaces. These interfaces place different
 * constraints on the alignment and size of the mapping (namespace).
 * memremap() can map individual PAGE_SIZE pages. memremap_pages() can
 * only map subsections (2MB), and on at least one architecture (PowerPC)
 * the minimum mapping granularity of memremap_pages() is 16MB.
 *
 * The role of memremap_compat_align() is to communicate the minimum
 * arch supported alignment of a namespace such that it can freely
 * switch modes without violating the arch constraint. Namely, do not
 * allow a namespace to be PAGE_SIZE aligned since that namespace may be
 * reconfigured into a mode that requires SUBSECTION_SIZE alignment.
 */
#ifndef CONFIG_ARCH_HAS_MEMREMAP_COMPAT_ALIGN
unsigned long memremap_compat_align(void)
{
	return SUBSECTION_SIZE;
}
EXPORT_SYMBOL_GPL(memremap_compat_align);
#endif
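
/*
 * MEMORY_DEVICE_PRIVATE and MEMORY_DEVICE_FS_DAX pages need per-page
 * work when their refcount drops, so the presence of any such pgmap is
 * tracked with a static branch that keys the devmap-managed page paths.
 */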
#ifdef CONFIG_DEV_PAGEMAP_OPS
DEFINE_STATIC_KEY_FALSE(devmap_managed_key);
EXPORT_SYMBOL(devmap_managed_key);

static void devmap_managed_enable_put(struct dev_pagemap *pgmap)
{
	if (pgmap->type == MEMORY_DEVICE_PRIVATE ||
	    pgmap->type == MEMORY_DEVICE_FS_DAX)
		static_branch_dec(&devmap_managed_key);
}

static void devmap_managed_enable_get(struct dev_pagemap *pgmap)
{
	if (pgmap->type == MEMORY_DEVICE_PRIVATE ||
	    pgmap->type == MEMORY_DEVICE_FS_DAX)
		static_branch_inc(&devmap_managed_key);
}
#else
static void devmap_managed_enable_get(struct dev_pagemap *pgmap)
{
}
static void devmap_managed_enable_put(struct dev_pagemap *pgmap)
{
}
#endif /* CONFIG_DEV_PAGEMAP_OPS */
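
/*
 * Clear @range from pgmap_array so get_dev_pagemap() can no longer find
 * it, then wait out any lookups still inside their RCU read section.
 */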
static void pgmap_array_delete(struct range *range)
{
	xa_store_range(&pgmap_array, PHYS_PFN(range->start), PHYS_PFN(range->end),
		       NULL, GFP_KERNEL);
	synchronize_rcu();
}
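
/*
 * First usable device pfn of a range; for the first range this skips
 * any pfns reserved for the vmem_altmap, which have no usable memmap.
 */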
static unsigned long pfn_first(struct dev_pagemap *pgmap, int range_id)
{
	struct range *range = &pgmap->ranges[range_id];
	unsigned long pfn = PHYS_PFN(range->start);

	if (range_id)
		return pfn;
	return pfn + vmem_altmap_offset(pgmap_altmap(pgmap));
}
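
/* Does @pfn fall in one of @pgmap's ranges and have an initialized page? */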
bool pgmap_pfn_valid(struct dev_pagemap *pgmap, unsigned long pfn)
{
	int i;

	for (i = 0; i < pgmap->nr_range; i++) {
		struct range *range = &pgmap->ranges[i];

		if (pfn >= PHYS_PFN(range->start) &&
		    pfn <= PHYS_PFN(range->end))
			return pfn >= pfn_first(pgmap, i);
	}

	return false;
}
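
/*
 * Iteration helpers: device ranges can span millions of pfns, so
 * pfn_next() drops in a cond_resched() every 1024 iterations.
 */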
static unsigned long pfn_end(struct dev_pagemap *pgmap, int range_id)
{
	const struct range *range = &pgmap->ranges[range_id];

	return (range->start + range_len(range)) >> PAGE_SHIFT;
}

static unsigned long pfn_next(unsigned long pfn)
{
	if (pfn % 1024 == 0)
		cond_resched();
	return pfn + 1;
}

#define for_each_device_pfn(pfn, map, i) \
	for (pfn = pfn_first(map, i); pfn < pfn_end(map, i); pfn = pfn_next(pfn))
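
/*
 * Kill and drain pgmap->ref: prefer the driver-supplied ops when given,
 * otherwise operate on the internally allocated percpu ref directly.
 */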
static void dev_pagemap_kill(struct dev_pagemap *pgmap)
{
	if (pgmap->ops && pgmap->ops->kill)
		pgmap->ops->kill(pgmap);
	else
		percpu_ref_kill(pgmap->ref);
}

static void dev_pagemap_cleanup(struct dev_pagemap *pgmap)
{
	if (pgmap->ops && pgmap->ops->cleanup) {
		pgmap->ops->cleanup(pgmap);
	} else {
		wait_for_completion(&pgmap->done);
		percpu_ref_exit(pgmap->ref);
	}
	/*
	 * Undo the pgmap ref assignment for the internal case as the
	 * caller may re-enable the same pgmap.
	 */
	if (pgmap->ref == &pgmap->internal_ref)
		pgmap->ref = NULL;
}
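
/* Retire one range's pages from their zone and unplug the memory. */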
static void pageunmap_range(struct dev_pagemap *pgmap, int range_id)
{
	struct range *range = &pgmap->ranges[range_id];
	struct page *first_page;

	/* make sure to access a memmap that was actually initialized */
	first_page = pfn_to_page(pfn_first(pgmap, range_id));

	/* pages are dead and unused, undo the arch mapping */
	mem_hotplug_begin();
	remove_pfn_range_from_zone(page_zone(first_page), PHYS_PFN(range->start),
				   PHYS_PFN(range_len(range)));
	if (pgmap->type == MEMORY_DEVICE_PRIVATE) {
		__remove_pages(PHYS_PFN(range->start),
			       PHYS_PFN(range_len(range)), NULL);
	} else {
		arch_remove_memory(range->start, range_len(range),
				   pgmap_altmap(pgmap));
		kasan_remove_zero_shadow(__va(range->start), range_len(range));
	}
	mem_hotplug_done();

	untrack_pfn(NULL, PHYS_PFN(range->start), range_len(range));
	pgmap_array_delete(range);
}
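
/*
 * Undo everything memremap_pages() set up: kill the reference count,
 * drop the reference each device page held, wait for the ref to drain,
 * then unmap and unplug every range.
 */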
void memunmap_pages(struct dev_pagemap *pgmap)
{
	unsigned long pfn;
	int i;

	dev_pagemap_kill(pgmap);
	for (i = 0; i < pgmap->nr_range; i++)
		for_each_device_pfn(pfn, pgmap, i)
			put_page(pfn_to_page(pfn));
	dev_pagemap_cleanup(pgmap);

	for (i = 0; i < pgmap->nr_range; i++)
		pageunmap_range(pgmap, i);

	WARN_ONCE(pgmap->altmap.alloc, "failed to free all reserved pages\n");
	devmap_managed_enable_put(pgmap);
}
EXPORT_SYMBOL_GPL(memunmap_pages);

static void devm_memremap_pages_release(void *data)
{
	memunmap_pages(data);
}

static void dev_pagemap_percpu_release(struct percpu_ref *ref)
{
	struct dev_pagemap *pgmap =
		container_of(ref, struct dev_pagemap, internal_ref);

	complete(&pgmap->done);
}
static int pagemap_range(struct dev_pagemap *pgmap, struct mhp_params *params,
		int range_id, int nid)
{
	const bool is_private = pgmap->type == MEMORY_DEVICE_PRIVATE;
	struct range *range = &pgmap->ranges[range_id];
	struct dev_pagemap *conflict_pgmap;
	int error, is_ram;

	if (WARN_ONCE(pgmap_altmap(pgmap) && range_id > 0,
				"altmap not supported for multiple ranges\n"))
		return -EINVAL;

	conflict_pgmap = get_dev_pagemap(PHYS_PFN(range->start), NULL);
	if (conflict_pgmap) {
		WARN(1, "Conflicting mapping in same section\n");
		put_dev_pagemap(conflict_pgmap);
		return -ENOMEM;
	}

	conflict_pgmap = get_dev_pagemap(PHYS_PFN(range->end), NULL);
	if (conflict_pgmap) {
		WARN(1, "Conflicting mapping in same section\n");
		put_dev_pagemap(conflict_pgmap);
		return -ENOMEM;
	}

	is_ram = region_intersects(range->start, range_len(range),
				   IORESOURCE_SYSTEM_RAM, IORES_DESC_NONE);

	if (is_ram != REGION_DISJOINT) {
		WARN_ONCE(1, "attempted on %s region %#llx-%#llx\n",
				is_ram == REGION_MIXED ? "mixed" : "ram",
				range->start, range->end);
		return -ENXIO;
	}

	error = xa_err(xa_store_range(&pgmap_array, PHYS_PFN(range->start),
				PHYS_PFN(range->end), pgmap, GFP_KERNEL));
	if (error)
		return error;

	if (nid < 0)
		nid = numa_mem_id();

	error = track_pfn_remap(NULL, &params->pgprot, PHYS_PFN(range->start), 0,
			range_len(range));
	if (error)
		goto err_pfn_remap;

	if (!mhp_range_allowed(range->start, range_len(range), !is_private)) {
		error = -EINVAL;
		goto err_kasan;
	}

	mem_hotplug_begin();

	/*
	 * For device private memory we call add_pages() as we only need to
	 * allocate and initialize struct page for the device memory. More-
	 * over the device memory is un-accessible thus we do not want to
	 * create a linear mapping for the memory like arch_add_memory()
	 * would do.
	 *
	 * For all other device memory types, which are accessible by
	 * the CPU, we do want the linear mapping and thus use
	 * arch_add_memory().
	 */
	if (is_private) {
		error = add_pages(nid, PHYS_PFN(range->start),
				PHYS_PFN(range_len(range)), params);
	} else {
		error = kasan_add_zero_shadow(__va(range->start), range_len(range));
		if (error) {
			mem_hotplug_done();
			goto err_kasan;
		}

		error = arch_add_memory(nid, range->start, range_len(range),
					params);
	}

	if (!error) {
		struct zone *zone;

		zone = &NODE_DATA(nid)->node_zones[ZONE_DEVICE];
		move_pfn_range_to_zone(zone, PHYS_PFN(range->start),
				PHYS_PFN(range_len(range)), params->altmap,
				MIGRATE_MOVABLE);
	}

	mem_hotplug_done();
	if (error)
		goto err_add_memory;

	/*
	 * Initialization of the pages has been deferred until now in order
	 * to allow us to do the work while not holding the hotplug lock.
	 */
	memmap_init_zone_device(&NODE_DATA(nid)->node_zones[ZONE_DEVICE],
				PHYS_PFN(range->start),
				PHYS_PFN(range_len(range)), pgmap);
	percpu_ref_get_many(pgmap->ref, pfn_end(pgmap, range_id)
			- pfn_first(pgmap, range_id));
	return 0;

err_add_memory:
	kasan_remove_zero_shadow(__va(range->start), range_len(range));
err_kasan:
	untrack_pfn(NULL, PHYS_PFN(range->start), range_len(range));
err_pfn_remap:
	pgmap_array_delete(range);
	return error;
}

/*
 * Not device managed version of devm_memremap_pages, undone by
 * memunmap_pages(). Please use devm_memremap_pages if you have a struct
 * device available.
 */
void *memremap_pages(struct dev_pagemap *pgmap, int nid)
{
	struct mhp_params params = {
		.altmap = pgmap_altmap(pgmap),
		.pgprot = PAGE_KERNEL,
	};
	const int nr_range = pgmap->nr_range;
	int error, i;

	if (WARN_ONCE(!nr_range, "nr_range must be specified\n"))
		return ERR_PTR(-EINVAL);

	switch (pgmap->type) {
	case MEMORY_DEVICE_PRIVATE:
		if (!IS_ENABLED(CONFIG_DEVICE_PRIVATE)) {
			WARN(1, "Device private memory not supported\n");
			return ERR_PTR(-EINVAL);
		}
		if (!pgmap->ops || !pgmap->ops->migrate_to_ram) {
			WARN(1, "Missing migrate_to_ram method\n");
			return ERR_PTR(-EINVAL);
		}
		if (!pgmap->ops->page_free) {
			WARN(1, "Missing page_free method\n");
			return ERR_PTR(-EINVAL);
		}
		if (!pgmap->owner) {
			WARN(1, "Missing owner\n");
			return ERR_PTR(-EINVAL);
		}
		break;
	case MEMORY_DEVICE_FS_DAX:
		if (!IS_ENABLED(CONFIG_ZONE_DEVICE) ||
		    IS_ENABLED(CONFIG_FS_DAX_LIMITED)) {
			WARN(1, "File system DAX not supported\n");
			return ERR_PTR(-EINVAL);
		}
		params.pgprot = pgprot_decrypted(params.pgprot);
		break;
	case MEMORY_DEVICE_GENERIC:
		break;
	case MEMORY_DEVICE_PCI_P2PDMA:
		params.pgprot = pgprot_noncached(params.pgprot);
		break;
	default:
		WARN(1, "Invalid pgmap type %d\n", pgmap->type);
		break;
	}

	if (!pgmap->ref) {
		if (pgmap->ops && (pgmap->ops->kill || pgmap->ops->cleanup))
			return ERR_PTR(-EINVAL);

		init_completion(&pgmap->done);
		error = percpu_ref_init(&pgmap->internal_ref,
				dev_pagemap_percpu_release, 0, GFP_KERNEL);
		if (error)
			return ERR_PTR(error);
		pgmap->ref = &pgmap->internal_ref;
	} else {
		if (!pgmap->ops || !pgmap->ops->kill || !pgmap->ops->cleanup) {
			WARN(1, "Missing reference count teardown definition\n");
			return ERR_PTR(-EINVAL);
		}
	}

	devmap_managed_enable_get(pgmap);

	/*
	 * Clear the pgmap nr_range as it will be incremented for each
	 * successfully processed range. This communicates how many
	 * regions to unwind in the abort case.
	 */
	pgmap->nr_range = 0;
	error = 0;
	for (i = 0; i < nr_range; i++) {
		error = pagemap_range(pgmap, &params, i, nid);
		if (error)
			break;
		pgmap->nr_range++;
	}

	if (i < nr_range) {
		memunmap_pages(pgmap);
		pgmap->nr_range = nr_range;
		return ERR_PTR(error);
	}

	return __va(pgmap->ranges[0].start);
}
EXPORT_SYMBOL_GPL(memremap_pages);
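
/*
 * Illustrative sketch: the minimum a MEMORY_DEVICE_PRIVATE user must
 * populate before calling memremap_pages(), mirroring the checks in the
 * switch above. All example_* names and the res/pgmap variables are
 * hypothetical placeholders, not code from this file.
 *
 *	static const struct dev_pagemap_ops example_ops = {
 *		.page_free	= example_page_free,
 *		.migrate_to_ram	= example_migrate_to_ram,
 *	};
 *
 *	pgmap->type = MEMORY_DEVICE_PRIVATE;
 *	pgmap->range.start = res->start;
 *	pgmap->range.end = res->end;
 *	pgmap->nr_range = 1;
 *	pgmap->ops = &example_ops;
 *	pgmap->owner = example_owner;	(any unique cookie, matched by
 *					 migrate_vma pgmap_owner users)
 *	addr = memremap_pages(pgmap, numa_node_id());
 *	if (IS_ERR(addr))
 *		return PTR_ERR(addr);
 */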

/**
 * devm_memremap_pages - remap and provide memmap backing for the given resource
 * @dev: hosting device for @pgmap
 * @pgmap: pointer to a struct dev_pagemap
 *
 * Notes:
 * 1/ At a minimum the range and type members of @pgmap must be initialized
 *    by the caller before passing it to this function
 *
 * 2/ The altmap field may optionally be initialized, in which case
 *    PGMAP_ALTMAP_VALID must be set in pgmap->flags.
 *
 * 3/ The ref field may optionally be provided, in which case pgmap->ref
 *    must be 'live' on entry and will be killed and reaped at
 *    devm_memremap_pages_release() time, or if this routine fails.
 *
 * 4/ range is expected to be a host memory range that could feasibly be
 *    treated as a "System RAM" range, i.e. not a device mmio range, but
 *    this is not enforced.
 */
void *devm_memremap_pages(struct device *dev, struct dev_pagemap *pgmap)
{
	int error;
	void *ret;

	ret = memremap_pages(pgmap, dev_to_node(dev));
	if (IS_ERR(ret))
		return ret;

	error = devm_add_action_or_reset(dev, devm_memremap_pages_release,
			pgmap);
	if (error)
		return ERR_PTR(error);
	return ret;
}
EXPORT_SYMBOL_GPL(devm_memremap_pages);
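
/*
 * With the devm wrapper the mapping is torn down automatically when
 * @dev is unbound. A hypothetical probe path (pgmap assumed to be
 * zero-initialized, e.g. from devm_kzalloc()) could look like:
 *
 *	pgmap->type = MEMORY_DEVICE_GENERIC;
 *	pgmap->range.start = res->start;
 *	pgmap->range.end = res->end;
 *	pgmap->nr_range = 1;
 *	addr = devm_memremap_pages(dev, pgmap);
 *	if (IS_ERR(addr))
 *		return PTR_ERR(addr);
 *
 * devm_memunmap_pages() below is the explicit counterpart for callers
 * that need to unmap before the device goes away.
 */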

void devm_memunmap_pages(struct device *dev, struct dev_pagemap *pgmap)
{
	devm_release_action(dev, devm_memremap_pages_release, pgmap);
}
EXPORT_SYMBOL_GPL(devm_memunmap_pages);

unsigned long vmem_altmap_offset(struct vmem_altmap *altmap)
{
	/* number of pfns from base where pfn_to_page() is valid */
	if (altmap)
		return altmap->reserve + altmap->free;
	return 0;
}
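
/* Hand @nr_pfns back to the altmap's allocation accounting. */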
void vmem_altmap_free(struct vmem_altmap *altmap, unsigned long nr_pfns)
{
	altmap->alloc -= nr_pfns;
}

/**
 * get_dev_pagemap() - take a new live reference on the dev_pagemap for @pfn
 * @pfn: page frame number to look up a page map for
 * @pgmap: optional known pgmap that already has a reference
 *
 * If @pgmap is non-NULL and covers @pfn it will be returned as-is. If @pgmap
 * is non-NULL but does not cover @pfn the reference to it will be released.
 */
struct dev_pagemap *get_dev_pagemap(unsigned long pfn,
		struct dev_pagemap *pgmap)
{
	resource_size_t phys = PFN_PHYS(pfn);

	/*
	 * In the cached case we're already holding a live reference.
	 */
	if (pgmap) {
		if (phys >= pgmap->range.start && phys <= pgmap->range.end)
			return pgmap;
		put_dev_pagemap(pgmap);
	}

	/* fall back to slow path lookup */
	rcu_read_lock();
	pgmap = xa_load(&pgmap_array, PHYS_PFN(phys));
	if (pgmap && !percpu_ref_tryget_live(pgmap->ref))
		pgmap = NULL;
	rcu_read_unlock();

	return pgmap;
}
EXPORT_SYMBOL_GPL(get_dev_pagemap);

#ifdef CONFIG_DEV_PAGEMAP_OPS
void free_devmap_managed_page(struct page *page)
{
	/* notify page idle for dax */
	if (!is_device_private_page(page)) {
		wake_up_var(&page->_refcount);
		return;
	}

	__ClearPageWaiters(page);

	mem_cgroup_uncharge(page);

	/*
	 * When a device_private page is freed, the page->mapping field
	 * may still contain a (stale) mapping value. For example, the
	 * lower bits of page->mapping may still identify the page as an
	 * anonymous page. Ultimately, this entire field is just stale
	 * and wrong, and it will cause errors if not cleared. One
	 * example is:
	 *
	 *   migrate_vma_pages()
	 *     migrate_vma_insert_page()
	 *       page_add_new_anon_rmap()
	 *         __page_set_anon_rmap()
	 *           ...checks page->mapping, via PageAnon(page) call,
	 *             and incorrectly concludes that the page is an
	 *             anonymous page. Therefore, it incorrectly,
	 *             silently fails to set up the new anon rmap.
	 *
	 * For other types of ZONE_DEVICE pages, migration is either
	 * handled differently or not done at all, so there is no need
	 * to clear page->mapping.
	 */
	page->mapping = NULL;
	page->pgmap->ops->page_free(page);
}
#endif /* CONFIG_DEV_PAGEMAP_OPS */