Revert "memremap: remove support for external pgmap refcounts"

This reverts commit d18a908565 which is
commit b80892ca022e9eb484771a66eb68e12364695a2a upstream.

It breaks the Android GKI kernel ABI, and is not needed for Android
devices, so revert it for now.  If it is needed for this branch, it can
come back later in an ABI-stable way.

Bug: 161946584
Signed-off-by: Greg Kroah-Hartman <gregkh@google.com>
Change-Id: Ifc8817c504e48ece90813a42015cb91fc04de5e4
This commit is contained in:
Greg Kroah-Hartman
2022-08-22 21:41:17 +02:00
parent 640530541d
commit 0b75a27c7a
4 changed files with 92 additions and 26 deletions

View File

@@ -219,7 +219,7 @@ int pci_p2pdma_add_resource(struct pci_dev *pdev, int bar, size_t size,
error = gen_pool_add_owner(p2pdma->pool, (unsigned long)addr,
pci_bus_address(pdev, bar) + offset,
range_len(&pgmap->range), dev_to_node(&pdev->dev),
&pgmap->ref);
pgmap->ref);
if (error)
goto pages_free;

View File

@@ -72,6 +72,16 @@ struct dev_pagemap_ops {
*/
void (*page_free)(struct page *page);
/*
* Transition the refcount in struct dev_pagemap to the dead state.
*/
void (*kill)(struct dev_pagemap *pgmap);
/*
* Wait for refcount in struct dev_pagemap to be idle and reap it.
*/
void (*cleanup)(struct dev_pagemap *pgmap);
/*
* Used for private (un-addressable) device memory only. Must migrate
* the page back to a CPU accessible page.
@@ -85,7 +95,8 @@ struct dev_pagemap_ops {
* struct dev_pagemap - metadata for ZONE_DEVICE mappings
* @altmap: pre-allocated/reserved memory for vmemmap allocations
* @ref: reference count that pins the devm_memremap_pages() mapping
* @done: completion for @ref
* @internal_ref: internal reference if @ref is not provided by the caller
* @done: completion for @internal_ref
* @type: memory type: see MEMORY_* in memory_hotplug.h
* @flags: PGMAP_* flags to specify defailed behavior
* @ops: method table
@@ -98,7 +109,8 @@ struct dev_pagemap_ops {
*/
struct dev_pagemap {
struct vmem_altmap altmap;
struct percpu_ref ref;
struct percpu_ref *ref;
struct percpu_ref internal_ref;
struct completion done;
enum memory_type type;
unsigned int flags;
@@ -179,7 +191,7 @@ static inline unsigned long memremap_compat_align(void)
static inline void put_dev_pagemap(struct dev_pagemap *pgmap)
{
if (pgmap)
percpu_ref_put(&pgmap->ref);
percpu_ref_put(pgmap->ref);
}
#endif /* _LINUX_MEMREMAP_H_ */

View File

@@ -112,6 +112,30 @@ static unsigned long pfn_next(unsigned long pfn)
#define for_each_device_pfn(pfn, map, i) \
for (pfn = pfn_first(map, i); pfn < pfn_end(map, i); pfn = pfn_next(pfn))
static void dev_pagemap_kill(struct dev_pagemap *pgmap)
{
if (pgmap->ops && pgmap->ops->kill)
pgmap->ops->kill(pgmap);
else
percpu_ref_kill(pgmap->ref);
}
static void dev_pagemap_cleanup(struct dev_pagemap *pgmap)
{
if (pgmap->ops && pgmap->ops->cleanup) {
pgmap->ops->cleanup(pgmap);
} else {
wait_for_completion(&pgmap->done);
percpu_ref_exit(pgmap->ref);
}
/*
* Undo the pgmap ref assignment for the internal case as the
* caller may re-enable the same pgmap.
*/
if (pgmap->ref == &pgmap->internal_ref)
pgmap->ref = NULL;
}
static void pageunmap_range(struct dev_pagemap *pgmap, int range_id)
{
struct range *range = &pgmap->ranges[range_id];
@@ -143,12 +167,11 @@ void memunmap_pages(struct dev_pagemap *pgmap)
unsigned long pfn;
int i;
percpu_ref_kill(&pgmap->ref);
dev_pagemap_kill(pgmap);
for (i = 0; i < pgmap->nr_range; i++)
for_each_device_pfn(pfn, pgmap, i)
put_page(pfn_to_page(pfn));
wait_for_completion(&pgmap->done);
percpu_ref_exit(&pgmap->ref);
dev_pagemap_cleanup(pgmap);
for (i = 0; i < pgmap->nr_range; i++)
pageunmap_range(pgmap, i);
@@ -165,7 +188,8 @@ static void devm_memremap_pages_release(void *data)
static void dev_pagemap_percpu_release(struct percpu_ref *ref)
{
struct dev_pagemap *pgmap = container_of(ref, struct dev_pagemap, ref);
struct dev_pagemap *pgmap =
container_of(ref, struct dev_pagemap, internal_ref);
complete(&pgmap->done);
}
@@ -271,8 +295,8 @@ static int pagemap_range(struct dev_pagemap *pgmap, struct mhp_params *params,
memmap_init_zone_device(&NODE_DATA(nid)->node_zones[ZONE_DEVICE],
PHYS_PFN(range->start),
PHYS_PFN(range_len(range)), pgmap);
percpu_ref_get_many(&pgmap->ref,
pfn_end(pgmap, range_id) - pfn_first(pgmap, range_id));
percpu_ref_get_many(pgmap->ref, pfn_end(pgmap, range_id)
- pfn_first(pgmap, range_id));
return 0;
err_add_memory:
@@ -338,11 +362,22 @@ void *memremap_pages(struct dev_pagemap *pgmap, int nid)
break;
}
init_completion(&pgmap->done);
error = percpu_ref_init(&pgmap->ref, dev_pagemap_percpu_release, 0,
GFP_KERNEL);
if (error)
return ERR_PTR(error);
if (!pgmap->ref) {
if (pgmap->ops && (pgmap->ops->kill || pgmap->ops->cleanup))
return ERR_PTR(-EINVAL);
init_completion(&pgmap->done);
error = percpu_ref_init(&pgmap->internal_ref,
dev_pagemap_percpu_release, 0, GFP_KERNEL);
if (error)
return ERR_PTR(error);
pgmap->ref = &pgmap->internal_ref;
} else {
if (!pgmap->ops || !pgmap->ops->kill || !pgmap->ops->cleanup) {
WARN(1, "Missing reference count teardown definition\n");
return ERR_PTR(-EINVAL);
}
}
devmap_managed_enable_get(pgmap);
@@ -451,7 +486,7 @@ struct dev_pagemap *get_dev_pagemap(unsigned long pfn,
/* fall back to slow path lookup */
rcu_read_lock();
pgmap = xa_load(&pgmap_array, PHYS_PFN(phys));
if (pgmap && !percpu_ref_tryget_live(&pgmap->ref))
if (pgmap && !percpu_ref_tryget_live(pgmap->ref))
pgmap = NULL;
rcu_read_unlock();

View File

@@ -100,17 +100,25 @@ static void nfit_test_kill(void *_pgmap)
{
struct dev_pagemap *pgmap = _pgmap;
WARN_ON(!pgmap);
WARN_ON(!pgmap || !pgmap->ref);
percpu_ref_kill(&pgmap->ref);
if (pgmap->ops && pgmap->ops->kill)
pgmap->ops->kill(pgmap);
else
percpu_ref_kill(pgmap->ref);
wait_for_completion(&pgmap->done);
percpu_ref_exit(&pgmap->ref);
if (pgmap->ops && pgmap->ops->cleanup) {
pgmap->ops->cleanup(pgmap);
} else {
wait_for_completion(&pgmap->done);
percpu_ref_exit(pgmap->ref);
}
}
static void dev_pagemap_percpu_release(struct percpu_ref *ref)
{
struct dev_pagemap *pgmap = container_of(ref, struct dev_pagemap, ref);
struct dev_pagemap *pgmap =
container_of(ref, struct dev_pagemap, internal_ref);
complete(&pgmap->done);
}
@@ -124,11 +132,22 @@ void *__wrap_devm_memremap_pages(struct device *dev, struct dev_pagemap *pgmap)
if (!nfit_res)
return devm_memremap_pages(dev, pgmap);
init_completion(&pgmap->done);
error = percpu_ref_init(&pgmap->ref, dev_pagemap_percpu_release, 0,
GFP_KERNEL);
if (error)
return ERR_PTR(error);
if (!pgmap->ref) {
if (pgmap->ops && (pgmap->ops->kill || pgmap->ops->cleanup))
return ERR_PTR(-EINVAL);
init_completion(&pgmap->done);
error = percpu_ref_init(&pgmap->internal_ref,
dev_pagemap_percpu_release, 0, GFP_KERNEL);
if (error)
return ERR_PTR(error);
pgmap->ref = &pgmap->internal_ref;
} else {
if (!pgmap->ops || !pgmap->ops->kill || !pgmap->ops->cleanup) {
WARN(1, "Missing reference count teardown definition\n");
return ERR_PTR(-EINVAL);
}
}
error = devm_add_action_or_reset(dev, nfit_test_kill, pgmap);
if (error)