Revert "ANDROID: mm/slab: Create 64-byte slab caches if the hardware supports it"
This reverts commit 8a7073c106.
When the size passed to a kmalloc() call is not known at compile time,
__kmalloc() is invoked to satisfy the allocation request. __kmalloc()
is tagged with the __assume_aligned compiler attribute, with a value
of ARCH_KMALLOC_MINALIGN, which means that pointers returned by
__kmalloc() are guaranteed to be aligned to ARCH_KMALLOC_MINALIGN.
For ARM64, ARCH_KMALLOC_MINALIGN is defined as 128 bytes. However,
when the android_kmalloc_64_create command line parameter is used,
the assumption that pointers from __kmalloc() are aligned to 128 bytes
is no longer true, as allocations can be satisfied from the 64 byte slab
caches, and those objects would be 64 byte aligned.
This inconsistency between the runtime behavior of __kmalloc() and
the expected behavior of __kmalloc() that was established at compile
time, can lead to a pointer from __kmalloc() being operated on
incorrectly.
Consider the following case:
static void foo() {
unsigned long buf_offset;
u8 *data_buf = __kmalloc(size, gfp);
...
buf_offset = offset_in_page(data_buf);
}
Assuming a 4 KB page size, the offset_in_page() macro should compile to
just:
data_buf & 0xfff
However, an examination of the code that the Clang compiler emits
revealed that the offset_in_page() operation in this case evaluates to:
data_buf & 0xf80
This occurs because the code for offset_in_page() is inlined into the
body of foo, which obtains a pointer directly from invoking __kmalloc().
When deciding what constant to use to calculate the offset into the
page, the compiler can use its knowledge of the alignment of pointers
from __kmalloc() to generate a mask that may be more optimal for
further operations (note that the mask that is used--0xf80--is just
0xfff aligned to 128 bytes).
The scenario described above is problematic if a buffer that was
allocated through kmalloc is 64-byte aligned, as the offset of the
buffer into the page will be calculated incorrectly, which can cause
inconsistencies about where the start of the buffer is (e.g. when
DMA-mapping buffers for peripherals).
Thus, remove support for android_kmalloc_64_create to avoid creating
this mismatch in the behavior of __kmalloc().
Bug: 288214403
Bug: 291097092
Change-Id: Ie22df9f5918253d23b8c2a0c0d64f54a710b0514
[isaacmanjarres: keeping the android_kmalloc_64_create variable to
preserve the KMI.]
Signed-off-by: Isaac J. Manjarres <isaacmanjarres@google.com>
This commit is contained in:
committed by
Isaac Manjarres
parent
4b24277c7b
commit
ab27b98371
@@ -338,10 +338,6 @@
|
||||
connected to one of 16 gameports
|
||||
Format: <type1>,<type2>,..<type16>
|
||||
|
||||
android_kmalloc_64_create [MM]
|
||||
Creates all kmalloc variants of the 64-byte slab cache,
|
||||
if the hardware supports it.
|
||||
|
||||
apc= [HW,SPARC]
|
||||
Power management functions (SPARCstation-4/5 + deriv.)
|
||||
Format: noidle
|
||||
|
||||
@@ -343,8 +343,6 @@ enum kmalloc_cache_type {
|
||||
extern struct kmem_cache *
|
||||
kmalloc_caches[NR_KMALLOC_TYPES][KMALLOC_SHIFT_HIGH + 1];
|
||||
|
||||
extern bool android_kmalloc_64_create;
|
||||
|
||||
/*
|
||||
* Define gfp bits that should not be set for KMALLOC_NORMAL.
|
||||
*/
|
||||
@@ -396,9 +394,6 @@ static __always_inline unsigned int __kmalloc_index(size_t size,
|
||||
if (!size)
|
||||
return 0;
|
||||
|
||||
if (android_kmalloc_64_create && size <= 64)
|
||||
return 6;
|
||||
|
||||
if (size <= KMALLOC_MIN_SIZE)
|
||||
return KMALLOC_SHIFT_LOW;
|
||||
|
||||
|
||||
@@ -646,15 +646,6 @@ EXPORT_SYMBOL_GPL(kmem_dump_obj);
|
||||
#endif
|
||||
|
||||
#ifndef CONFIG_SLOB
|
||||
static int __init setup_android_kmalloc_64_create(char *str)
|
||||
{
|
||||
if (IS_ALIGNED(64, cache_line_size()))
|
||||
android_kmalloc_64_create = true;
|
||||
|
||||
return 1;
|
||||
}
|
||||
__setup("android_kmalloc_64_create", setup_android_kmalloc_64_create);
|
||||
|
||||
/* Create a cache during boot when no slab services are available yet */
|
||||
void __init create_boot_cache(struct kmem_cache *s, const char *name,
|
||||
unsigned int size, slab_flags_t flags,
|
||||
@@ -663,14 +654,6 @@ void __init create_boot_cache(struct kmem_cache *s, const char *name,
|
||||
int err;
|
||||
unsigned int align = ARCH_KMALLOC_MINALIGN;
|
||||
|
||||
/*
|
||||
* Ensure object alignment is 64. Otherwise, it can be larger
|
||||
* (e.g. 128 with ARM64), which causes SLUB to increase the object
|
||||
* size to 128 bytes to conform with the alignment.
|
||||
*/
|
||||
if (android_kmalloc_64_create && size == 64)
|
||||
align = 64;
|
||||
|
||||
s->name = name;
|
||||
s->size = s->object_size = size;
|
||||
|
||||
@@ -715,6 +698,7 @@ kmalloc_caches[NR_KMALLOC_TYPES][KMALLOC_SHIFT_HIGH + 1] __ro_after_init =
|
||||
{ /* initialization for https://bugs.llvm.org/show_bug.cgi?id=42570 */ };
|
||||
EXPORT_SYMBOL(kmalloc_caches);
|
||||
|
||||
/* This variable is intentionally unused. Preserved for KMI stability. */
|
||||
bool android_kmalloc_64_create __ro_after_init;
|
||||
EXPORT_SYMBOL(android_kmalloc_64_create);
|
||||
|
||||
@@ -864,10 +848,6 @@ void __init setup_kmalloc_cache_index_table(void)
|
||||
size_index[elem] = KMALLOC_SHIFT_LOW;
|
||||
}
|
||||
|
||||
if (android_kmalloc_64_create)
|
||||
for (i = 8; i <= 64; i += 8)
|
||||
size_index[size_index_elem(i)] = 6;
|
||||
|
||||
if (KMALLOC_MIN_SIZE >= 64) {
|
||||
/*
|
||||
* The 96 byte size cache is not used if the alignment
|
||||
@@ -925,10 +905,6 @@ void __init create_kmalloc_caches(slab_flags_t flags)
|
||||
int i;
|
||||
enum kmalloc_cache_type type;
|
||||
|
||||
if (android_kmalloc_64_create)
|
||||
for (type = KMALLOC_NORMAL; type <= KMALLOC_RECLAIM; type++)
|
||||
new_kmalloc_cache(6, type, flags);
|
||||
|
||||
/*
|
||||
* Including KMALLOC_CGROUP if CONFIG_MEMCG_KMEM defined
|
||||
*/
|
||||
|
||||
Reference in New Issue
Block a user