From dabb099b501e51689f25f0068a82c19e6a21d887 Mon Sep 17 00:00:00 2001
From: Fangrui Song <maskray@google.com>
Date: Mon, 2 Nov 2020 17:23:58 -0800
Subject: [PATCH] BACKPORT: x86/lib: Change .weak to SYM_FUNC_START_WEAK for
 arch/x86/lib/mem*_64.S

Commit 393f203f5fd5 ("x86_64: kasan: add interceptors for
memset/memmove/memcpy functions") added .weak directives to
arch/x86/lib/mem*_64.S instead of changing the existing ENTRY macros to
WEAK. This can lead to the assembly snippet

  .weak memcpy
  ...
  .globl memcpy

which will produce a STB_WEAK memcpy with GNU as but STB_GLOBAL memcpy
with LLVM's integrated assembler before LLVM 12. LLVM 12 (since
https://reviews.llvm.org/D90108) will error on such an overridden
symbol binding.

Commit ef1e03152cb0 ("x86/asm: Make some functions local") changed
ENTRY in arch/x86/lib/memcpy_64.S to SYM_FUNC_START_LOCAL, which was
ineffective due to the preceding .weak directive.

Use the appropriate SYM_FUNC_START_WEAK instead.

Fixes: 393f203f5fd5 ("x86_64: kasan: add interceptors for memset/memmove/memcpy functions")
Fixes: ef1e03152cb0 ("x86/asm: Make some functions local")
Reported-by: Sami Tolvanen <samitolvanen@google.com>
Change-Id: I77420199abb62cacbed4de8d3b244f77e43a7f38
Signed-off-by: Fangrui Song <maskray@google.com>
Signed-off-by: Borislav Petkov <bp@suse.de>
Reviewed-by: Nick Desaulniers <ndesaulniers@google.com>
Tested-by: Nathan Chancellor <natechancellor@gmail.com>
Tested-by: Nick Desaulniers <ndesaulniers@google.com>
Cc: <stable@vger.kernel.org>
Link: https://lkml.kernel.org/r/20201103012358.168682-1-maskray@google.com
Signed-off-by: Naveen <133593113+elohim-etz@users.noreply.github.com>
---
 arch/x86/lib/memcpy_64.S  | 4 +---
 arch/x86/lib/memmove_64.S | 3 +--
 arch/x86/lib/memset_64.S  | 4 +---
 3 files changed, 3 insertions(+), 8 deletions(-)

diff --git a/arch/x86/lib/memcpy_64.S b/arch/x86/lib/memcpy_64.S
index 9a53a06e5a3e..e9cb38ebbc7e 100644
--- a/arch/x86/lib/memcpy_64.S
+++ b/arch/x86/lib/memcpy_64.S
@@ -13,8 +13,6 @@
  * to a jmp to memcpy_erms which does the REP; MOVSB mem copy.
  */
 
-.weak memcpy
-
 /*
  * memcpy - Copy a memory block.
  *
@@ -27,7 +25,7 @@
  * rax original destination
  */
 ENTRY(__memcpy)
-ENTRY(memcpy)
+WEAK(memcpy)
 	ALTERNATIVE_2 "jmp memcpy_orig", "", X86_FEATURE_REP_GOOD, \
 		      "jmp memcpy_erms", X86_FEATURE_ERMS
 
diff --git a/arch/x86/lib/memmove_64.S b/arch/x86/lib/memmove_64.S
index bbec69d8223b..1ef53d8a8ade 100644
--- a/arch/x86/lib/memmove_64.S
+++ b/arch/x86/lib/memmove_64.S
@@ -24,9 +24,8 @@
  * Output:
  * rax: dest
  */
-.weak memmove
 
-ENTRY(memmove)
+WEAK(memmove)
 ENTRY(__memmove)
 
 	/* Handle more 32 bytes in loop */
diff --git a/arch/x86/lib/memset_64.S b/arch/x86/lib/memset_64.S
index 9bc861c71e75..b6e4238b2258 100644
--- a/arch/x86/lib/memset_64.S
+++ b/arch/x86/lib/memset_64.S
@@ -6,8 +6,6 @@
 #include <asm/cpufeatures.h>
 #include <asm/alternative-asm.h>
 
-.weak memset
-
 /*
  * ISO C memset - set a memory block to a byte value. This function uses fast
  * string to get better performance than the original function. The code is
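
Note (editorial, not part of the patch): the mainline subject refers to
SYM_FUNC_START_WEAK, but this tree still uses ENTRY(), so the backport
substitutes the older WEAK() macro from <linux/linkage.h>, which likewise
marks the symbol .weak at its definition point and never emits a later
.globl that could override the binding.

As an illustrative sketch of the binding divergence described in the
commit message (the file name repro.s is hypothetical, and the expected
bindings restate the commit message rather than captured assembler
output):

  # repro.s - mirrors the .weak-then-.globl pattern from mem*_64.S
  .weak memcpy
  .globl memcpy   # roughly what the ENTRY() macro emits for the symbol
  memcpy:
  	ret

  $ as repro.s -o repro.o && readelf -s repro.o | grep memcpy
  # GNU as: memcpy keeps the earlier STB_WEAK binding
  $ clang -c repro.s -o repro.o
  # LLVM integrated assembler before 12: STB_GLOBAL; LLVM 12+: error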