ANDROID: add support for clang Shadow Call Stack (SCS)

This change adds generic support for clang's Shadow Call Stack, which
uses a shadow stack to protect return addresses from being overwritten
by an attacker. Details are available here:

  https://clang.llvm.org/docs/ShadowCallStack.html

Bug: 112277034
Change-Id: Idd553b7c978b0673ab533a68980fb9a654f4510c
Signed-off-by: Sami Tolvanen <samitolvanen@google.com>
This commit is contained in:
Sami Tolvanen
2018-05-02 10:44:59 -07:00
parent be002810fd
commit f344424f3c
10 changed files with 211 additions and 0 deletions

View File

@@ -711,6 +711,13 @@ DISABLE_LTO += $(DISABLE_CFI)
export DISABLE_CFI
endif
ifdef CONFIG_SHADOW_CALL_STACK
# Instrument every C object with clang's Shadow Call Stack.
scs-flags := -fsanitize=shadow-call-stack
KBUILD_CFLAGS += $(scs-flags)
# DISABLE_SCS lets arch/driver Makefiles opt individual objects out of SCS
# (mirrors the DISABLE_CFI pattern above).
DISABLE_SCS := -fno-sanitize=shadow-call-stack
export DISABLE_SCS
endif
ifdef CONFIG_CC_OPTIMIZE_FOR_SIZE
KBUILD_CFLAGS += $(call cc-option,-Oz,-Os)
KBUILD_CFLAGS += $(call cc-disable-warning,maybe-uninitialized,)
@@ -1200,6 +1207,11 @@ ifdef cfi-flags
ifeq ($(call cc-option, $(cfi-flags)),)
@echo Cannot use CONFIG_CFI: $(cfi-flags) not supported by compiler >&2 && exit 1
endif
endif
ifdef scs-flags
# Fail the build early with a clear message if the compiler does not
# support -fsanitize=shadow-call-stack (same pattern as the CFI check).
ifeq ($(call cc-option, $(scs-flags)),)
@echo Cannot use CONFIG_SHADOW_CALL_STACK: $(scs-flags) not supported by compiler >&2 && exit 1
endif
endif
@:

View File

@@ -574,6 +574,37 @@ config CFI_CLANG_SHADOW
If you select this option, the kernel builds a fast look-up table of
CFI check functions in loaded modules to reduce overhead.
# Selected by architectures that provide asm/scs.h and the runtime
# shadow-stack switching glue; SHADOW_CALL_STACK depends on it below.
config ARCH_SUPPORTS_SHADOW_CALL_STACK
	bool
	help
	  An architecture should select this if it supports clang's Shadow
	  Call Stack, has asm/scs.h, and implements runtime support for shadow
	  stack switching.

# ROP protections are mutually exclusive, hence a choice group.
choice
	prompt "Return-oriented programming (ROP) protection"
	default ROP_PROTECTION_NONE
	help
	  This option controls kernel protections against return-oriented
	  programming (ROP) attacks, which involve overwriting function return
	  addresses.

config ROP_PROTECTION_NONE
	bool "None"

config SHADOW_CALL_STACK
	bool "clang Shadow Call Stack (EXPERIMENTAL)"
	depends on ARCH_SUPPORTS_SHADOW_CALL_STACK
	help
	  This option enables clang's Shadow Call Stack, which uses a shadow
	  stack to protect function return addresses from being overwritten by
	  an attacker. More information can be found from clang's
	  documentation:

	    https://clang.llvm.org/docs/ShadowCallStack.html

endchoice
config HAVE_ARCH_WITHIN_STACK_FRAMES
bool
help

View File

@@ -33,6 +33,8 @@
#define __nocfi __attribute__((no_sanitize("cfi")))
#endif
/* __noscs: exclude a function from Shadow Call Stack instrumentation. */
#define __noscs __attribute__((no_sanitize("shadow-call-stack")))
/* all clang versions usable with the kernel support KASAN ABI version 5 */
#define KASAN_ABI_VERSION 5

View File

@@ -459,6 +459,10 @@ static __always_inline void __write_once_size(volatile void *p, void *res, int s
#define __nocfi
#endif
/* Fallback: expand __noscs to nothing for compilers without SCS support. */
#ifndef __noscs
#define __noscs
#endif
/*
* Assume alignment of return value.
*/

66
include/linux/scs.h Normal file
View File

@@ -0,0 +1,66 @@
/*
* Shadow Call Stack support.
*
* Copyright (C) 2018 Google LLC
*/
#ifndef _LINUX_SCS_H
#define _LINUX_SCS_H
#ifdef CONFIG_SHADOW_CALL_STACK
#include <linux/gfp.h>
#include <linux/sched.h>
#include <asm/page.h>
/* Size of each task's shadow call stack, in bytes. */
#define SCS_SIZE 512
/* Allocation flags for shadow stacks; zeroed so stale return addresses never leak. */
#define SCS_GFP (GFP_KERNEL | __GFP_ZERO)

/* Statically allocated shadow stack for the init task (defined in init_task.c). */
extern unsigned long init_shadow_call_stack[];
static inline void *task_scs(struct task_struct *tsk)
{
return task_thread_info(tsk)->shadow_call_stack;
}
static inline void task_set_scs(struct task_struct *tsk, void *s)
{
task_thread_info(tsk)->shadow_call_stack = s;
}
/*
 * Shadow stack lifecycle, implemented in kernel/scs.c:
 *  - scs_set_init_magic(): install the overflow magic on init_task's stack
 *  - scs_task_init():      clear a task's shadow stack pointer
 *  - scs_prepare():        allocate and initialize a stack for a new task
 *  - scs_release():        verify the magic and free a task's stack
 */
extern void scs_set_init_magic(struct task_struct *tsk);
extern void scs_task_init(struct task_struct *tsk);
extern int scs_prepare(struct task_struct *tsk, int node);
extern void scs_release(struct task_struct *tsk);
#else /* CONFIG_SHADOW_CALL_STACK */
/*
 * !CONFIG_SHADOW_CALL_STACK stub: there is no shadow stack.
 * Fix: return NULL rather than 0 — this is a pointer-returning
 * function, and the null pointer constant should be spelled NULL.
 */
static inline void *task_scs(struct task_struct *tsk)
{
	return NULL;
}
/*
 * !CONFIG_SHADOW_CALL_STACK: no-op stubs so callers (fork, free_task,
 * start_kernel) need no #ifdefs.
 */
static inline void task_set_scs(struct task_struct *tsk, void *s)
{
}
static inline void scs_set_init_magic(struct task_struct *tsk)
{
}
static inline void scs_task_init(struct task_struct *tsk)
{
}
static inline int scs_prepare(struct task_struct *tsk, int node)
{
	return 0;
}
static inline void scs_release(struct task_struct *tsk)
{
}
#endif /* CONFIG_SHADOW_CALL_STACK */
#endif /* _LINUX_SCS_H */

View File

@@ -7,6 +7,7 @@
#include <linux/init.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/scs.h>
#include <asm/pgtable.h>
#include <asm/uaccess.h>
@@ -14,6 +15,11 @@
static struct signal_struct init_signals = INIT_SIGNALS(init_signals);
static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand);
#ifdef CONFIG_SHADOW_CALL_STACK
/*
 * Statically allocated shadow stack for the init task.  Aligned to
 * SCS_SIZE so __scs_base() can recover the base by masking low bits.
 */
unsigned long init_shadow_call_stack[SCS_SIZE / sizeof(long)]
	__init_task_data __aligned(SCS_SIZE);
#endif
/* Initial task structure */
struct task_struct init_task = INIT_TASK(init_task);
EXPORT_SYMBOL(init_task);

View File

@@ -82,6 +82,7 @@
#include <linux/io.h>
#include <linux/kaiser.h>
#include <linux/cache.h>
#include <linux/scs.h>
#include <asm/io.h>
#include <asm/bugs.h>
@@ -484,6 +485,8 @@ asmlinkage __visible void __init start_kernel(void)
char *after_dashes;
set_task_stack_end_magic(&init_task);
scs_set_init_magic(&init_task);
smp_setup_processor_id();
debug_objects_early_init();

View File

@@ -106,6 +106,7 @@ obj-$(CONFIG_IRQ_WORK) += irq_work.o
obj-$(CONFIG_CPU_PM) += cpu_pm.o
obj-$(CONFIG_BPF) += bpf/
obj-$(CONFIG_CFI_CLANG) += cfi.o
obj-$(CONFIG_SHADOW_CALL_STACK) += scs.o
obj-$(CONFIG_PERF_EVENTS) += events/

View File

@@ -78,6 +78,7 @@
#include <linux/sysctl.h>
#include <linux/kcov.h>
#include <linux/cpufreq_times.h>
#include <linux/scs.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
@@ -343,6 +344,8 @@ void put_task_stack(struct task_struct *tsk)
void free_task(struct task_struct *tsk)
{
scs_release(tsk);
#ifndef CONFIG_THREAD_INFO_IN_TASK
/*
* The task is finally done with both the stack and thread_info,
@@ -532,6 +535,7 @@ static struct task_struct *dup_task_struct(struct task_struct *orig, int node)
clear_user_return_notifier(tsk);
clear_tsk_need_resched(tsk);
set_task_stack_end_magic(tsk);
scs_task_init(tsk);
#ifdef CONFIG_CC_STACKPROTECTOR
tsk->stack_canary = get_random_long();
@@ -1707,6 +1711,9 @@ static __latent_entropy struct task_struct *copy_process(
retval = copy_thread_tls(clone_flags, stack_start, stack_size, p, tls);
if (retval)
goto bad_fork_cleanup_io;
retval = scs_prepare(p, node);
if (retval)
goto bad_fork_cleanup_thread;
if (pid != &init_struct_pid) {
pid = alloc_pid(p->nsproxy->pid_ns_for_children);

79
kernel/scs.c Normal file
View File

@@ -0,0 +1,79 @@
/*
* Shadow Call Stack support.
*
* Copyright (C) 2018 Google LLC
*/
#include <linux/cpuhotplug.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/scs.h>
#include <asm/scs.h>
/*
 * Magic value written to the last slot of each shadow stack; checked in
 * scs_release() to detect shadow stack overflow.
 */
#define SCS_END_MAGIC 0xaf0194819b1635f6UL
/*
 * Recover the base of the task's shadow stack by masking off the low
 * bits of the (possibly advanced) shadow stack pointer.  Relies on the
 * stack being SCS_SIZE-aligned.
 */
static inline void *__scs_base(struct task_struct *tsk)
{
	uintptr_t p = (uintptr_t)task_scs(tsk);

	return (void *)(p & ~((uintptr_t)SCS_SIZE - 1));
}
/*
 * Allocate a zeroed shadow call stack.
 * Fix: the original ignored @node; use kmalloc_node() so the shadow
 * stack is allocated on the NUMA node scs_prepare() passes down from
 * fork, matching how the regular task stack is allocated.
 * NOTE(review): __scs_base() masks with SCS_SIZE-1, so this allocation
 * must be SCS_SIZE-aligned — kmalloc of a power-of-two size provides
 * that with SLAB/SLUB; verify against the allocator in use.
 */
static inline void *scs_alloc(int node)
{
	return kmalloc_node(SCS_SIZE, SCS_GFP, node);
}
/* Free a shadow stack previously returned by scs_alloc(). */
static inline void scs_free(void *s)
{
	kfree(s);
}
/* Return a pointer to the magic slot: the last long in the shadow stack. */
static inline unsigned long *scs_magic(struct task_struct *tsk)
{
	void *base = __scs_base(tsk);

	return (unsigned long *)((char *)base + SCS_SIZE) - 1;
}
/* Write the overflow-detection magic into the task's shadow stack. */
static inline void scs_set_magic(struct task_struct *tsk)
{
	*scs_magic(tsk) = SCS_END_MAGIC;
}
/* Clear the task's shadow stack pointer (no stack assigned yet). */
void scs_task_init(struct task_struct *tsk)
{
	task_set_scs(tsk, NULL);
}
/*
 * Install the end magic on init_task's statically allocated shadow stack.
 * The arch-provided scs_save()/scs_load() bracket the write so the live
 * shadow stack pointer is preserved across it — this ordering is
 * deliberate; do not reorder.
 */
void scs_set_init_magic(struct task_struct *tsk)
{
	scs_save(tsk);
	scs_set_magic(tsk);
	scs_load(tsk);
}
/*
 * Allocate and initialize a shadow stack for a freshly forked task.
 * Returns 0 on success, -ENOMEM if the allocation fails.
 */
int scs_prepare(struct task_struct *tsk, int node)
{
	void *stack = scs_alloc(node);

	if (!stack)
		return -ENOMEM;

	task_set_scs(tsk, stack);
	scs_set_magic(tsk);

	return 0;
}
/*
 * Tear down a task's shadow stack: verify the end magic (BUG on
 * overflow), clear the task's pointer, then free the stack.  A task
 * without a shadow stack (base == NULL) is a no-op.
 */
void scs_release(struct task_struct *tsk)
{
	void *base = __scs_base(tsk);

	if (!base)
		return;

	BUG_ON(*scs_magic(tsk) != SCS_END_MAGIC);

	scs_task_init(tsk);
	scs_free(base);
}