From 07525c4e9e9371a71456f1111da0623639f6ab58 Mon Sep 17 00:00:00 2001
From: Alexey Makhalov <amakhalov@vmware.com>
Date: Sat, 4 Feb 2017 04:15:14 +0000
Subject: [PATCH 3/3] Added PAX_RANDKSTACK
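
Randomize each task's kernel stack base on every return to user mode
by XORing a few low TSC bits into thread.sp0. Leaked kernel stack
addresses then go stale quickly, and an attacker has to guess the
current base instead of reusing a known one.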
---
arch/x86/entry/entry_64.S | 16 ++++++++++++++++
arch/x86/kernel/process_64.c | 21 +++++++++++++++++++++
security/Kconfig | 14 ++++++++++++++
3 files changed, 51 insertions(+)
diff --git a/arch/x86/entry/entry_64.S b/arch/x86/entry/entry_64.S
index af4e581..3547f1f 100644
--- a/arch/x86/entry/entry_64.S
+++ b/arch/x86/entry/entry_64.S
@@ -55,6 +55,16 @@ ENTRY(native_usergs_sysret64)
ENDPROC(native_usergs_sysret64)
#endif /* CONFIG_PARAVIRT */
+.macro pax_rand_kstack
+#ifdef CONFIG_PAX_RANDKSTACK
+ pushq %rax /* preserve caller-clobbered registers that */
+ pushq %r11 /* are still live on the exit paths below */
+ call pax_randomize_kstack
+ popq %r11
+ popq %rax
+#endif
+.endm
+
.macro TRACE_IRQS_IRETQ
#ifdef CONFIG_TRACE_IRQFLAGS
bt $9, EFLAGS(%rsp) /* interrupts off? */
@@ -225,6 +235,8 @@ entry_SYSCALL_64_fastpath:
testl $_TIF_ALLWORK_MASK, TASK_TI_flags(%r11)
jnz 1f
+ pax_rand_kstack
+
LOCKDEP_SYS_EXIT
TRACE_IRQS_ON /* user mode is traced as IRQs on */
movq RIP(%rsp), %rcx
@@ -261,6 +273,8 @@ entry_SYSCALL64_slow_path:
call do_syscall_64 /* returns with IRQs disabled */
return_from_SYSCALL_64:
+ pax_rand_kstack
+
TRACE_IRQS_IRETQ /* we're about to change IF */
/*
@@ -449,6 +463,7 @@ ENTRY(ret_from_fork)
2:
movq %rsp, %rdi
call syscall_return_slowpath /* returns with IRQs disabled */
+ pax_rand_kstack
TRACE_IRQS_ON /* user mode is traced as IRQS on */
SWITCH_USER_CR3
SWAPGS
@@ -562,6 +577,7 @@ ret_from_intr:
GLOBAL(retint_user)
mov %rsp,%rdi
call prepare_exit_to_usermode
+ pax_rand_kstack
TRACE_IRQS_IRETQ
SWITCH_USER_CR3
SWAPGS
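
Note the hook coverage above: pax_rand_kstack runs on every path back
to user mode (the syscall fast path, the slow path via
return_from_SYSCALL_64, a child's first return in ret_from_fork, and
interrupt return through retint_user), so the stack base is
re-randomized before each return to user space.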
diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
index 0887d2a..cd76ae8 100644
--- a/arch/x86/kernel/process_64.c
+++ b/arch/x86/kernel/process_64.c
@@ -262,7 +262,13 @@ int copy_thread_tls(unsigned long clone_flags, unsigned long sp,
struct inactive_task_frame *frame;
struct task_struct *me = current;
+#ifdef CONFIG_PAX_RANDKSTACK
+ /* Start sp0 16 bytes below the stack top (c000 -> bff0) so the
+ XOR in pax_randomize_kstack() cannot move it past the top */
+ p->thread.sp0 = (unsigned long)task_stack_page(p) + THREAD_SIZE - 16;
+#else
p->thread.sp0 = (unsigned long)task_stack_page(p) + THREAD_SIZE;
+#endif
childregs = task_pt_regs(p);
fork_frame = container_of(childregs, struct fork_frame, regs);
frame = &fork_frame->frame;
@@ -647,3 +653,18 @@ unsigned long KSTK_ESP(struct task_struct *task)
{
return task_pt_regs(task)->sp;
}
+
+#ifdef CONFIG_PAX_RANDKSTACK
+void pax_randomize_kstack(void)
+{
+ struct thread_struct *thread = &current->thread;
+ unsigned long time;
+
+ if (!randomize_va_space)
+ return;
+
+ time = rdtsc() & 0xFUL; /* 4 low TSC bits as cheap entropy */
+ thread->sp0 ^= (time << 4); /* toggle bits 4-7: 16 slots, 16 bytes apart */
+ load_sp0(&per_cpu(cpu_tss, smp_processor_id()), thread);
+}
+#endif
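
Taken together, the two hunks above bound where sp0 can land. Below is
a minimal user-space sketch of the arithmetic; the stack-top address is
hypothetical, standing in for task_stack_page() + THREAD_SIZE:

    #include <stdio.h>

    int main(void)
    {
            /* Hypothetical 16 KiB-aligned stack top; only the low
             * bits matter for the window calculation. */
            unsigned long top = 0xffffc9000000c000UL;
            unsigned long sp0 = top - 16;  /* bias from copy_thread_tls() */
            unsigned long time;

            /* pax_randomize_kstack() XORs 4 TSC bits shifted left by 4,
             * so only bits 4-7 of sp0 change: 16 slots, 16 bytes apart. */
            for (time = 0; time <= 0xFUL; time++)
                    printf("time=%2lu sp0=%#lx (top - %lu)\n",
                           time, sp0 ^ (time << 4),
                           top - (sp0 ^ (time << 4)));
            return 0;
    }

Every result stays in [top - 256, top - 16]. Because XOR only toggles
bits 4-7, repeated randomization on later system calls also stays
inside that window; without the -16 bias the XOR could move sp0 above
the stack top, which is exactly the overflow the comment in
copy_thread_tls() warns about.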
diff --git a/security/Kconfig b/security/Kconfig
index d5641c4..d649311 100644
--- a/security/Kconfig
+++ b/security/Kconfig
@@ -99,6 +99,20 @@ config PAX_RAP
i.e., gcc 4.5 or newer. You may need to install the supporting
headers explicitly in addition to the normal gcc package.
+config PAX_RANDKSTACK
+ bool "Randomize kernel stack base"
+ depends on X86_TSC && X86
+ help
+ By saying Y here you make the kernel randomize every task's kernel
+ stack base on every system call. This not only forces an attacker
+ to guess the stack location but also prevents them from making use
+ of any information leaked about it.
+
+ Since the kernel stack is a rather scarce resource, randomization
+ may cause unexpected stack overflows, so test your system carefully
+ with this enabled. Note that once enabled in the kernel
+ configuration, this feature cannot be disabled on a per-file basis.
+
endif
source security/keys/Kconfig
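
A runtime note: pax_randomize_kstack() returns early when
randomize_va_space is 0, so the feature follows the standard ASLR
sysctl even when built in. A quick user-space check (standard procfs
path; the message assumes a kernel built with CONFIG_PAX_RANDKSTACK=y):

    #include <stdio.h>

    int main(void)
    {
            FILE *f = fopen("/proc/sys/kernel/randomize_va_space", "r");
            int val = -1;

            if (!f) {
                    perror("randomize_va_space");
                    return 1;
            }
            if (fscanf(f, "%d", &val) != 1)
                    val = -1;
            fclose(f);

            /* pax_randomize_kstack() bails out when this sysctl is 0 */
            printf("randomize_va_space = %d: kernel stack randomization %s\n",
                   val, val > 0 ? "active (if built in)" : "disabled");
            return 0;
    }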