From 07525c4e9e9371a71456f1111da0623639f6ab58 Mon Sep 17 00:00:00 2001
From: Alexey Makhalov <amakhalov@vmware.com>
Date: Sat, 4 Feb 2017 04:15:14 +0000
Subject: [PATCH 3/3] Add PAX_RANDKSTACK kernel stack base randomization

Randomize the base of each task's kernel stack on every return to user
mode.  A new pax_rand_kstack assembly macro calls pax_randomize_kstack()
on the syscall fast and slow exit paths, on ret_from_fork and on return
from interrupt (retint_user).  That function XORs a TSC-derived offset
(bits 4..7, i.e. 16-byte steps within a 256-byte window) into
thread.sp0 and reloads the per-CPU TSS via load_sp0().

At fork time sp0 is biased 16 bytes below the stack top so the XOR can
only move the stack downwards, never past the top of its allocation.
Randomization is skipped when the randomize_va_space sysctl is 0.
---
 arch/x86/entry/entry_64.S    | 16 ++++++++++++++++
 arch/x86/kernel/process_64.c | 21 +++++++++++++++++++++
 security/Kconfig             | 14 ++++++++++++++
 3 files changed, 51 insertions(+)
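
Notes: the sketch below is a user-space illustration of the
randomization arithmetic, not kernel code (the stack-top address in it
is made up).  It shows why the -16 bias applied in copy_thread_tls()
matters: only bits 4..7 of sp0 are ever toggled, so every randomized
value stays 16-byte aligned within [top - 256, top - 16] and can never
move above the stack's allocation.

/* Illustration only: simulate the sp0 randomization performed by
 * pax_randomize_kstack() and check its bounds. */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	const uint64_t top = 0xffffc90000004000ULL; /* made-up, THREAD_SIZE-aligned stack top */
	uint64_t sp0 = top - 16;                    /* fork-time bias: ...4000 -> ...3ff0 */
	unsigned int tsc;

	/* One iteration per "system call"; the kernel uses rdtsc() & 0xf
	 * as the entropy source, here we simply sweep all 16 values. */
	for (tsc = 0; tsc < 16; tsc++) {
		sp0 ^= (uint64_t)tsc << 4;  /* toggle bits 4..7 only */
		assert(sp0 <= top - 16);    /* never above the allocation */
		assert(sp0 >= top - 256);   /* 16 slots, 16 bytes apart */
		assert((sp0 & 0xf) == 0);   /* still 16-byte aligned */
	}
	printf("sp0 stayed within [top-256, top-16]\n");
	return 0;
}

Since pax_randomize_kstack() returns early when randomize_va_space is
0, the mechanism can still be disabled at run time through the
kernel.randomize_va_space sysctl even when CONFIG_PAX_RANDKSTACK=y.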

diff --git a/arch/x86/entry/entry_64.S b/arch/x86/entry/entry_64.S
index be9df51..7f18cd3 100644
--- a/arch/x86/entry/entry_64.S
+++ b/arch/x86/entry/entry_64.S
@@ -54,6 +54,16 @@ ENTRY(native_usergs_sysret64)
 ENDPROC(native_usergs_sysret64)
 #endif /* CONFIG_PARAVIRT */
 
+.macro pax_rand_kstack	/* randomize thread.sp0 before returning to user mode */
+#ifdef CONFIG_PAX_RANDKSTACK
+	pushq	%rax
+	pushq	%r11
+	call	pax_randomize_kstack
+	popq	%r11
+	popq	%rax
+#endif
+.endm
+
 .macro TRACE_IRQS_FLAGS flags:req
 #ifdef CONFIG_TRACE_IRQFLAGS
 	bt	$9, \flags		/* interrupts off? */
@@ -227,6 +237,8 @@ entry_SYSCALL_64_fastpath:
 	testl	$_TIF_ALLWORK_MASK, TASK_TI_flags(%r11)
 	jnz	1f
 
+	pax_rand_kstack
+
 	LOCKDEP_SYS_EXIT
 	TRACE_IRQS_ON		/* user mode is traced as IRQs on */
 	movq	RIP(%rsp), %rcx
@@ -255,6 +267,8 @@ entry_SYSCALL64_slow_path:
 	call	do_syscall_64		/* returns with IRQs disabled */
 
 return_from_SYSCALL_64:
+	pax_rand_kstack
+
 	RESTORE_EXTRA_REGS
 	TRACE_IRQS_IRETQ		/* we're about to change IF */
 
@@ -427,6 +441,7 @@ ENTRY(ret_from_fork)
 2:
 	movq	%rsp, %rdi
 	call	syscall_return_slowpath	/* returns with IRQs disabled */
+	pax_rand_kstack
 	TRACE_IRQS_ON			/* user mode is traced as IRQS on */
 	SWAPGS
 	jmp	restore_regs_and_iret
@@ -538,6 +553,7 @@ ret_from_intr:
 GLOBAL(retint_user)
 	mov	%rsp,%rdi
 	call	prepare_exit_to_usermode
+	pax_rand_kstack
 	TRACE_IRQS_IRETQ
 	SWAPGS
 	jmp	restore_regs_and_iret
diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
index 0887d2a..cd76ae8 100644
--- a/arch/x86/kernel/process_64.c
+++ b/arch/x86/kernel/process_64.c
@@ -262,7 +262,13 @@ int copy_thread_tls(unsigned long clone_flags, unsigned long sp,
 	struct inactive_task_frame *frame;
 	struct task_struct *me = current;
 
+#ifdef CONFIG_PAX_RANDKSTACK
+	/* Bias sp0 to start 16 bytes below the stack top (c000 -> bff0)
+	 * so that randomization cannot move it past the top of the stack */
+	p->thread.sp0 = (unsigned long)task_stack_page(p) + THREAD_SIZE - 16;
+#else
 	p->thread.sp0 = (unsigned long)task_stack_page(p) + THREAD_SIZE;
+#endif
 	childregs = task_pt_regs(p);
 	fork_frame = container_of(childregs, struct fork_frame, regs);
 	frame = &fork_frame->frame;
@@ -647,3 +653,18 @@ unsigned long KSTK_ESP(struct task_struct *task)
 {
 	return task_pt_regs(task)->sp;
 }
+
+#ifdef CONFIG_PAX_RANDKSTACK
+void pax_randomize_kstack(void)
+{
+	struct thread_struct *thread = &current->thread;
+	unsigned long time;
+
+	if (!randomize_va_space)
+		return;
+
+	time = rdtsc() & 0xFUL;		/* low 4 TSC bits as cheap entropy */
+	thread->sp0 ^= (time << 4);	/* 16-byte steps in a 256-byte window */
+	load_sp0(&per_cpu(cpu_tss, smp_processor_id()), thread);
+}
+#endif
diff --git a/security/Kconfig b/security/Kconfig
index c18980a..78fb7e1 100644
--- a/security/Kconfig
+++ b/security/Kconfig
@@ -99,6 +99,20 @@ config PAX_RAP
 	  i.e., gcc 4.5 or newer.  You may need to install the supporting
 	  headers explicitly in addition to the normal gcc package.
 
+config PAX_RANDKSTACK
+	bool "Randomize kernel stack base"
+	depends on X86_TSC && X86
+	help
+	  If you say Y here, the kernel will randomize every task's kernel
+	  stack base on every return to user mode.  This not only forces an
+	  attacker to guess the stack's location but also prevents the use
+	  of any information about it that may have leaked.
+
+	  Since the kernel stack is a rather scarce resource, randomization
+	  may cause unexpected stack overflows; you should therefore test
+	  your system very carefully.  Randomization is skipped at run time
+	  when the kernel.randomize_va_space sysctl is set to 0.
+
 endif
 
 source security/keys/Kconfig
-- 
2.8.1