www.infradead.org Git - users/jedix/linux-maple.git/commitdiff
x86/retpoline/entry: Convert entry assembler indirect jumps
author: David Woodhouse <dwmw@amazon.co.uk>
Thu, 11 Jan 2018 21:46:28 +0000 (21:46 +0000)
committer: Jack Vogel <jack.vogel@oracle.com>
Wed, 7 Feb 2018 20:34:33 +0000 (12:34 -0800)
commit 2641f08bb7fc63a636a2b18173221d7040a3512e upstream.

Convert indirect jumps in core 32/64bit entry assembler code to use
non-speculative sequences when CONFIG_RETPOLINE is enabled.

Don't use CALL_NOSPEC in entry_SYSCALL_64_fastpath because the return
address after the 'call' instruction must be *precisely* at the
.Lentry_SYSCALL_64_after_fastpath label for stub_ptregs_64 to work,
and the use of alternatives will mess that up unless we play horrid
games to prepend with NOPs and make the variants the same length. It's
not worth it; in the case where we ALTERNATIVE out the retpoline, the
first instruction at __x86.indirect_thunk.rax is going to be a bare
jmp *%rax anyway.

Signed-off-by: David Woodhouse <dwmw@amazon.co.uk>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Acked-by: Ingo Molnar <mingo@kernel.org>
Acked-by: Arjan van de Ven <arjan@linux.intel.com>
Cc: gnomes@lxorguk.ukuu.org.uk
Cc: Rik van Riel <riel@redhat.com>
Cc: Andi Kleen <ak@linux.intel.com>
Cc: Josh Poimboeuf <jpoimboe@redhat.com>
Cc: thomas.lendacky@amd.com
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Jiri Kosina <jikos@kernel.org>
Cc: Andy Lutomirski <luto@amacapital.net>
Cc: Dave Hansen <dave.hansen@intel.com>
Cc: Kees Cook <keescook@google.com>
Cc: Tim Chen <tim.c.chen@linux.intel.com>
Cc: Greg Kroah-Hartman <gregkh@linux-foundation.org>
Cc: Paul Turner <pjt@google.com>
Link: https://lkml.kernel.org/r/1515707194-20531-7-git-send-email-dwmw@amazon.co.uk
Signed-off-by: David Woodhouse <dwmw@amazon.co.uk>
Signed-off-by: Razvan Ghitulete <rga@amazon.de>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
(cherry picked from commit 028083cb02db69237e73950576bc81ac579693dc)
Orabug: 27477743
CVE: CVE-2017-5715
Signed-off-by: Daniel Jordan <daniel.m.jordan@oracle.com>
Conflicts:
arch/x86/kernel/entry_32.S
          (dmj:
            - patch had arch/x86/entry/entry_32.S
            - extra retpolines needed for sys_call_table)
arch/x86/kernel/entry_64.S
          (dmj: patch had arch/x86/entry/entry_64.S)
Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
Reviewed-by: Darren Kenny <darren.kenny@oracle.com>
Reviewed-by: Pavel Tatashin <pasha.tatashin@oracle.com>
arch/x86/kernel/entry_32.S
arch/x86/kernel/entry_64.S

index edc8554d1b426742889cb00553d05511db05469a..47230850c36ca37b159b7ab21d55fd751d9c98a2 100644 (file)
@@ -58,6 +58,7 @@
 #include <asm/alternative-asm.h>
 #include <asm/asm.h>
 #include <asm/smap.h>
+#include <asm/nospec-branch.h>
 
 /* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this.  */
 #include <linux/elf-em.h>
@@ -308,7 +309,8 @@ ENTRY(ret_from_kernel_thread)
        pushl_cfi $0x0202               # Reset kernel eflags
        popfl_cfi
        movl PT_EBP(%esp),%eax
-       call *PT_EBX(%esp)
+       movl PT_EBX(%esp), %edx
+       CALL_NOSPEC %edx
        movl $0,PT_EAX(%esp)
        jmp syscall_exit
        CFI_ENDPROC
@@ -427,7 +429,12 @@ sysenter_past_esp:
 sysenter_do_call:
        cmpl $(NR_syscalls), %eax
        jae sysenter_badsys
+#ifdef CONFIG_RETPOLINE
+       movl sys_call_table(,%eax,4), %eax
+       call __x86_indirect_thunk_eax
+#else
        call *sys_call_table(,%eax,4)
+#endif
 sysenter_after_call:
        movl %eax,PT_EAX(%esp)
        LOCKDEP_SYS_EXIT
@@ -502,7 +509,12 @@ ENTRY(system_call)
        cmpl $(NR_syscalls), %eax
        jae syscall_badsys
 syscall_call:
+#ifdef CONFIG_RETPOLINE
+       movl sys_call_table(,%eax,4), %eax
+       call __x86_indirect_thunk_eax
+#else
        call *sys_call_table(,%eax,4)
+#endif
 syscall_after_call:
        movl %eax,PT_EAX(%esp)          # store the return value
 syscall_exit:
@@ -1265,7 +1277,7 @@ error_code:
        movl %ecx, %es
        TRACE_IRQS_OFF
        movl %esp,%eax                  # pt_regs pointer
-       call *%edi
+       CALL_NOSPEC %edi
        jmp ret_from_exception
        CFI_ENDPROC
 END(page_fault)
index f55668e7c2cd49f7422ee5f72c57ca0aff534799..7c31f57d430389f1c046bb6cd6e745361d7b9adf 100644 (file)
@@ -48,6 +48,7 @@
 #include <asm/kaiser.h>
 #include <asm/dtrace_util.h>
 #include <asm/spec_ctrl.h>
+#include <asm/nospec-branch.h>
 #include <linux/err.h>
 
 /* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this.  */
@@ -273,7 +274,12 @@ system_call_fastpath:
 #endif
        ja      1f      /* return -ENOSYS (already in pt_regs->ax) */
        movq %r10,%rcx
+#ifdef CONFIG_RETPOLINE
+       movq sys_call_table(,%rax,8), %rax
+       call __x86_indirect_thunk_rax
+#else
        call *sys_call_table(,%rax,8)
+#endif
        movq %rax,RAX(%rsp)
 1:
 /*
@@ -368,7 +374,12 @@ tracesys_phase2:
 #endif
        ja      1f      /* return -ENOSYS (already in pt_regs->ax) */
        movq %r10,%rcx  /* fixup for C */
+#ifdef CONFIG_RETPOLINE
+       movq sys_call_table(,%rax,8), %rax
+       call __x86_indirect_thunk_rax
+#else
        call *sys_call_table(,%rax,8)
+#endif
        movq %rax,RAX(%rsp)
 1:
        /* Use IRET because user could have changed pt_regs->foo */
@@ -669,7 +680,7 @@ ENTRY(ret_from_fork)
        /* We came from kernel_thread */
        /* nb: we depend on RESTORE_EXTRA_REGS above */
        movq %rbp, %rdi
-       call *%rbx
+       CALL_NOSPEC %rbx
        movl $0, RAX(%rsp)
        RESTORE_EXTRA_REGS
        jmp int_ret_from_sys_call