From: David Woodhouse
Date: Thu, 11 Jan 2018 21:46:28 +0000 (+0000)
Subject: x86/retpoline/entry: Convert entry assembler indirect jumps
X-Git-Tag: v4.1.12-124.31.3~1183
X-Git-Url: https://www.infradead.org/git/?a=commitdiff_plain;h=72bc40fe02ff9dbbec1c8566aa35ef09595a0757;p=users%2Fjedix%2Flinux-maple.git

x86/retpoline/entry: Convert entry assembler indirect jumps

commit 2641f08bb7fc63a636a2b18173221d7040a3512e upstream.

Convert indirect jumps in core 32/64bit entry assembler code to use
non-speculative sequences when CONFIG_RETPOLINE is enabled.

Don't use CALL_NOSPEC in entry_SYSCALL_64_fastpath because the return
address after the 'call' instruction must be *precisely* at the
.Lentry_SYSCALL_64_after_fastpath label for stub_ptregs_64 to work,
and the use of alternatives will mess that up unless we play horrid
games to prepend with NOPs and make the variants the same length. It's
not worth it; in the case where we ALTERNATIVE out the retpoline, the
first instruction at __x86.indirect_thunk.rax is going to be a bare
jmp *%rax anyway.

Signed-off-by: David Woodhouse
Signed-off-by: Thomas Gleixner
Acked-by: Ingo Molnar
Acked-by: Arjan van de Ven
Cc: gnomes@lxorguk.ukuu.org.uk
Cc: Rik van Riel
Cc: Andi Kleen
Cc: Josh Poimboeuf
Cc: thomas.lendacky@amd.com
Cc: Peter Zijlstra
Cc: Linus Torvalds
Cc: Jiri Kosina
Cc: Andy Lutomirski
Cc: Dave Hansen
Cc: Kees Cook
Cc: Tim Chen
Cc: Greg Kroah-Hartman
Cc: Paul Turner
Link: https://lkml.kernel.org/r/1515707194-20531-7-git-send-email-dwmw@amazon.co.uk
Signed-off-by: David Woodhouse
Signed-off-by: Razvan Ghitulete
Signed-off-by: Greg Kroah-Hartman
(cherry picked from commit 028083cb02db69237e73950576bc81ac579693dc)

Orabug: 27477743
CVE: CVE-2017-5715

Signed-off-by: Daniel Jordan

Conflicts:
	arch/x86/kernel/entry_32.S
	(dmj: - patch had arch/x86/entry/entry_32.S
	      - extra retpolines needed for sys_call_table)
	arch/x86/kernel/entry_64.S
	(dmj: patch had arch/x86/entry/entry_64.S)

Signed-off-by: Konrad Rzeszutek Wilk
Reviewed-by: Darren Kenny
Reviewed-by: Pavel Tatashin
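For readers following the conversion below: the __x86_indirect_thunk_<reg>
symbols that the new call sites target are the per-register retpoline thunks
added earlier in this series (upstream they are generated in
arch/x86/lib/retpoline.S); they are not part of this diff. A minimal sketch
of the thunk pattern, assuming the canonical retpoline sequence and using
illustrative label names of my own, not the kernel's:

	ENTRY(__x86_indirect_thunk_rax)
		call	.Ldo_call	/* pushed return address points at the trap below */
	.Lspec_trap:
		pause			/* speculation that "returns" here spins ...       */
		lfence			/* ... harmlessly until the real ret retires        */
		jmp	.Lspec_trap
	.Ldo_call:
		mov	%rax, (%rsp)	/* overwrite return address with the real target    */
		ret			/* architecturally this jumps to *%rax              */
	ENDPROC(__x86_indirect_thunk_rax)

When retpolines are compiled in but not needed on the running CPU, the
alternatives machinery reduces the thunk to a bare 'jmp *%rax', which is why
the open-coded 'call __x86_indirect_thunk_rax' in the fastpath (rather than
CALL_NOSPEC) costs little, as the message above notes.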
---

diff --git a/arch/x86/kernel/entry_32.S b/arch/x86/kernel/entry_32.S
index edc8554d1b426..47230850c36ca 100644
--- a/arch/x86/kernel/entry_32.S
+++ b/arch/x86/kernel/entry_32.S
@@ -58,6 +58,7 @@
 #include
 #include
 #include
+#include <asm/nospec-branch.h>

 /* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this.  */
 #include
@@ -308,7 +309,8 @@ ENTRY(ret_from_kernel_thread)
 	pushl_cfi $0x0202		# Reset kernel eflags
 	popfl_cfi
 	movl PT_EBP(%esp),%eax
-	call *PT_EBX(%esp)
+	movl PT_EBX(%esp), %edx
+	CALL_NOSPEC %edx
 	movl $0,PT_EAX(%esp)
 	jmp syscall_exit
 	CFI_ENDPROC
@@ -427,7 +429,12 @@ sysenter_past_esp:
 sysenter_do_call:
 	cmpl $(NR_syscalls), %eax
 	jae sysenter_badsys
+#ifdef CONFIG_RETPOLINE
+	movl sys_call_table(,%eax,4), %eax
+	call __x86_indirect_thunk_eax
+#else
 	call *sys_call_table(,%eax,4)
+#endif
 sysenter_after_call:
 	movl %eax,PT_EAX(%esp)
 	LOCKDEP_SYS_EXIT
@@ -502,7 +509,12 @@ ENTRY(system_call)
 	cmpl $(NR_syscalls), %eax
 	jae syscall_badsys
 syscall_call:
+#ifdef CONFIG_RETPOLINE
+	movl sys_call_table(,%eax,4), %eax
+	call __x86_indirect_thunk_eax
+#else
 	call *sys_call_table(,%eax,4)
+#endif
 syscall_after_call:
 	movl %eax,PT_EAX(%esp)		# store the return value
 syscall_exit:
@@ -1265,7 +1277,7 @@ error_code:
 	movl %ecx, %es
 	TRACE_IRQS_OFF
 	movl %esp,%eax			# pt_regs pointer
-	call *%edi
+	CALL_NOSPEC %edi
 	jmp ret_from_exception
 	CFI_ENDPROC
 END(page_fault)
diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S
index f55668e7c2cd4..7c31f57d43038 100644
--- a/arch/x86/kernel/entry_64.S
+++ b/arch/x86/kernel/entry_64.S
@@ -48,6 +48,7 @@
 #include
 #include
 #include
+#include <asm/nospec-branch.h>
 #include

 /* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this.  */
@@ -273,7 +274,12 @@ system_call_fastpath:
 #endif
 	ja	1f	/* return -ENOSYS (already in pt_regs->ax) */
 	movq	%r10,%rcx
+#ifdef CONFIG_RETPOLINE
+	movq	sys_call_table(,%rax,8), %rax
+	call	__x86_indirect_thunk_rax
+#else
 	call	*sys_call_table(,%rax,8)
+#endif
 	movq	%rax,RAX(%rsp)
 1:
 /*
@@ -368,7 +374,12 @@ tracesys_phase2:
 #endif
 	ja	1f	/* return -ENOSYS (already in pt_regs->ax) */
 	movq	%r10,%rcx	/* fixup for C */
+#ifdef CONFIG_RETPOLINE
+	movq	sys_call_table(,%rax,8), %rax
+	call	__x86_indirect_thunk_rax
+#else
 	call	*sys_call_table(,%rax,8)
+#endif
 	movq	%rax,RAX(%rsp)
 1:
 	/* Use IRET because user could have changed pt_regs->foo */
@@ -669,7 +680,7 @@ ENTRY(ret_from_fork)
 	/* We came from kernel_thread */
 	/* nb: we depend on RESTORE_EXTRA_REGS above */
 	movq	%rbp, %rdi
-	call	*%rbx
+	CALL_NOSPEC %rbx
 	movl	$0, RAX(%rsp)
 	RESTORE_EXTRA_REGS
 	jmp	int_ret_from_sys_call
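As a footnote to the commit message above, here is a rough sketch of why
CALL_NOSPEC (an alternatives-based macro from <asm/nospec-branch.h>) is
avoided for the upstream 64-bit fastpath dispatch. This is illustrative only:
it uses the upstream label named in the message and a simplified stand-in for
what CALL_NOSPEC expands to, and is not code from this patch:

	/* Hypothetical fastpath dispatch via the alternatives framework: */
	ALTERNATIVE "call *sys_call_table(,%rax,8)", "call __x86_indirect_thunk_rax", X86_FEATURE_RETPOLINE
	/*
	 * The two variants are different lengths, so the alternatives code
	 * pads the shorter one with NOPs after its 'call'.  The return
	 * address that call pushes then points at the padding instead of
	 * at the label below -- but stub_ptregs_64 requires it to be
	 * exactly here, unless NOPs are prepended instead (the "horrid
	 * games" the message refers to).
	 */
	.Lentry_SYSCALL_64_after_fastpath:

With the call open-coded under #ifdef CONFIG_RETPOLINE, nothing at the call
site is patched at runtime, so the call's length and position, and therefore
the return address it pushes, are fixed at build time.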