 CFLAGS_REMOVE_syscall_32.o = $(CC_FLAGS_FTRACE) -fstack-protector -fstack-protector-strong
 CFLAGS_REMOVE_syscall_64.o = $(CC_FLAGS_FTRACE) -fstack-protector -fstack-protector-strong
 
-OBJECT_FILES_NON_STANDARD_entry_64_compat.o := y
-
 CFLAGS_syscall_64.o            += $(call cc-option,-Wno-override-init,)
 CFLAGS_syscall_32.o            += $(call cc-option,-Wno-override-init,)
 obj-y                          := entry_$(BITS).o thunk_$(BITS).o syscall_$(BITS).o
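
With the OBJECT_FILES_NON_STANDARD opt-out gone, objtool now validates
entry_64_compat.o, which is what forces the SYM_CODE_*/UNWIND_HINT_*
annotations below. For reference, the two per-object kbuild knobs in
play here, sketched against a hypothetical foo.o rather than anything
in this patch:

	# Strip per-object compile flags (here the ftrace compile flags
	# kbuild keeps in CC_FLAGS_FTRACE).
	CFLAGS_REMOVE_foo.o := $(CC_FLAGS_FTRACE)

	# Escape hatch: skip objtool validation for this object -- the
	# opt-out this patch removes for entry_64_compat.o.
	OBJECT_FILES_NON_STANDARD_foo.o := y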
 
  * ebp  user stack
  * 0(%ebp) arg6
  */
-SYM_FUNC_START(entry_SYSENTER_compat)
+SYM_CODE_START(entry_SYSENTER_compat)
+       UNWIND_HINT_EMPTY
        /* Interrupts are off on entry. */
        SWAPGS
 
-       /* We are about to clobber %rsp anyway, clobbering here is OK */
-       SWITCH_TO_KERNEL_CR3 scratch_reg=%rsp
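+       /* SWITCH_TO_KERNEL_CR3 needs a scratch GP register; preserve %rax */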
+       pushq   %rax
+       SWITCH_TO_KERNEL_CR3 scratch_reg=%rax
+       popq    %rax
 
        movq    PER_CPU_VAR(cpu_current_top_of_stack), %rsp
 
        xorl    %r14d, %r14d            /* nospec   r14 */
        pushq   $0                      /* pt_regs->r15 = 0 */
        xorl    %r15d, %r15d            /* nospec   r15 */
+
+       UNWIND_HINT_REGS
+
        cld
 
        /*
        popfq
        jmp     .Lsysenter_flags_fixed
 SYM_INNER_LABEL(__end_entry_SYSENTER_compat, SYM_L_GLOBAL)
-SYM_FUNC_END(entry_SYSENTER_compat)
+SYM_CODE_END(entry_SYSENTER_compat)
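
SYM_FUNC_*() is reserved for C-ABI functions that objtool can validate
against the normal frame rules; an entry point like this follows no C
ABI, so it becomes SYM_CODE_*() and carries explicit unwind hints
instead: UNWIND_HINT_EMPTY marks a point with no usable unwind state
(the unwinder must not try to go past it), and UNWIND_HINT_REGS marks
%rsp as pointing at a complete struct pt_regs. The push/pop of %rax
replaces the old trick of using %rsp as the CR3 scratch register,
presumably so %rsp holds a valid stack pointer at every instruction
objtool and the ORC unwinder look at. A minimal sketch of the
annotation pattern, assuming the usual <linux/linkage.h> and
<asm/unwind_hints.h> macros (illustration only, not the actual entry
code):

	SYM_CODE_START(example_entry)
		UNWIND_HINT_EMPTY	/* no usable unwind state yet */
		/* ... switch CR3 and stacks, build pt_regs ... */
		UNWIND_HINT_REGS	/* %rsp == &pt_regs from here on */
		/* ... call into C, then return to user mode ... */
	SYM_CODE_END(example_entry)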
 
 /*
  * 32-bit SYSCALL entry.
  * 0(%esp) arg6
  */
 SYM_CODE_START(entry_SYSCALL_compat)
+       UNWIND_HINT_EMPTY
        /* Interrupts are off on entry. */
        swapgs
 
        pushq   $0                      /* pt_regs->r15 = 0 */
        xorl    %r15d, %r15d            /* nospec   r15 */
 
+       UNWIND_HINT_REGS
+
        movq    %rsp, %rdi
        call    do_fast_syscall_32
        /* XEN PV guests always use IRET path */
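
Same pattern on the SYSCALL path: once UNWIND_HINT_REGS is in effect,
%rsp doubles as the struct pt_regs pointer, so handing the frame to C
(here do_fast_syscall_32) is a single register move. Sketched with a
hypothetical handler name:

	UNWIND_HINT_REGS		/* %rsp points at struct pt_regs */
	movq	%rsp, %rdi		/* arg1: struct pt_regs * */
	call	example_c_handler	/* hypothetical C-level handler */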
  * ebp  arg6
  */
 SYM_CODE_START(entry_INT80_compat)
+       UNWIND_HINT_EMPTY
        /*
         * Interrupts are off on entry.
         */
 
        /* Need to switch before accessing the thread stack. */
        SWITCH_TO_KERNEL_CR3 scratch_reg=%rdi
+
        /* In the Xen PV case we already run on the thread stack. */
-       ALTERNATIVE "movq %rsp, %rdi", "jmp .Lint80_keep_stack", X86_FEATURE_XENPV
+       ALTERNATIVE "", "jmp .Lint80_keep_stack", X86_FEATURE_XENPV
+
+       movq    %rsp, %rdi
        movq    PER_CPU_VAR(cpu_current_top_of_stack), %rsp
 
        pushq   6*8(%rdi)               /* regs->ss */
        xorl    %r14d, %r14d            /* nospec   r14 */
        pushq   %r15                    /* pt_regs->r15 */
        xorl    %r15d, %r15d            /* nospec   r15 */
+
+       UNWIND_HINT_REGS
+
        cld
 
        movq    %rsp, %rdi
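
The ALTERNATIVE is reworked so the patched site contains only a jump
(or NOPs): pulling "movq %rsp, %rdi" out of the first arm leaves the
two arms with identical register and stack effects, which is presumably
easier for objtool to verify now that this file is checked. A sketch of
the macro's shape, assuming <asm/alternative.h>, with an illustrative
label:

	/*
	 * At boot the empty first arm is padded with NOPs; on CPUs with
	 * X86_FEATURE_XENPV it is patched to the second arm instead.
	 */
	ALTERNATIVE "", "jmp .Lexample_xenpv_path", X86_FEATURE_XENPV
	movq	%rsp, %rdi	/* reached only when the jump isn't patched in */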