* branch to what would be the reset vector. It must be executed with the
  * flat identity mapping.
  */
-ENTRY(__cpu_soft_restart)
+SYM_CODE_START(__cpu_soft_restart)
        /* Clear sctlr_el1 flags. */
        mrs     x12, sctlr_el1
        mov_q   x13, SCTLR_ELx_FLAGS
        mov     x1, x3                          // arg1
        mov     x2, x4                          // arg2
        br      x8
-ENDPROC(__cpu_soft_restart)
+SYM_CODE_END(__cpu_soft_restart)
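
The new macros make the calling-convention split explicit: SYM_FUNC_* marks code that follows the AAPCS and may be called from C, while SYM_CODE_* (as here, for code entered via the flat identity map with the MMU off) marks everything with a special calling convention. A simplified sketch of what they emit, based on include/linux/linkage.h around v5.5 (details vary by kernel version):

        .globl  __cpu_soft_restart              // SYM_CODE_START: global label,
        .align  2                               //   aligned per arm64's __ALIGN
__cpu_soft_restart:
        // ... body ...
        .size   __cpu_soft_restart, . - __cpu_soft_restart     // SYM_CODE_END
        // SYM_FUNC_END would additionally mark the symbol as a function:
        //      .type   __cpu_soft_restart, %function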
 
 .popsection
 
 
 #include <linux/linkage.h>
 
-ENTRY(__efi_rt_asm_wrapper)
+SYM_FUNC_START(__efi_rt_asm_wrapper)
        stp     x29, x30, [sp, #-32]!
        mov     x29, sp
 
        b.ne    0f
        ret
 0:     b       efi_handle_corrupted_x18        // tail call
-ENDPROC(__efi_rt_asm_wrapper)
+SYM_FUNC_END(__efi_rt_asm_wrapper)
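
For context, the b.ne above is the tail of the wrapper's x18 check: the AAPCS designates x18 as the platform register, so firmware running at the same exception level should never touch it. A hedged sketch of the pattern feeding that branch (illustrative, not the exact elided code):

        stp     x1, x18, [sp, #16]      // stash x18 before calling firmware
        blr     x8                      // invoke the EFI runtime service
        ldp     x1, x2, [sp, #16]
        cmp     x2, x18                 // did the service clobber x18?
        ldp     x29, x30, [sp], #32
        b.ne    0f                      // yes: divert to the error handler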
 
  *
  * x0 - pointer to struct fpsimd_state
  */
-ENTRY(fpsimd_save_state)
+SYM_FUNC_START(fpsimd_save_state)
        fpsimd_save x0, 8
        ret
-ENDPROC(fpsimd_save_state)
+SYM_FUNC_END(fpsimd_save_state)
 
 /*
  * Load the FP registers.
  *
  * x0 - pointer to struct fpsimd_state
  */
-ENTRY(fpsimd_load_state)
+SYM_FUNC_START(fpsimd_load_state)
        fpsimd_restore x0, 8
        ret
-ENDPROC(fpsimd_load_state)
+SYM_FUNC_END(fpsimd_load_state)
 
 #ifdef CONFIG_ARM64_SVE
-ENTRY(sve_save_state)
+SYM_FUNC_START(sve_save_state)
        sve_save 0, x1, 2
        ret
-ENDPROC(sve_save_state)
+SYM_FUNC_END(sve_save_state)
 
-ENTRY(sve_load_state)
+SYM_FUNC_START(sve_load_state)
        sve_load 0, x1, x2, 3, x4
        ret
-ENDPROC(sve_load_state)
+SYM_FUNC_END(sve_load_state)
 
-ENTRY(sve_get_vl)
+SYM_FUNC_START(sve_get_vl)
        _sve_rdvl       0, 1
        ret
-ENDPROC(sve_get_vl)
+SYM_FUNC_END(sve_get_vl)
 #endif /* CONFIG_ARM64_SVE */
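
The _sve_rdvl macro above hand-encodes the instruction so the file still assembles with pre-SVE binutils; on an SVE-aware assembler, `_sve_rdvl 0, 1` corresponds to:

        rdvl    x0, #1          // x0 = current SVE vector length in bytes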
 
  * x5: physical address of a zero page that remains zero after resume
  */
 .pushsection    ".hibernate_exit.text", "ax"
-ENTRY(swsusp_arch_suspend_exit)
+SYM_CODE_START(swsusp_arch_suspend_exit)
        /*
         * We execute from ttbr0; switch ttbr1 to our copied linear map
         * tables with a break-before-make via the zero page.
         */
        cbz     x24, 3f         /* Do we need to re-initialise EL2? */
        hvc     #0
 3:     ret
-ENDPROC(swsusp_arch_suspend_exit)
+SYM_CODE_END(swsusp_arch_suspend_exit)
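
The break-before-make mentioned in the comment has the architecturally required shape: point ttbr1_el1 at a page of zero (invalid) entries, invalidate the TLB, then install the real table. A generic sketch (the kernel wraps this in a local macro; the register choices here are illustrative):

        msr     ttbr1_el1, x1   // x1: ttbr value built from the zero page
        isb
        tlbi    vmalle1         // stale translations can no longer be hit
        dsb     nsh
        msr     ttbr1_el1, x2   // x2: the copied linear-map tables
        isb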
 
 /*
  * Restore the hyp stub.
  *
  * x24: The physical address of __hyp_stub_vectors
  */
-el1_sync:
+SYM_CODE_START_LOCAL(el1_sync)
        msr     vbar_el2, x24
        eret
-ENDPROC(el1_sync)
+SYM_CODE_END(el1_sync)
 
 .macro invalid_vector  label
-\label:
+SYM_CODE_START_LOCAL(\label)
        b \label
-ENDPROC(\label)
+SYM_CODE_END(\label)
 .endm
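
Each instantiation of the macro stamps out a local, self-branching vector stub; for example, the `invalid_vector el2_sync_invalid` below expands to:

SYM_CODE_START_LOCAL(el2_sync_invalid)
        b       el2_sync_invalid        // parked: loop here forever
SYM_CODE_END(el2_sync_invalid)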
 
        invalid_vector  el2_sync_invalid
 
 /* el2 vectors - switch el2 here while we restore the memory image. */
        .align 11
-ENTRY(hibernate_el2_vectors)
+SYM_CODE_START(hibernate_el2_vectors)
        ventry  el2_sync_invalid                // Synchronous EL2t
        ventry  el2_irq_invalid                 // IRQ EL2t
        ventry  el2_fiq_invalid                 // FIQ EL2t
        ventry  el1_irq_invalid                 // IRQ 32-bit EL1
        ventry  el1_fiq_invalid                 // FIQ 32-bit EL1
        ventry  el1_error_invalid               // Error 32-bit EL1
-END(hibernate_el2_vectors)
+SYM_CODE_END(hibernate_el2_vectors)
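
The .align 11 is dictated by the AArch64 vector table layout: 16 slots of 0x80 bytes each, 0x800 bytes in total, so the base written to VBAR_EL2 must be 2^11-aligned. The ventry macro pads each slot to that size; roughly:

.macro  ventry  label
        .align  7               // each vector slot is 2^7 = 0x80 bytes
        b       \label
.endm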
 
 .popsection
 
 
        .align 11
 
-ENTRY(__hyp_stub_vectors)
+SYM_CODE_START(__hyp_stub_vectors)
        ventry  el2_sync_invalid                // Synchronous EL2t
        ventry  el2_irq_invalid                 // IRQ EL2t
        ventry  el2_fiq_invalid                 // FIQ EL2t
        ventry  el1_irq_invalid                 // IRQ 32-bit EL1
        ventry  el1_fiq_invalid                 // FIQ 32-bit EL1
        ventry  el1_error_invalid               // Error 32-bit EL1
-ENDPROC(__hyp_stub_vectors)
+SYM_CODE_END(__hyp_stub_vectors)
 
        .align 11
 
-el1_sync:
+SYM_CODE_START_LOCAL(el1_sync)
        cmp     x0, #HVC_SET_VECTORS
        b.ne    2f
        msr     vbar_el2, x1
 
 9:     mov     x0, xzr
        eret
-ENDPROC(el1_sync)
+SYM_CODE_END(el1_sync)
 
 .macro invalid_vector  label
-\label:
+SYM_CODE_START_LOCAL(\label)
        b \label
-ENDPROC(\label)
+SYM_CODE_END(\label)
 .endm
 
        invalid_vector  el2_sync_invalid
  * initialisation entry point.
  */
 
-ENTRY(__hyp_set_vectors)
+SYM_FUNC_START(__hyp_set_vectors)
        mov     x1, x0
        mov     x0, #HVC_SET_VECTORS
        hvc     #0
        ret
-ENDPROC(__hyp_set_vectors)
+SYM_FUNC_END(__hyp_set_vectors)
 
-ENTRY(__hyp_reset_vectors)
+SYM_FUNC_START(__hyp_reset_vectors)
        mov     x0, #HVC_RESET_VECTORS
        hvc     #0
        ret
-ENDPROC(__hyp_reset_vectors)
+SYM_FUNC_END(__hyp_reset_vectors)
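
Both helpers follow the hyp-stub ABI visible in el1_sync above: the function ID travels in x0 and any payload in x1, carried to EL2 by hvc #0. A hypothetical EL1 caller installing its own table (my_el2_vectors is illustrative; the stub runs at EL2 with its MMU off, so a real caller passes a physical address):

        adr_l   x0, my_el2_vectors
        bl      __hyp_set_vectors       // becomes hvc #0 with x0 = HVC_SET_VECTORS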
 
        ldp x28, x29, [sp, #S_X28]
        .endm
 
-ENTRY(kretprobe_trampoline)
+SYM_CODE_START(kretprobe_trampoline)
        sub sp, sp, #S_FRAME_SIZE
 
        save_all_base_regs
        add sp, sp, #S_FRAME_SIZE
        ret
 
-ENDPROC(kretprobe_trampoline)
+SYM_CODE_END(kretprobe_trampoline)
 
 
 #include <linux/linkage.h>
 
-ENTRY(absolute_data64)
+SYM_FUNC_START(absolute_data64)
        ldr     x0, 0f
        ret
 0:     .quad   sym64_abs
-ENDPROC(absolute_data64)
+SYM_FUNC_END(absolute_data64)
 
-ENTRY(absolute_data32)
+SYM_FUNC_START(absolute_data32)
        ldr     w0, 0f
        ret
 0:     .long   sym32_abs
-ENDPROC(absolute_data32)
+SYM_FUNC_END(absolute_data32)
 
-ENTRY(absolute_data16)
+SYM_FUNC_START(absolute_data16)
        adr     x0, 0f
        ldrh    w0, [x0]
        ret
 0:     .short  sym16_abs, 0
-ENDPROC(absolute_data16)
+SYM_FUNC_END(absolute_data16)
 
-ENTRY(signed_movw)
+SYM_FUNC_START(signed_movw)
        movz    x0, #:abs_g2_s:sym64_abs
        movk    x0, #:abs_g1_nc:sym64_abs
        movk    x0, #:abs_g0_nc:sym64_abs
        ret
-ENDPROC(signed_movw)
+SYM_FUNC_END(signed_movw)
 
-ENTRY(unsigned_movw)
+SYM_FUNC_START(unsigned_movw)
        movz    x0, #:abs_g3:sym64_abs
        movk    x0, #:abs_g2_nc:sym64_abs
        movk    x0, #:abs_g1_nc:sym64_abs
        movk    x0, #:abs_g0_nc:sym64_abs
        ret
-ENDPROC(unsigned_movw)
+SYM_FUNC_END(unsigned_movw)
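
The :abs_gN: operators select 16-bit groups of the symbol's absolute value, numbered from bit 0 upward; movz writes one group and zeroes the rest, and each movk merges a further group in place. For a value such as 0x0123456789abcdef, the unsigned sequence resolves to (sketch):

        movz    x0, #0x0123, lsl #48    // g3: bits [63:48], rest cleared
        movk    x0, #0x4567, lsl #32    // g2_nc: bits [47:32] merged in
        movk    x0, #0x89ab, lsl #16    // g1_nc: bits [31:16]
        movk    x0, #0xcdef             // g0_nc: bits [15:0]

The signed variant gets away with three instructions because :abs_g2_s: lets the linker flip the movz to movn for negative values, so bits [63:48] come out as sign bits for free.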
 
        .align  12
        .space  0xff8
-ENTRY(relative_adrp)
+SYM_FUNC_START(relative_adrp)
        adrp    x0, sym64_rel
        add     x0, x0, #:lo12:sym64_rel
        ret
-ENDPROC(relative_adrp)
+SYM_FUNC_END(relative_adrp)
 
        .align  12
        .space  0xffc
-ENTRY(relative_adrp_far)
+SYM_FUNC_START(relative_adrp_far)
        adrp    x0, memstart_addr
        add     x0, x0, #:lo12:memstart_addr
        ret
-ENDPROC(relative_adrp_far)
+SYM_FUNC_END(relative_adrp_far)
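
adrp computes PC-relative page addresses: it clears the low 12 bits of the PC and adds a signed 21-bit immediate scaled by 4 KiB, giving a ±4 GiB reach; the following add with #:lo12: restores the byte offset within the page. The .space 0xff8/0xffc padding parks each adrp at the end of a page to exercise the boundary cases. In outline (arithmetic shown as comments):

        // x0 = (PC & ~0xfff) + ((page of sym64_rel - page of PC) << 12)
        adrp    x0, sym64_rel
        // the add supplies the low 12 bits that adrp cannot encode:
        add     x0, x0, #:lo12:sym64_rel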
 
-ENTRY(relative_adr)
+SYM_FUNC_START(relative_adr)
        adr     x0, sym64_rel
        ret
-ENDPROC(relative_adr)
+SYM_FUNC_END(relative_adr)
 
-ENTRY(relative_data64)
+SYM_FUNC_START(relative_data64)
        adr     x1, 0f
        ldr     x0, [x1]
        add     x0, x0, x1
        ret
 0:     .quad   sym64_rel - .
-ENDPROC(relative_data64)
+SYM_FUNC_END(relative_data64)
 
-ENTRY(relative_data32)
+SYM_FUNC_START(relative_data32)
        adr     x1, 0f
        ldr     w0, [x1]
        add     x0, x0, x1
        ret
 0:     .long   sym64_rel - .
-ENDPROC(relative_data32)
+SYM_FUNC_END(relative_data32)
 
-ENTRY(relative_data16)
+SYM_FUNC_START(relative_data16)
        adr     x1, 0f
        ldrsh   w0, [x1]
        add     x0, x0, x1
        ret
 0:     .short  sym64_rel - ., 0
-ENDPROC(relative_data16)
+SYM_FUNC_END(relative_data16)
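
All three relative_data helpers lean on the same identity: the assembler stores the link-time delta sym - ., and adding back the literal's runtime address cancels any relocation of the image, recovering sym position-independently. Annotated (a sketch of relative_data64 above):

        adr     x1, 0f          // x1 = runtime address of the literal
        ldr     x0, [x1]        // x0 = sym64_rel - <link address of 0f>
        add     x0, x0, x1      // x0 = runtime address of sym64_rel
        ret
0:      .quad   sym64_rel - .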
 
  * control_code_page, a special page which has been set up to be preserved
  * during the copy operation.
  */
-ENTRY(arm64_relocate_new_kernel)
+SYM_CODE_START(arm64_relocate_new_kernel)
 
        /* Setup the list loop variables. */
        mov     x18, x2                         /* x18 = dtb address */
        mov     x3, xzr
        br      x17
 
-ENDPROC(arm64_relocate_new_kernel)
+SYM_CODE_END(arm64_relocate_new_kernel)
 
 .align 3       /* To keep the 64-bit values below naturally aligned. */
 
 
  *
  *  x0 = struct sleep_stack_data area
  */
-ENTRY(__cpu_suspend_enter)
+SYM_FUNC_START(__cpu_suspend_enter)
        stp     x29, lr, [x0, #SLEEP_STACK_DATA_CALLEE_REGS]
        stp     x19, x20, [x0,#SLEEP_STACK_DATA_CALLEE_REGS+16]
        stp     x21, x22, [x0,#SLEEP_STACK_DATA_CALLEE_REGS+32]
        ldp     x29, lr, [sp], #16
        mov     x0, #1
        ret
-ENDPROC(__cpu_suspend_enter)
+SYM_FUNC_END(__cpu_suspend_enter)
 
        .pushsection ".idmap.text", "awx"
-ENTRY(cpu_resume)
+SYM_CODE_START(cpu_resume)
        bl      el2_setup               // if in EL2 drop to EL1 cleanly
        mov     x0, #ARM64_CPU_RUNTIME
        bl      __cpu_setup
        bl      __enable_mmu
        ldr     x8, =_cpu_resume
        br      x8
-ENDPROC(cpu_resume)
+SYM_CODE_END(cpu_resume)
        .ltorg
        .popsection
 
-ENTRY(_cpu_resume)
+SYM_FUNC_START(_cpu_resume)
        mrs     x1, mpidr_el1
        adr_l   x8, mpidr_hash          // x8 = struct mpidr_hash virt address
 
        ldp     x29, lr, [x29]
        mov     x0, #0
        ret
-ENDPROC(_cpu_resume)
+SYM_FUNC_END(_cpu_resume)
 
  *               unsigned long a6, unsigned long a7, struct arm_smccc_res *res,
  *               struct arm_smccc_quirk *quirk)
  */
-ENTRY(__arm_smccc_smc)
+SYM_FUNC_START(__arm_smccc_smc)
        SMCCC   smc
-ENDPROC(__arm_smccc_smc)
+SYM_FUNC_END(__arm_smccc_smc)
 EXPORT_SYMBOL(__arm_smccc_smc)
 
 /*
  *               unsigned long a6, unsigned long a7, struct arm_smccc_res *res,
  *               struct arm_smccc_quirk *quirk)
  */
-ENTRY(__arm_smccc_hvc)
+SYM_FUNC_START(__arm_smccc_hvc)
        SMCCC   hvc
-ENDPROC(__arm_smccc_hvc)
+SYM_FUNC_END(__arm_smccc_hvc)
 EXPORT_SYMBOL(__arm_smccc_hvc)
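
Both entry points share the SMCCC macro, which only has to issue the conduit instruction and spill the results: per the SMC Calling Convention, arguments a0..a7 already sit in x0..x7 on entry, and the res and quirk pointers arrive on the stack as the ninth and tenth C arguments. A simplified sketch of the macro body (quirk handling elided; the offsets are asm-offsets constants):

        .macro SMCCC instr
        \instr  #0                                      // smc #0 or hvc #0
        ldr     x4, [sp]                                // 9th arg: struct arm_smccc_res *
        stp     x0, x1, [x4, #ARM_SMCCC_RES_X0_OFFS]    // res->a0, res->a1
        stp     x2, x3, [x4, #ARM_SMCCC_RES_X2_OFFS]    // res->a2, res->a3
        ret
        .endm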