riscv: Use SYM_*() assembly macros instead of deprecated ones
author     Clément Léger <cleger@rivosinc.com>
           Tue, 24 Oct 2023 13:26:52 +0000 (15:26 +0200)
committer  Palmer Dabbelt <palmer@rivosinc.com>
           Mon, 6 Nov 2023 17:42:47 +0000 (09:42 -0800)
ENTRY()/END()/WEAK() macros are deprecated and we should make use of the
new SYM_*() macros [1] for better annotation of symbols. Replace the
deprecated ones with the new ones and fix the incorrect usages of
END()/ENDPROC() so that symbols are described correctly.

[1] https://docs.kernel.org/core-api/asm-annotations.html

Signed-off-by: Clément Léger <cleger@rivosinc.com>
Reviewed-by: Andrew Jones <ajones@ventanamicro.com>
Link: https://lore.kernel.org/r/20231024132655.730417-3-cleger@rivosinc.com
Signed-off-by: Palmer Dabbelt <palmer@rivosinc.com>
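
For reference, the general conversion pattern looks roughly like this (illustrative sketch only; __my_func/my_func are placeholder names, not symbols touched by this commit). C-callable functions move from ENTRY()/END()/ENDPROC() to SYM_FUNC_START()/SYM_FUNC_END(), code that does not follow the C calling convention uses SYM_CODE_START()/SYM_CODE_END(), and a WEAK() definition becomes a strong function plus SYM_FUNC_ALIAS_WEAK():

    /* Before: deprecated annotations */
    ENTRY(__my_func)
    WEAK(my_func)
            ret
    ENDPROC(__my_func)

    /* After: new-style annotations */
    SYM_FUNC_START(__my_func)
            ret
    SYM_FUNC_END(__my_func)
    SYM_FUNC_ALIAS_WEAK(my_func, __my_func)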
17 files changed:
arch/riscv/kernel/copy-unaligned.S
arch/riscv/kernel/fpu.S
arch/riscv/kernel/head.S
arch/riscv/kernel/hibernate-asm.S
arch/riscv/kernel/mcount-dyn.S
arch/riscv/kernel/mcount.S
arch/riscv/kernel/probes/rethook_trampoline.S
arch/riscv/kernel/suspend_entry.S
arch/riscv/kernel/vdso/flush_icache.S
arch/riscv/kernel/vdso/getcpu.S
arch/riscv/kernel/vdso/rt_sigreturn.S
arch/riscv/kernel/vdso/sys_hwprobe.S
arch/riscv/lib/memcpy.S
arch/riscv/lib/memmove.S
arch/riscv/lib/memset.S
arch/riscv/lib/uaccess.S
arch/riscv/purgatory/entry.S

diff --git a/arch/riscv/kernel/copy-unaligned.S b/arch/riscv/kernel/copy-unaligned.S
index cfdecfbaad627153ead8cde6fe9ca7a298c1aa40..2b3d9398c113fbaea3f775b1439058e5a5178c7d 100644 (file)
@@ -9,7 +9,7 @@
 /* void __riscv_copy_words_unaligned(void *, const void *, size_t) */
 /* Performs a memcpy without aligning buffers, using word loads and stores. */
 /* Note: The size is truncated to a multiple of 8 * SZREG */
-ENTRY(__riscv_copy_words_unaligned)
+SYM_FUNC_START(__riscv_copy_words_unaligned)
        andi  a4, a2, ~((8*SZREG)-1)
        beqz  a4, 2f
        add   a3, a1, a4
@@ -36,12 +36,12 @@ ENTRY(__riscv_copy_words_unaligned)
 
 2:
        ret
-END(__riscv_copy_words_unaligned)
+SYM_FUNC_END(__riscv_copy_words_unaligned)
 
 /* void __riscv_copy_bytes_unaligned(void *, const void *, size_t) */
 /* Performs a memcpy without aligning buffers, using only byte accesses. */
 /* Note: The size is truncated to a multiple of 8 */
-ENTRY(__riscv_copy_bytes_unaligned)
+SYM_FUNC_START(__riscv_copy_bytes_unaligned)
        andi a4, a2, ~(8-1)
        beqz a4, 2f
        add  a3, a1, a4
@@ -68,4 +68,4 @@ ENTRY(__riscv_copy_bytes_unaligned)
 
 2:
        ret
-END(__riscv_copy_bytes_unaligned)
+SYM_FUNC_END(__riscv_copy_bytes_unaligned)
diff --git a/arch/riscv/kernel/fpu.S b/arch/riscv/kernel/fpu.S
index 5dd3161a4dacc0344d3e540ee656685f2d9e6573..2c543f130f9389aa4f1d9fee05f5027d99adaa78 100644 (file)
@@ -19,7 +19,7 @@
 #include <asm/csr.h>
 #include <asm/asm-offsets.h>
 
-ENTRY(__fstate_save)
+SYM_FUNC_START(__fstate_save)
        li  a2,  TASK_THREAD_F0
        add a0, a0, a2
        li t1, SR_FS
@@ -60,9 +60,9 @@ ENTRY(__fstate_save)
        sw t0, TASK_THREAD_FCSR_F0(a0)
        csrc CSR_STATUS, t1
        ret
-ENDPROC(__fstate_save)
+SYM_FUNC_END(__fstate_save)
 
-ENTRY(__fstate_restore)
+SYM_FUNC_START(__fstate_restore)
        li  a2,  TASK_THREAD_F0
        add a0, a0, a2
        li t1, SR_FS
@@ -103,7 +103,7 @@ ENTRY(__fstate_restore)
        fscsr t0
        csrc CSR_STATUS, t1
        ret
-ENDPROC(__fstate_restore)
+SYM_FUNC_END(__fstate_restore)
 
 #define get_f32(which) fmv.x.s a0, which; j 2f
 #define put_f32(which) fmv.s.x which, a1; j 2f
diff --git a/arch/riscv/kernel/head.S b/arch/riscv/kernel/head.S
index 0c0432eb57d723b0330da502457e02c7242991bd..b77397432403d9ef028fea6855cdc97aea143d00 100644 (file)
@@ -19,7 +19,7 @@
 #include "efi-header.S"
 
 __HEAD
-ENTRY(_start)
+SYM_CODE_START(_start)
        /*
         * Image header expected by Linux boot-loaders. The image header data
         * structure is described in asm/image.h.
@@ -187,9 +187,9 @@ secondary_start_sbi:
        wfi
        j .Lsecondary_park
 
-END(_start)
+SYM_CODE_END(_start)
 
-ENTRY(_start_kernel)
+SYM_CODE_START(_start_kernel)
        /* Mask all interrupts */
        csrw CSR_IE, zero
        csrw CSR_IP, zero
@@ -348,10 +348,10 @@ ENTRY(_start_kernel)
        tail .Lsecondary_start_common
 #endif /* CONFIG_RISCV_BOOT_SPINWAIT */
 
-END(_start_kernel)
+SYM_CODE_END(_start_kernel)
 
 #ifdef CONFIG_RISCV_M_MODE
-ENTRY(reset_regs)
+SYM_CODE_START_LOCAL(reset_regs)
        li      sp, 0
        li      gp, 0
        li      tp, 0
@@ -449,5 +449,5 @@ ENTRY(reset_regs)
 .Lreset_regs_done_vector:
 #endif /* CONFIG_RISCV_ISA_V */
        ret
-END(reset_regs)
+SYM_CODE_END(reset_regs)
 #endif /* CONFIG_RISCV_M_MODE */
diff --git a/arch/riscv/kernel/hibernate-asm.S b/arch/riscv/kernel/hibernate-asm.S
index d698dd7df637ba8ad9263e2312f896c268a58b2d..d040dcf4add453dadaa328b85cd462ae43d6d31a 100644 (file)
@@ -21,7 +21,7 @@
  *
  * Always returns 0
  */
-ENTRY(__hibernate_cpu_resume)
+SYM_FUNC_START(__hibernate_cpu_resume)
        /* switch to hibernated image's page table. */
        csrw CSR_SATP, s0
        sfence.vma
@@ -34,7 +34,7 @@ ENTRY(__hibernate_cpu_resume)
        mv      a0, zero
 
        ret
-END(__hibernate_cpu_resume)
+SYM_FUNC_END(__hibernate_cpu_resume)
 
 /*
  * Prepare to restore the image.
@@ -42,7 +42,7 @@ END(__hibernate_cpu_resume)
  * a1: satp of temporary page tables.
  * a2: cpu_resume.
  */
-ENTRY(hibernate_restore_image)
+SYM_FUNC_START(hibernate_restore_image)
        mv      s0, a0
        mv      s1, a1
        mv      s2, a2
@@ -50,7 +50,7 @@ ENTRY(hibernate_restore_image)
        REG_L   a1, relocated_restore_code
 
        jr      a1
-END(hibernate_restore_image)
+SYM_FUNC_END(hibernate_restore_image)
 
 /*
  * The below code will be executed from a 'safe' page.
@@ -58,7 +58,7 @@ END(hibernate_restore_image)
  * back to the original memory location. Finally, it jumps to __hibernate_cpu_resume()
  * to restore the CPU context.
  */
-ENTRY(hibernate_core_restore_code)
+SYM_FUNC_START(hibernate_core_restore_code)
        /* switch to temp page table. */
        csrw satp, s1
        sfence.vma
@@ -73,4 +73,4 @@ ENTRY(hibernate_core_restore_code)
        bnez    s4, .Lcopy
 
        jr      s2
-END(hibernate_core_restore_code)
+SYM_FUNC_END(hibernate_core_restore_code)
diff --git a/arch/riscv/kernel/mcount-dyn.S b/arch/riscv/kernel/mcount-dyn.S
index 669b8697aa38a5ed792ead53d73ce3057c62ab51..58dd96a2a15340ee83c473436a1b2cf25d407c1f 100644 (file)
@@ -82,7 +82,7 @@
        .endm
 #endif /* CONFIG_DYNAMIC_FTRACE_WITH_REGS */
 
-ENTRY(ftrace_caller)
+SYM_FUNC_START(ftrace_caller)
        SAVE_ABI
 
        addi    a0, t0, -FENTRY_RA_OFFSET
@@ -91,8 +91,7 @@ ENTRY(ftrace_caller)
        mv      a1, ra
        mv      a3, sp
 
-ftrace_call:
-       .global ftrace_call
+SYM_INNER_LABEL(ftrace_call, SYM_L_GLOBAL)
        call    ftrace_stub
 
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
@@ -102,16 +101,15 @@ ftrace_call:
 #ifdef HAVE_FUNCTION_GRAPH_FP_TEST
        mv      a2, s0
 #endif
-ftrace_graph_call:
-       .global ftrace_graph_call
+SYM_INNER_LABEL(ftrace_graph_call, SYM_L_GLOBAL)
        call    ftrace_stub
 #endif
        RESTORE_ABI
        jr t0
-ENDPROC(ftrace_caller)
+SYM_FUNC_END(ftrace_caller)
 
 #ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
-ENTRY(ftrace_regs_caller)
+SYM_FUNC_START(ftrace_regs_caller)
        SAVE_ALL
 
        addi    a0, t0, -FENTRY_RA_OFFSET
@@ -120,8 +118,7 @@ ENTRY(ftrace_regs_caller)
        mv      a1, ra
        mv      a3, sp
 
-ftrace_regs_call:
-       .global ftrace_regs_call
+SYM_INNER_LABEL(ftrace_regs_call, SYM_L_GLOBAL)
        call    ftrace_stub
 
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
@@ -131,12 +128,11 @@ ftrace_regs_call:
 #ifdef HAVE_FUNCTION_GRAPH_FP_TEST
        mv      a2, s0
 #endif
-ftrace_graph_regs_call:
-       .global ftrace_graph_regs_call
+SYM_INNER_LABEL(ftrace_graph_regs_call, SYM_L_GLOBAL)
        call    ftrace_stub
 #endif
 
        RESTORE_ALL
        jr t0
-ENDPROC(ftrace_regs_caller)
+SYM_FUNC_END(ftrace_regs_caller)
 #endif /* CONFIG_DYNAMIC_FTRACE_WITH_REGS */
diff --git a/arch/riscv/kernel/mcount.S b/arch/riscv/kernel/mcount.S
index ab4dd0594fe7bdc0ee83e4a5975c9760829c4c88..b4dd9ed6849e30f13922a5ab4e398f87de984e9b 100644 (file)
@@ -61,7 +61,7 @@ SYM_TYPED_FUNC_START(ftrace_stub_graph)
        ret
 SYM_FUNC_END(ftrace_stub_graph)
 
-ENTRY(return_to_handler)
+SYM_FUNC_START(return_to_handler)
 /*
  * On implementing the frame point test, the ideal way is to compare the
  * s0 (frame pointer, if enabled) on entry and the sp (stack pointer) on return.
@@ -76,11 +76,11 @@ ENTRY(return_to_handler)
        mv      a2, a0
        RESTORE_RET_ABI_STATE
        jalr    a2
-ENDPROC(return_to_handler)
+SYM_FUNC_END(return_to_handler)
 #endif
 
 #ifndef CONFIG_DYNAMIC_FTRACE
-ENTRY(MCOUNT_NAME)
+SYM_FUNC_START(MCOUNT_NAME)
        la      t4, ftrace_stub
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
        la      t0, ftrace_graph_return
@@ -126,6 +126,6 @@ ENTRY(MCOUNT_NAME)
        jalr    t5
        RESTORE_ABI_STATE
        ret
-ENDPROC(MCOUNT_NAME)
+SYM_FUNC_END(MCOUNT_NAME)
 #endif
 EXPORT_SYMBOL(MCOUNT_NAME)
diff --git a/arch/riscv/kernel/probes/rethook_trampoline.S b/arch/riscv/kernel/probes/rethook_trampoline.S
index 21bac92a170a9bd43be973f213cb8be3329175ef..f2cd83d9b0f004e0fe6ae64db9425d343a394bf9 100644 (file)
@@ -75,7 +75,7 @@
        REG_L x31, PT_T6(sp)
        .endm
 
-ENTRY(arch_rethook_trampoline)
+SYM_CODE_START(arch_rethook_trampoline)
        addi sp, sp, -(PT_SIZE_ON_STACK)
        save_all_base_regs
 
@@ -90,4 +90,4 @@ ENTRY(arch_rethook_trampoline)
        addi sp, sp, PT_SIZE_ON_STACK
 
        ret
-ENDPROC(arch_rethook_trampoline)
+SYM_CODE_END(arch_rethook_trampoline)
diff --git a/arch/riscv/kernel/suspend_entry.S b/arch/riscv/kernel/suspend_entry.S
index d5cf8b57577739f5ae615b1e3cbd1b117dbeff19..2d54f309c14059ad901f80448df2ff257f047388 100644 (file)
@@ -16,7 +16,7 @@
        .altmacro
        .option norelax
 
-ENTRY(__cpu_suspend_enter)
+SYM_FUNC_START(__cpu_suspend_enter)
        /* Save registers (except A0 and T0-T6) */
        REG_S   ra, (SUSPEND_CONTEXT_REGS + PT_RA)(a0)
        REG_S   sp, (SUSPEND_CONTEXT_REGS + PT_SP)(a0)
@@ -57,7 +57,7 @@ ENTRY(__cpu_suspend_enter)
 
        /* Return to C code */
        ret
-END(__cpu_suspend_enter)
+SYM_FUNC_END(__cpu_suspend_enter)
 
 SYM_TYPED_FUNC_START(__cpu_resume_enter)
        /* Load the global pointer */
diff --git a/arch/riscv/kernel/vdso/flush_icache.S b/arch/riscv/kernel/vdso/flush_icache.S
index 82f97d67c23e9bdde94b0d2f655f52d32c8fd6d1..8f884227e8bca7fd3634217e71d4ee4ed122559a 100644 (file)
@@ -8,7 +8,7 @@
 
        .text
 /* int __vdso_flush_icache(void *start, void *end, unsigned long flags); */
-ENTRY(__vdso_flush_icache)
+SYM_FUNC_START(__vdso_flush_icache)
        .cfi_startproc
 #ifdef CONFIG_SMP
        li a7, __NR_riscv_flush_icache
@@ -19,4 +19,4 @@ ENTRY(__vdso_flush_icache)
 #endif
        ret
        .cfi_endproc
-ENDPROC(__vdso_flush_icache)
+SYM_FUNC_END(__vdso_flush_icache)
diff --git a/arch/riscv/kernel/vdso/getcpu.S b/arch/riscv/kernel/vdso/getcpu.S
index bb0c05e2ffbae3d6aa3609fc5b5de84630aaa37d..9c1bd531907f2fefda1d0778191073ca6b70df1a 100644 (file)
@@ -8,11 +8,11 @@
 
        .text
 /* int __vdso_getcpu(unsigned *cpu, unsigned *node, void *unused); */
-ENTRY(__vdso_getcpu)
+SYM_FUNC_START(__vdso_getcpu)
        .cfi_startproc
        /* For now, just do the syscall. */
        li a7, __NR_getcpu
        ecall
        ret
        .cfi_endproc
-ENDPROC(__vdso_getcpu)
+SYM_FUNC_END(__vdso_getcpu)
diff --git a/arch/riscv/kernel/vdso/rt_sigreturn.S b/arch/riscv/kernel/vdso/rt_sigreturn.S
index 10438c7c626acc8034fa22d6765422fbc7b67f0b..3dc022aa8931ad3b3798f4cc492ce795dd7b7bf5 100644 (file)
@@ -7,10 +7,10 @@
 #include <asm/unistd.h>
 
        .text
-ENTRY(__vdso_rt_sigreturn)
+SYM_FUNC_START(__vdso_rt_sigreturn)
        .cfi_startproc
        .cfi_signal_frame
        li a7, __NR_rt_sigreturn
        ecall
        .cfi_endproc
-ENDPROC(__vdso_rt_sigreturn)
+SYM_FUNC_END(__vdso_rt_sigreturn)
diff --git a/arch/riscv/kernel/vdso/sys_hwprobe.S b/arch/riscv/kernel/vdso/sys_hwprobe.S
index 4e704146c77a092e481b8b532c19b11e3efa82e4..77e57f8305216c466f51979c91899754b4a7b382 100644 (file)
@@ -5,11 +5,11 @@
 #include <asm/unistd.h>
 
 .text
-ENTRY(riscv_hwprobe)
+SYM_FUNC_START(riscv_hwprobe)
        .cfi_startproc
        li a7, __NR_riscv_hwprobe
        ecall
        ret
 
        .cfi_endproc
-ENDPROC(riscv_hwprobe)
+SYM_FUNC_END(riscv_hwprobe)
diff --git a/arch/riscv/lib/memcpy.S b/arch/riscv/lib/memcpy.S
index 1a40d01a95439e1592b673ad76e5f5b964bbbed3..44e009ec5fef683a3290d57f31239661a5352f9e 100644 (file)
@@ -7,8 +7,7 @@
 #include <asm/asm.h>
 
 /* void *memcpy(void *, const void *, size_t) */
-ENTRY(__memcpy)
-WEAK(memcpy)
+SYM_FUNC_START(__memcpy)
        move t6, a0  /* Preserve return value */
 
        /* Defer to byte-oriented copy for small sizes */
@@ -105,6 +104,7 @@ WEAK(memcpy)
        bltu a1, a3, 5b
 6:
        ret
-END(__memcpy)
+SYM_FUNC_END(__memcpy)
+SYM_FUNC_ALIAS_WEAK(memcpy, __memcpy)
 SYM_FUNC_ALIAS(__pi_memcpy, __memcpy)
 SYM_FUNC_ALIAS(__pi___memcpy, __memcpy)
diff --git a/arch/riscv/lib/memmove.S b/arch/riscv/lib/memmove.S
index 1930b388c3a0c67e61e65337f8a763626b74804c..cb3e2e7ef0baa248d906717523a6f848f591eaf9 100644 (file)
@@ -7,7 +7,6 @@
 #include <asm/asm.h>
 
 SYM_FUNC_START(__memmove)
-SYM_FUNC_START_WEAK(memmove)
        /*
         * Returns
         *   a0 - dest
@@ -312,7 +311,7 @@ SYM_FUNC_START_WEAK(memmove)
 .Lreturn_from_memmove:
        ret
 
-SYM_FUNC_END(memmove)
 SYM_FUNC_END(__memmove)
+SYM_FUNC_ALIAS_WEAK(memmove, __memmove)
 SYM_FUNC_ALIAS(__pi_memmove, __memmove)
 SYM_FUNC_ALIAS(__pi___memmove, __memmove)
diff --git a/arch/riscv/lib/memset.S b/arch/riscv/lib/memset.S
index 34c5360c6705c56466cf9890f1a7261e6383a5e3..35f358e70bdb6bf79bda0f366e34c98c6ef5bbc3 100644 (file)
@@ -8,8 +8,7 @@
 #include <asm/asm.h>
 
 /* void *memset(void *, int, size_t) */
-ENTRY(__memset)
-WEAK(memset)
+SYM_FUNC_START(__memset)
        move t0, a0  /* Preserve return value */
 
        /* Defer to byte-oriented fill for small sizes */
@@ -110,4 +109,5 @@ WEAK(memset)
        bltu t0, a3, 5b
 6:
        ret
-END(__memset)
+SYM_FUNC_END(__memset)
+SYM_FUNC_ALIAS_WEAK(memset, __memset)
diff --git a/arch/riscv/lib/uaccess.S b/arch/riscv/lib/uaccess.S
index 09b47ebacf2e8743bc1cf77be0693b4499748afb..3ab438f30d1328707862134f819e8a74598c6dce 100644 (file)
@@ -10,8 +10,7 @@
        _asm_extable    100b, \lbl
        .endm
 
-ENTRY(__asm_copy_to_user)
-ENTRY(__asm_copy_from_user)
+SYM_FUNC_START(__asm_copy_to_user)
 
        /* Enable access to user memory */
        li t6, SR_SUM
@@ -181,13 +180,13 @@ ENTRY(__asm_copy_from_user)
        csrc CSR_STATUS, t6
        sub a0, t5, a0
        ret
-ENDPROC(__asm_copy_to_user)
-ENDPROC(__asm_copy_from_user)
+SYM_FUNC_END(__asm_copy_to_user)
 EXPORT_SYMBOL(__asm_copy_to_user)
+SYM_FUNC_ALIAS(__asm_copy_from_user, __asm_copy_to_user)
 EXPORT_SYMBOL(__asm_copy_from_user)
 
 
-ENTRY(__clear_user)
+SYM_FUNC_START(__clear_user)
 
        /* Enable access to user memory */
        li t6, SR_SUM
@@ -233,5 +232,5 @@ ENTRY(__clear_user)
        csrc CSR_STATUS, t6
        sub a0, a3, a0
        ret
-ENDPROC(__clear_user)
+SYM_FUNC_END(__clear_user)
 EXPORT_SYMBOL(__clear_user)
diff --git a/arch/riscv/purgatory/entry.S b/arch/riscv/purgatory/entry.S
index 0194f4554130ae6b89cf5db97a069a65bc2c6fc1..5bcf3af903daa2f9fb2aaf1e57d79121bfcda988 100644 (file)
@@ -7,15 +7,11 @@
  * Author: Li Zhengyu (lizhengyu3@huawei.com)
  *
  */
-
-.macro size, sym:req
-       .size \sym, . - \sym
-.endm
+#include <linux/linkage.h>
 
 .text
 
-.globl purgatory_start
-purgatory_start:
+SYM_CODE_START(purgatory_start)
 
        lla     sp, .Lstack
        mv      s0, a0  /* The hartid of the current hart */
@@ -28,8 +24,7 @@ purgatory_start:
        mv      a1, s1
        ld      a2, riscv_kernel_entry
        jr      a2
-
-size purgatory_start
+SYM_CODE_END(purgatory_start)
 
 .align 4
        .rept   256
@@ -39,9 +34,6 @@ size purgatory_start
 
 .data
 
-.globl riscv_kernel_entry
-riscv_kernel_entry:
-       .quad   0
-size riscv_kernel_entry
+SYM_DATA(riscv_kernel_entry, .quad 0)
 
 .end