        pop     %rbx
        pop     %rbp
-       ret
+       RET
 SYM_FUNC_END(__efi64_thunk)
 
        .code32
 
 2:     popl    %edi                            // restore callee-save registers
        popl    %ebx
        leave
-       ret
+       RET
 SYM_FUNC_END(efi32_pe_entry)
 
        .section ".rodata"
 
        pop     %ecx
        pop     %ebx
-       ret
+       RET
 SYM_FUNC_END(startup32_set_idt_entry)
 #endif
 
        movl    %eax, rva(boot32_idt_desc+2)(%ebp)
        lidt    rva(boot32_idt_desc)(%ebp)
 #endif
-       ret
+       RET
 SYM_FUNC_END(startup32_load_idt)
 
 /*
        popl    %ebx
        popl    %eax
 #endif
-       ret
+       RET
 SYM_FUNC_END(startup32_check_sev_cbit)
 
 /*
 
 
 #endif /* CONFIG_AMD_MEM_ENCRYPT */
 
-       ret
+       RET
 SYM_FUNC_END(get_sev_encryption_bit)
 
 /**
        /* All good - return success */
        xorl    %eax, %eax
 1:
-       ret
+       RET
 2:
        movl    $-1, %eax
        jmp     1b
 #endif
 
        xor     %rax, %rax
-       ret
+       RET
 SYM_FUNC_END(set_sev_encryption_mask)
 
        .data
 
        pxor T0, MSG
 
 .Lld_partial_8:
-       ret
+       RET
 SYM_FUNC_END(__load_partial)
 
 /*
        mov %r10b, (%r9)
 
 .Lst_partial_1:
-       ret
+       RET
 SYM_FUNC_END(__store_partial)
 
 /*
        movdqu STATE4, 0x40(STATEP)
 
        FRAME_END
-       ret
+       RET
 SYM_FUNC_END(crypto_aegis128_aesni_init)
 
 /*
        movdqu STATE3, 0x30(STATEP)
        movdqu STATE4, 0x40(STATEP)
        FRAME_END
-       ret
+       RET
 
 .Lad_out_1:
        movdqu STATE4, 0x00(STATEP)
        movdqu STATE2, 0x30(STATEP)
        movdqu STATE3, 0x40(STATEP)
        FRAME_END
-       ret
+       RET
 
 .Lad_out_2:
        movdqu STATE3, 0x00(STATEP)
        movdqu STATE1, 0x30(STATEP)
        movdqu STATE2, 0x40(STATEP)
        FRAME_END
-       ret
+       RET
 
 .Lad_out_3:
        movdqu STATE2, 0x00(STATEP)
        movdqu STATE0, 0x30(STATEP)
        movdqu STATE1, 0x40(STATEP)
        FRAME_END
-       ret
+       RET
 
 .Lad_out_4:
        movdqu STATE1, 0x00(STATEP)
        movdqu STATE4, 0x30(STATEP)
        movdqu STATE0, 0x40(STATEP)
        FRAME_END
-       ret
+       RET
 
 .Lad_out:
        FRAME_END
-       ret
+       RET
 SYM_FUNC_END(crypto_aegis128_aesni_ad)
 
 .macro encrypt_block a s0 s1 s2 s3 s4 i
        movdqu STATE2, 0x30(STATEP)
        movdqu STATE3, 0x40(STATEP)
        FRAME_END
-       ret
+       RET
 
 .Lenc_out_1:
        movdqu STATE3, 0x00(STATEP)
        movdqu STATE1, 0x30(STATEP)
        movdqu STATE2, 0x40(STATEP)
        FRAME_END
-       ret
+       RET
 
 .Lenc_out_2:
        movdqu STATE2, 0x00(STATEP)
        movdqu STATE0, 0x30(STATEP)
        movdqu STATE1, 0x40(STATEP)
        FRAME_END
-       ret
+       RET
 
 .Lenc_out_3:
        movdqu STATE1, 0x00(STATEP)
        movdqu STATE4, 0x30(STATEP)
        movdqu STATE0, 0x40(STATEP)
        FRAME_END
-       ret
+       RET
 
 .Lenc_out_4:
        movdqu STATE0, 0x00(STATEP)
        movdqu STATE3, 0x30(STATEP)
        movdqu STATE4, 0x40(STATEP)
        FRAME_END
-       ret
+       RET
 
 .Lenc_out:
        FRAME_END
-       ret
+       RET
 SYM_FUNC_END(crypto_aegis128_aesni_enc)
 
 /*
        movdqu STATE3, 0x40(STATEP)
 
        FRAME_END
-       ret
+       RET
 SYM_FUNC_END(crypto_aegis128_aesni_enc_tail)
 
 .macro decrypt_block a s0 s1 s2 s3 s4 i
        movdqu STATE2, 0x30(STATEP)
        movdqu STATE3, 0x40(STATEP)
        FRAME_END
-       ret
+       RET
 
 .Ldec_out_1:
        movdqu STATE3, 0x00(STATEP)
        movdqu STATE1, 0x30(STATEP)
        movdqu STATE2, 0x40(STATEP)
        FRAME_END
-       ret
+       RET
 
 .Ldec_out_2:
        movdqu STATE2, 0x00(STATEP)
        movdqu STATE0, 0x30(STATEP)
        movdqu STATE1, 0x40(STATEP)
        FRAME_END
-       ret
+       RET
 
 .Ldec_out_3:
        movdqu STATE1, 0x00(STATEP)
        movdqu STATE4, 0x30(STATEP)
        movdqu STATE0, 0x40(STATEP)
        FRAME_END
-       ret
+       RET
 
 .Ldec_out_4:
        movdqu STATE0, 0x00(STATEP)
        movdqu STATE3, 0x30(STATEP)
        movdqu STATE4, 0x40(STATEP)
        FRAME_END
-       ret
+       RET
 
 .Ldec_out:
        FRAME_END
-       ret
+       RET
 SYM_FUNC_END(crypto_aegis128_aesni_dec)
 
 /*
        movdqu STATE3, 0x40(STATEP)
 
        FRAME_END
-       ret
+       RET
 SYM_FUNC_END(crypto_aegis128_aesni_dec_tail)
 
 /*
        movdqu MSG, (%rsi)
 
        FRAME_END
-       ret
+       RET
 SYM_FUNC_END(crypto_aegis128_aesni_final)
 
        /* return updated IV */
        vpshufb xbyteswap, xcounter, xcounter
        vmovdqu xcounter, (p_iv)
-       ret
+       RET
 .endm
 
 /*
 
        GCM_ENC_DEC dec
        GCM_COMPLETE arg10, arg11
        FUNC_RESTORE
-       ret
+       RET
 SYM_FUNC_END(aesni_gcm_dec)
 
 
 
        GCM_COMPLETE arg10, arg11
        FUNC_RESTORE
-       ret
+       RET
 SYM_FUNC_END(aesni_gcm_enc)
 
 /*****************************************************************************
        FUNC_SAVE
        GCM_INIT %arg3, %arg4,%arg5, %arg6
        FUNC_RESTORE
-       ret
+       RET
 SYM_FUNC_END(aesni_gcm_init)
 
 /*****************************************************************************
        FUNC_SAVE
        GCM_ENC_DEC enc
        FUNC_RESTORE
-       ret
+       RET
 SYM_FUNC_END(aesni_gcm_enc_update)
 
 /*****************************************************************************
        FUNC_SAVE
        GCM_ENC_DEC dec
        FUNC_RESTORE
-       ret
+       RET
 SYM_FUNC_END(aesni_gcm_dec_update)
 
 /*****************************************************************************
        FUNC_SAVE
        GCM_COMPLETE %arg3 %arg4
        FUNC_RESTORE
-       ret
+       RET
 SYM_FUNC_END(aesni_gcm_finalize)
 
 #endif
        pxor %xmm1, %xmm0
        movaps %xmm0, (TKEYP)
        add $0x10, TKEYP
-       ret
+       RET
 SYM_FUNC_END(_key_expansion_256a)
 SYM_FUNC_END_ALIAS(_key_expansion_128)
 
        shufps $0b01001110, %xmm2, %xmm1
        movaps %xmm1, 0x10(TKEYP)
        add $0x20, TKEYP
-       ret
+       RET
 SYM_FUNC_END(_key_expansion_192a)
 
 SYM_FUNC_START_LOCAL(_key_expansion_192b)
 
        movaps %xmm0, (TKEYP)
        add $0x10, TKEYP
-       ret
+       RET
 SYM_FUNC_END(_key_expansion_192b)
 
 SYM_FUNC_START_LOCAL(_key_expansion_256b)
        pxor %xmm1, %xmm2
        movaps %xmm2, (TKEYP)
        add $0x10, TKEYP
-       ret
+       RET
 SYM_FUNC_END(_key_expansion_256b)
 
 /*
        popl KEYP
 #endif
        FRAME_END
-       ret
+       RET
 SYM_FUNC_END(aesni_set_key)
 
 /*
        popl KEYP
 #endif
        FRAME_END
-       ret
+       RET
 SYM_FUNC_END(aesni_enc)
 
 /*
        aesenc KEY, STATE
        movaps 0x70(TKEYP), KEY
        aesenclast KEY, STATE
-       ret
+       RET
 SYM_FUNC_END(_aesni_enc1)
 
 /*
        aesenclast KEY, STATE2
        aesenclast KEY, STATE3
        aesenclast KEY, STATE4
-       ret
+       RET
 SYM_FUNC_END(_aesni_enc4)
 
 /*
        popl KEYP
 #endif
        FRAME_END
-       ret
+       RET
 SYM_FUNC_END(aesni_dec)
 
 /*
        aesdec KEY, STATE
        movaps 0x70(TKEYP), KEY
        aesdeclast KEY, STATE
-       ret
+       RET
 SYM_FUNC_END(_aesni_dec1)
 
 /*
        aesdeclast KEY, STATE2
        aesdeclast KEY, STATE3
        aesdeclast KEY, STATE4
-       ret
+       RET
 SYM_FUNC_END(_aesni_dec4)
 
 /*
        popl LEN
 #endif
        FRAME_END
-       ret
+       RET
 SYM_FUNC_END(aesni_ecb_enc)
 
 /*
        popl LEN
 #endif
        FRAME_END
-       ret
+       RET
 SYM_FUNC_END(aesni_ecb_dec)
 
 /*
        popl IVP
 #endif
        FRAME_END
-       ret
+       RET
 SYM_FUNC_END(aesni_cbc_enc)
 
 /*
        popl IVP
 #endif
        FRAME_END
-       ret
+       RET
 SYM_FUNC_END(aesni_cbc_dec)
 
 /*
        popl IVP
 #endif
        FRAME_END
-       ret
+       RET
 SYM_FUNC_END(aesni_cts_cbc_enc)
 
 /*
        popl IVP
 #endif
        FRAME_END
-       ret
+       RET
 SYM_FUNC_END(aesni_cts_cbc_dec)
 
 .pushsection .rodata
        mov $1, TCTR_LOW
        movq TCTR_LOW, INC
        movq CTR, TCTR_LOW
-       ret
+       RET
 SYM_FUNC_END(_aesni_inc_init)
 
 /*
 .Linc_low:
        movaps CTR, IV
        pshufb BSWAP_MASK, IV
-       ret
+       RET
 SYM_FUNC_END(_aesni_inc)
 
 /*
        movups IV, (IVP)
 .Lctr_enc_just_ret:
        FRAME_END
-       ret
+       RET
 SYM_FUNC_END(aesni_ctr_enc)
 
 #endif
        popl IVP
 #endif
        FRAME_END
-       ret
+       RET
 
 .Lxts_enc_1x:
        add $64, LEN
        popl IVP
 #endif
        FRAME_END
-       ret
+       RET
 
 .Lxts_dec_1x:
        add $64, LEN
 
         FUNC_SAVE
         INIT GHASH_MUL_AVX, PRECOMPUTE_AVX
         FUNC_RESTORE
-        ret
+        RET
 SYM_FUNC_END(aesni_gcm_init_avx_gen2)
 
 ###############################################################################
         # must be 192
         GCM_ENC_DEC INITIAL_BLOCKS_AVX, GHASH_8_ENCRYPT_8_PARALLEL_AVX, GHASH_LAST_8_AVX, GHASH_MUL_AVX, ENC, 11
         FUNC_RESTORE
-        ret
+        RET
 key_128_enc_update:
         GCM_ENC_DEC INITIAL_BLOCKS_AVX, GHASH_8_ENCRYPT_8_PARALLEL_AVX, GHASH_LAST_8_AVX, GHASH_MUL_AVX, ENC, 9
         FUNC_RESTORE
-        ret
+        RET
 key_256_enc_update:
         GCM_ENC_DEC INITIAL_BLOCKS_AVX, GHASH_8_ENCRYPT_8_PARALLEL_AVX, GHASH_LAST_8_AVX, GHASH_MUL_AVX, ENC, 13
         FUNC_RESTORE
-        ret
+        RET
 SYM_FUNC_END(aesni_gcm_enc_update_avx_gen2)
 
 ###############################################################################
         # must be 192
         GCM_ENC_DEC INITIAL_BLOCKS_AVX, GHASH_8_ENCRYPT_8_PARALLEL_AVX, GHASH_LAST_8_AVX, GHASH_MUL_AVX, DEC, 11
         FUNC_RESTORE
-        ret
+        RET
 key_128_dec_update:
         GCM_ENC_DEC INITIAL_BLOCKS_AVX, GHASH_8_ENCRYPT_8_PARALLEL_AVX, GHASH_LAST_8_AVX, GHASH_MUL_AVX, DEC, 9
         FUNC_RESTORE
-        ret
+        RET
 key_256_dec_update:
         GCM_ENC_DEC INITIAL_BLOCKS_AVX, GHASH_8_ENCRYPT_8_PARALLEL_AVX, GHASH_LAST_8_AVX, GHASH_MUL_AVX, DEC, 13
         FUNC_RESTORE
-        ret
+        RET
 SYM_FUNC_END(aesni_gcm_dec_update_avx_gen2)
 
 ###############################################################################
         # must be 192
         GCM_COMPLETE GHASH_MUL_AVX, 11, arg3, arg4
         FUNC_RESTORE
-        ret
+        RET
 key_128_finalize:
         GCM_COMPLETE GHASH_MUL_AVX, 9, arg3, arg4
         FUNC_RESTORE
-        ret
+        RET
 key_256_finalize:
         GCM_COMPLETE GHASH_MUL_AVX, 13, arg3, arg4
         FUNC_RESTORE
-        ret
+        RET
 SYM_FUNC_END(aesni_gcm_finalize_avx_gen2)
 
 ###############################################################################
         FUNC_SAVE
         INIT GHASH_MUL_AVX2, PRECOMPUTE_AVX2
         FUNC_RESTORE
-        ret
+        RET
 SYM_FUNC_END(aesni_gcm_init_avx_gen4)
 
 ###############################################################################
         # must be 192
         GCM_ENC_DEC INITIAL_BLOCKS_AVX2, GHASH_8_ENCRYPT_8_PARALLEL_AVX2, GHASH_LAST_8_AVX2, GHASH_MUL_AVX2, ENC, 11
         FUNC_RESTORE
-       ret
+       RET
 key_128_enc_update4:
         GCM_ENC_DEC INITIAL_BLOCKS_AVX2, GHASH_8_ENCRYPT_8_PARALLEL_AVX2, GHASH_LAST_8_AVX2, GHASH_MUL_AVX2, ENC, 9
         FUNC_RESTORE
-       ret
+       RET
 key_256_enc_update4:
         GCM_ENC_DEC INITIAL_BLOCKS_AVX2, GHASH_8_ENCRYPT_8_PARALLEL_AVX2, GHASH_LAST_8_AVX2, GHASH_MUL_AVX2, ENC, 13
         FUNC_RESTORE
-       ret
+       RET
 SYM_FUNC_END(aesni_gcm_enc_update_avx_gen4)
 
 ###############################################################################
         # must be 192
         GCM_ENC_DEC INITIAL_BLOCKS_AVX2, GHASH_8_ENCRYPT_8_PARALLEL_AVX2, GHASH_LAST_8_AVX2, GHASH_MUL_AVX2, DEC, 11
         FUNC_RESTORE
-        ret
+        RET
 key_128_dec_update4:
         GCM_ENC_DEC INITIAL_BLOCKS_AVX2, GHASH_8_ENCRYPT_8_PARALLEL_AVX2, GHASH_LAST_8_AVX2, GHASH_MUL_AVX2, DEC, 9
         FUNC_RESTORE
-        ret
+        RET
 key_256_dec_update4:
         GCM_ENC_DEC INITIAL_BLOCKS_AVX2, GHASH_8_ENCRYPT_8_PARALLEL_AVX2, GHASH_LAST_8_AVX2, GHASH_MUL_AVX2, DEC, 13
         FUNC_RESTORE
-        ret
+        RET
 SYM_FUNC_END(aesni_gcm_dec_update_avx_gen4)
 
 ###############################################################################
         # must be 192
         GCM_COMPLETE GHASH_MUL_AVX2, 11, arg3, arg4
         FUNC_RESTORE
-        ret
+        RET
 key_128_finalize4:
         GCM_COMPLETE GHASH_MUL_AVX2, 9, arg3, arg4
         FUNC_RESTORE
-        ret
+        RET
 key_256_finalize4:
         GCM_COMPLETE GHASH_MUL_AVX2, 13, arg3, arg4
         FUNC_RESTORE
-        ret
+        RET
 SYM_FUNC_END(aesni_gcm_finalize_avx_gen4)
 
        movdqu          %xmm1,0x10(%rdi)
        movdqu          %xmm14,0x20(%rdi)
 .Lendofloop:
-       ret
+       RET
 SYM_FUNC_END(blake2s_compress_ssse3)
 
 #ifdef CONFIG_AS_AVX512
        vmovdqu         %xmm1,0x10(%rdi)
        vmovdqu         %xmm4,0x20(%rdi)
        vzeroupper
-       retq
+       RET
 SYM_FUNC_END(blake2s_compress_avx512)
 #endif /* CONFIG_AS_AVX512 */
 
        jnz .L__enc_xor;
 
        write_block();
-       ret;
+       RET;
 .L__enc_xor:
        xor_block();
-       ret;
+       RET;
 SYM_FUNC_END(__blowfish_enc_blk)
 
 SYM_FUNC_START(blowfish_dec_blk)
 
        movq %r11, %r12;
 
-       ret;
+       RET;
 SYM_FUNC_END(blowfish_dec_blk)
 
 /**********************************************************************
 
        popq %rbx;
        popq %r12;
-       ret;
+       RET;
 
 .L__enc_xor4:
        xor_block4();
 
        popq %rbx;
        popq %r12;
-       ret;
+       RET;
 SYM_FUNC_END(__blowfish_enc_blk_4way)
 
 SYM_FUNC_START(blowfish_dec_blk_4way)
        popq %rbx;
        popq %r12;
 
-       ret;
+       RET;
 SYM_FUNC_END(blowfish_dec_blk_4way)
 
        roundsm16(%xmm0, %xmm1, %xmm2, %xmm3, %xmm4, %xmm5, %xmm6, %xmm7,
                  %xmm8, %xmm9, %xmm10, %xmm11, %xmm12, %xmm13, %xmm14, %xmm15,
                  %rcx, (%r9));
-       ret;
+       RET;
 SYM_FUNC_END(roundsm16_x0_x1_x2_x3_x4_x5_x6_x7_y0_y1_y2_y3_y4_y5_y6_y7_cd)
 
 .align 8
        roundsm16(%xmm4, %xmm5, %xmm6, %xmm7, %xmm0, %xmm1, %xmm2, %xmm3,
                  %xmm12, %xmm13, %xmm14, %xmm15, %xmm8, %xmm9, %xmm10, %xmm11,
                  %rax, (%r9));
-       ret;
+       RET;
 SYM_FUNC_END(roundsm16_x4_x5_x6_x7_x0_x1_x2_x3_y4_y5_y6_y7_y0_y1_y2_y3_ab)
 
 /*
                    %xmm15, (key_table)(CTX, %r8, 8), (%rax), 1 * 16(%rax));
 
        FRAME_END
-       ret;
+       RET;
 
 .align 8
 .Lenc_max32:
                    %xmm15, (key_table)(CTX), (%rax), 1 * 16(%rax));
 
        FRAME_END
-       ret;
+       RET;
 
 .align 8
 .Ldec_max32:
                     %xmm8, %rsi);
 
        FRAME_END
-       ret;
+       RET;
 SYM_FUNC_END(camellia_ecb_enc_16way)
 
 SYM_FUNC_START(camellia_ecb_dec_16way)
                     %xmm8, %rsi);
 
        FRAME_END
-       ret;
+       RET;
 SYM_FUNC_END(camellia_ecb_dec_16way)
 
 SYM_FUNC_START(camellia_cbc_dec_16way)
                     %xmm8, %rsi);
 
        FRAME_END
-       ret;
+       RET;
 SYM_FUNC_END(camellia_cbc_dec_16way)
 
        roundsm32(%ymm0, %ymm1, %ymm2, %ymm3, %ymm4, %ymm5, %ymm6, %ymm7,
                  %ymm8, %ymm9, %ymm10, %ymm11, %ymm12, %ymm13, %ymm14, %ymm15,
                  %rcx, (%r9));
-       ret;
+       RET;
 SYM_FUNC_END(roundsm32_x0_x1_x2_x3_x4_x5_x6_x7_y0_y1_y2_y3_y4_y5_y6_y7_cd)
 
 .align 8
        roundsm32(%ymm4, %ymm5, %ymm6, %ymm7, %ymm0, %ymm1, %ymm2, %ymm3,
                  %ymm12, %ymm13, %ymm14, %ymm15, %ymm8, %ymm9, %ymm10, %ymm11,
                  %rax, (%r9));
-       ret;
+       RET;
 SYM_FUNC_END(roundsm32_x4_x5_x6_x7_x0_x1_x2_x3_y4_y5_y6_y7_y0_y1_y2_y3_ab)
 
 /*
                    %ymm15, (key_table)(CTX, %r8, 8), (%rax), 1 * 32(%rax));
 
        FRAME_END
-       ret;
+       RET;
 
 .align 8
 .Lenc_max32:
                    %ymm15, (key_table)(CTX), (%rax), 1 * 32(%rax));
 
        FRAME_END
-       ret;
+       RET;
 
 .align 8
 .Ldec_max32:
        vzeroupper;
 
        FRAME_END
-       ret;
+       RET;
 SYM_FUNC_END(camellia_ecb_enc_32way)
 
 SYM_FUNC_START(camellia_ecb_dec_32way)
        vzeroupper;
 
        FRAME_END
-       ret;
+       RET;
 SYM_FUNC_END(camellia_ecb_dec_32way)
 
 SYM_FUNC_START(camellia_cbc_dec_32way)
 
        addq $(16 * 32), %rsp;
        FRAME_END
-       ret;
+       RET;
 SYM_FUNC_END(camellia_cbc_dec_32way)
 
        enc_outunpack(mov, RT1);
 
        movq RR12, %r12;
-       ret;
+       RET;
 
 .L__enc_xor:
        enc_outunpack(xor, RT1);
 
        movq RR12, %r12;
-       ret;
+       RET;
 SYM_FUNC_END(__camellia_enc_blk)
 
 SYM_FUNC_START(camellia_dec_blk)
        dec_outunpack();
 
        movq RR12, %r12;
-       ret;
+       RET;
 SYM_FUNC_END(camellia_dec_blk)
 
 /**********************************************************************
 
        movq RR12, %r12;
        popq %rbx;
-       ret;
+       RET;
 
 .L__enc2_xor:
        enc_outunpack2(xor, RT2);
 
        movq RR12, %r12;
        popq %rbx;
-       ret;
+       RET;
 SYM_FUNC_END(__camellia_enc_blk_2way)
 
 SYM_FUNC_START(camellia_dec_blk_2way)
 
        movq RR12, %r12;
        movq RXOR, %rbx;
-       ret;
+       RET;
 SYM_FUNC_END(camellia_dec_blk_2way)
 
        outunpack_blocks(RR3, RL3, RTMP, RX, RKM);
        outunpack_blocks(RR4, RL4, RTMP, RX, RKM);
 
-       ret;
+       RET;
 SYM_FUNC_END(__cast5_enc_blk16)
 
 .align 16
        outunpack_blocks(RR3, RL3, RTMP, RX, RKM);
        outunpack_blocks(RR4, RL4, RTMP, RX, RKM);
 
-       ret;
+       RET;
 
 .L__skip_dec:
        vpsrldq $4, RKR, RKR;
 
        popq %r15;
        FRAME_END
-       ret;
+       RET;
 SYM_FUNC_END(cast5_ecb_enc_16way)
 
 SYM_FUNC_START(cast5_ecb_dec_16way)
 
        popq %r15;
        FRAME_END
-       ret;
+       RET;
 SYM_FUNC_END(cast5_ecb_dec_16way)
 
 SYM_FUNC_START(cast5_cbc_dec_16way)
        popq %r15;
        popq %r12;
        FRAME_END
-       ret;
+       RET;
 SYM_FUNC_END(cast5_cbc_dec_16way)
 
 SYM_FUNC_START(cast5_ctr_16way)
        popq %r15;
        popq %r12;
        FRAME_END
-       ret;
+       RET;
 SYM_FUNC_END(cast5_ctr_16way)
 
        outunpack_blocks(RA1, RB1, RC1, RD1, RTMP, RX, RKRF, RKM);
        outunpack_blocks(RA2, RB2, RC2, RD2, RTMP, RX, RKRF, RKM);
 
-       ret;
+       RET;
 SYM_FUNC_END(__cast6_enc_blk8)
 
 .align 8
        outunpack_blocks(RA1, RB1, RC1, RD1, RTMP, RX, RKRF, RKM);
        outunpack_blocks(RA2, RB2, RC2, RD2, RTMP, RX, RKRF, RKM);
 
-       ret;
+       RET;
 SYM_FUNC_END(__cast6_dec_blk8)
 
 SYM_FUNC_START(cast6_ecb_enc_8way)
 
        popq %r15;
        FRAME_END
-       ret;
+       RET;
 SYM_FUNC_END(cast6_ecb_enc_8way)
 
 SYM_FUNC_START(cast6_ecb_dec_8way)
 
        popq %r15;
        FRAME_END
-       ret;
+       RET;
 SYM_FUNC_END(cast6_ecb_dec_8way)
 
 SYM_FUNC_START(cast6_cbc_dec_8way)
        popq %r15;
        popq %r12;
        FRAME_END
-       ret;
+       RET;
 SYM_FUNC_END(cast6_cbc_dec_8way)
 
 
 .Ldone2:
        vzeroupper
-       ret
+       RET
 
 .Lxorpart2:
        # xor remaining bytes from partial register into output
 
 .Ldone4:
        vzeroupper
-       ret
+       RET
 
 .Lxorpart4:
        # xor remaining bytes from partial register into output
 .Ldone8:
        vzeroupper
        lea             -8(%r10),%rsp
-       ret
+       RET
 
 .Lxorpart8:
        # xor remaining bytes from partial register into output
 
 
 .Ldone2:
        vzeroupper
-       ret
+       RET
 
 .Lxorpart2:
        # xor remaining bytes from partial register into output
 
 .Ldone4:
        vzeroupper
-       ret
+       RET
 
 .Lxorpart4:
        # xor remaining bytes from partial register into output
 
 .Ldone8:
        vzeroupper
-       ret
+       RET
 
 .Lxorpart8:
        # xor remaining bytes from partial register into output
 
        sub             $2,%r8d
        jnz             .Ldoubleround
 
-       ret
+       RET
 SYM_FUNC_END(chacha_permute)
 
 SYM_FUNC_START(chacha_block_xor_ssse3)
 
 .Ldone:
        FRAME_END
-       ret
+       RET
 
 .Lxorpart:
        # xor remaining bytes from partial register into output
        movdqu          %xmm3,0x10(%rsi)
 
        FRAME_END
-       ret
+       RET
 SYM_FUNC_END(hchacha_block_ssse3)
 
 SYM_FUNC_START(chacha_4block_xor_ssse3)
 
 .Ldone4:
        lea             -8(%r10),%rsp
-       ret
+       RET
 
 .Lxorpart4:
        # xor remaining bytes from partial register into output
 
        pxor    %xmm2, %xmm1
        pextrd  $0x01, %xmm1, %eax
 
-       ret
+       RET
 SYM_FUNC_END(crc32_pclmul_le_16)
 
        popq    %rsi
        popq    %rdi
        popq    %rbx
-        ret
+        RET
 SYM_FUNC_END(crc_pcl)
 
 .section       .rodata, "a", @progbits
 
        # Final CRC value (x^16 * M(x)) mod G(x) is in low 16 bits of xmm0.
 
        pextrw  $0, %xmm0, %eax
-       ret
+       RET
 
 .align 16
 .Lless_than_256_bytes:
 
        popq %r12;
        popq %rbx;
 
-       ret;
+       RET;
 SYM_FUNC_END(des3_ede_x86_64_crypt_blk)
 
 /***********************************************************************
        popq %r12;
        popq %rbx;
 
-       ret;
+       RET;
 SYM_FUNC_END(des3_ede_x86_64_crypt_blk_3way)
 
 .section       .rodata, "a", @progbits
 
        psrlq $1, T2
        pxor T2, T1
        pxor T1, DATA
-       ret
+       RET
 SYM_FUNC_END(__clmul_gf128mul_ble)
 
 /* void clmul_ghash_mul(char *dst, const u128 *shash) */
        pshufb BSWAP, DATA
        movups DATA, (%rdi)
        FRAME_END
-       ret
+       RET
 SYM_FUNC_END(clmul_ghash_mul)
 
 /*
        movups DATA, (%rdi)
 .Lupdate_just_ret:
        FRAME_END
-       ret
+       RET
 SYM_FUNC_END(clmul_ghash_update)
 
        vpaddq          T1, T0, T0
        vpaddq          T4, T0, T0
        vmovdqu         T0, (HASH)
-       ret
+       RET
 SYM_FUNC_END(nh_avx2)
 
        paddq           PASS2_SUMS, T1
        movdqu          T0, 0x00(HASH)
        movdqu          T1, 0x10(HASH)
-       ret
+       RET
 SYM_FUNC_END(nh_sse2)
 
        write_blocks(RA1, RB1, RC1, RD1, RK0, RK1, RK2);
        write_blocks(RA2, RB2, RC2, RD2, RK0, RK1, RK2);
 
-       ret;
+       RET;
 SYM_FUNC_END(__serpent_enc_blk8_avx)
 
 .align 8
        write_blocks(RC1, RD1, RB1, RE1, RK0, RK1, RK2);
        write_blocks(RC2, RD2, RB2, RE2, RK0, RK1, RK2);
 
-       ret;
+       RET;
 SYM_FUNC_END(__serpent_dec_blk8_avx)
 
 SYM_FUNC_START(serpent_ecb_enc_8way_avx)
        store_8way(%rsi, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
 
        FRAME_END
-       ret;
+       RET;
 SYM_FUNC_END(serpent_ecb_enc_8way_avx)
 
 SYM_FUNC_START(serpent_ecb_dec_8way_avx)
        store_8way(%rsi, RC1, RD1, RB1, RE1, RC2, RD2, RB2, RE2);
 
        FRAME_END
-       ret;
+       RET;
 SYM_FUNC_END(serpent_ecb_dec_8way_avx)
 
 SYM_FUNC_START(serpent_cbc_dec_8way_avx)
        store_cbc_8way(%rdx, %rsi, RC1, RD1, RB1, RE1, RC2, RD2, RB2, RE2);
 
        FRAME_END
-       ret;
+       RET;
 SYM_FUNC_END(serpent_cbc_dec_8way_avx)
 
        write_blocks(RA1, RB1, RC1, RD1, RK0, RK1, RK2);
        write_blocks(RA2, RB2, RC2, RD2, RK0, RK1, RK2);
 
-       ret;
+       RET;
 SYM_FUNC_END(__serpent_enc_blk16)
 
 .align 8
        write_blocks(RC1, RD1, RB1, RE1, RK0, RK1, RK2);
        write_blocks(RC2, RD2, RB2, RE2, RK0, RK1, RK2);
 
-       ret;
+       RET;
 SYM_FUNC_END(__serpent_dec_blk16)
 
 SYM_FUNC_START(serpent_ecb_enc_16way)
        vzeroupper;
 
        FRAME_END
-       ret;
+       RET;
 SYM_FUNC_END(serpent_ecb_enc_16way)
 
 SYM_FUNC_START(serpent_ecb_dec_16way)
        vzeroupper;
 
        FRAME_END
-       ret;
+       RET;
 SYM_FUNC_END(serpent_ecb_dec_16way)
 
 SYM_FUNC_START(serpent_cbc_dec_16way)
        vzeroupper;
 
        FRAME_END
-       ret;
+       RET;
 SYM_FUNC_END(serpent_cbc_dec_16way)
 
 
        write_blocks(%eax, RA, RB, RC, RD, RT0, RT1, RE);
 
-       ret;
+       RET;
 
 .L__enc_xor4:
        xor_blocks(%eax, RA, RB, RC, RD, RT0, RT1, RE);
 
-       ret;
+       RET;
 SYM_FUNC_END(__serpent_enc_blk_4way)
 
 SYM_FUNC_START(serpent_dec_blk_4way)
        movl arg_dst(%esp), %eax;
        write_blocks(%eax, RC, RD, RB, RE, RT0, RT1, RA);
 
-       ret;
+       RET;
 SYM_FUNC_END(serpent_dec_blk_4way)
 
        write_blocks(%rsi, RA1, RB1, RC1, RD1, RK0, RK1, RK2);
        write_blocks(%rax, RA2, RB2, RC2, RD2, RK0, RK1, RK2);
 
-       ret;
+       RET;
 
 .L__enc_xor8:
        xor_blocks(%rsi, RA1, RB1, RC1, RD1, RK0, RK1, RK2);
        xor_blocks(%rax, RA2, RB2, RC2, RD2, RK0, RK1, RK2);
 
-       ret;
+       RET;
 SYM_FUNC_END(__serpent_enc_blk_8way)
 
 SYM_FUNC_START(serpent_dec_blk_8way)
        write_blocks(%rsi, RC1, RD1, RB1, RE1, RK0, RK1, RK2);
        write_blocks(%rax, RC2, RD2, RB2, RE2, RK0, RK1, RK2);
 
-       ret;
+       RET;
 SYM_FUNC_END(serpent_dec_blk_8way)
 
        pop     %r12
        pop     %rbx
 
-       ret
+       RET
 
        SYM_FUNC_END(\name)
 .endm
 
        mov             %rbp, %rsp
        pop             %rbp
 
-       ret
+       RET
 SYM_FUNC_END(sha1_ni_transform)
 
 .section       .rodata.cst16.PSHUFFLE_BYTE_FLIP_MASK, "aM", @progbits, 16
 
        pop     %rbp
        pop     %r12
        pop     %rbx
-       ret
+       RET
 
        SYM_FUNC_END(\name)
 .endm
 
        popq    %r13
        popq    %r12
        popq    %rbx
-       ret
+       RET
 SYM_FUNC_END(sha256_transform_avx)
 
 .section       .rodata.cst256.K256, "aM", @progbits, 256
 
        popq    %r13
        popq    %r12
        popq    %rbx
-       ret
+       RET
 SYM_FUNC_END(sha256_transform_rorx)
 
 .section       .rodata.cst512.K256, "aM", @progbits, 512
 
        popq    %r12
        popq    %rbx
 
-       ret
+       RET
 SYM_FUNC_END(sha256_transform_ssse3)
 
 .section       .rodata.cst256.K256, "aM", @progbits, 256
 
 
 .Ldone_hash:
 
-       ret
+       RET
 SYM_FUNC_END(sha256_ni_transform)
 
 .section       .rodata.cst256.K256, "aM", @progbits, 256
 
        pop     %rbx
 
 nowork:
-       ret
+       RET
 SYM_FUNC_END(sha512_transform_avx)
 
 ########################################################################
 
        pop     %r12
        pop     %rbx
 
-       ret
+       RET
 SYM_FUNC_END(sha512_transform_rorx)
 
 ########################################################################
 
        pop     %rbx
 
 nowork:
-       ret
+       RET
 SYM_FUNC_END(sha512_transform_ssse3)
 
 ########################################################################
 
 .Lblk4_store_output_done:
        vzeroall;
        FRAME_END
-       ret;
+       RET;
 SYM_FUNC_END(sm4_aesni_avx_crypt4)
 
 .align 8
        vpshufb RTMP2, RB3, RB3;
 
        FRAME_END
-       ret;
+       RET;
 SYM_FUNC_END(__sm4_crypt_blk8)
 
 /*
 .Lblk8_store_output_done:
        vzeroall;
        FRAME_END
-       ret;
+       RET;
 SYM_FUNC_END(sm4_aesni_avx_crypt8)
 
 /*
 
        vzeroall;
        FRAME_END
-       ret;
+       RET;
 SYM_FUNC_END(sm4_aesni_avx_ctr_enc_blk8)
 
 /*
 
        vzeroall;
        FRAME_END
-       ret;
+       RET;
 SYM_FUNC_END(sm4_aesni_avx_cbc_dec_blk8)
 
 /*
 
        vzeroall;
        FRAME_END
-       ret;
+       RET;
 SYM_FUNC_END(sm4_aesni_avx_cfb_dec_blk8)
 
        vpshufb RTMP2, RB3, RB3;
 
        FRAME_END
-       ret;
+       RET;
 SYM_FUNC_END(__sm4_crypt_blk16)
 
 #define inc_le128(x, minus_one, tmp) \
 
        vzeroall;
        FRAME_END
-       ret;
+       RET;
 SYM_FUNC_END(sm4_aesni_avx2_ctr_enc_blk16)
 
 /*
 
        vzeroall;
        FRAME_END
-       ret;
+       RET;
 SYM_FUNC_END(sm4_aesni_avx2_cbc_dec_blk16)
 
 /*
 
        vzeroall;
        FRAME_END
-       ret;
+       RET;
 SYM_FUNC_END(sm4_aesni_avx2_cfb_dec_blk16)
 
        outunpack_blocks(RC1, RD1, RA1, RB1, RK1, RX0, RY0, RK2);
        outunpack_blocks(RC2, RD2, RA2, RB2, RK1, RX0, RY0, RK2);
 
-       ret;
+       RET;
 SYM_FUNC_END(__twofish_enc_blk8)
 
 .align 8
        outunpack_blocks(RA1, RB1, RC1, RD1, RK1, RX0, RY0, RK2);
        outunpack_blocks(RA2, RB2, RC2, RD2, RK1, RX0, RY0, RK2);
 
-       ret;
+       RET;
 SYM_FUNC_END(__twofish_dec_blk8)
 
 SYM_FUNC_START(twofish_ecb_enc_8way)
        store_8way(%r11, RC1, RD1, RA1, RB1, RC2, RD2, RA2, RB2);
 
        FRAME_END
-       ret;
+       RET;
 SYM_FUNC_END(twofish_ecb_enc_8way)
 
 SYM_FUNC_START(twofish_ecb_dec_8way)
        store_8way(%r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
 
        FRAME_END
-       ret;
+       RET;
 SYM_FUNC_END(twofish_ecb_dec_8way)
 
 SYM_FUNC_START(twofish_cbc_dec_8way)
        popq %r12;
 
        FRAME_END
-       ret;
+       RET;
 SYM_FUNC_END(twofish_cbc_dec_8way)
 
        pop     %ebx
        pop     %ebp
        mov     $1,     %eax
-       ret
+       RET
 SYM_FUNC_END(twofish_enc_blk)
 
 SYM_FUNC_START(twofish_dec_blk)
        pop     %ebx
        pop     %ebp
        mov     $1,     %eax
-       ret
+       RET
 SYM_FUNC_END(twofish_dec_blk)
 
        popq %rbx;
        popq %r12;
        popq %r13;
-       ret;
+       RET;
 
 .L__enc_xor3:
        outunpack_enc3(xor);
        popq %rbx;
        popq %r12;
        popq %r13;
-       ret;
+       RET;
 SYM_FUNC_END(__twofish_enc_blk_3way)
 
 SYM_FUNC_START(twofish_dec_blk_3way)
        popq %rbx;
        popq %r12;
        popq %r13;
-       ret;
+       RET;
 SYM_FUNC_END(twofish_dec_blk_3way)
 
 
        popq    R1
        movl    $1,%eax
-       ret
+       RET
 SYM_FUNC_END(twofish_enc_blk)
 
 SYM_FUNC_START(twofish_dec_blk)
 
        popq    R1
        movl    $1,%eax
-       ret
+       RET
 SYM_FUNC_END(twofish_dec_blk)
 
        popl    %eax
 
        FRAME_END
-       ret
+       RET
 SYM_FUNC_END(schedule_tail_wrapper)
 .popsection
 
 
 2:     ALTERNATIVE "", "mfence", X86_BUG_SWAPGS_FENCE
        swapgs
        FRAME_END
-       ret
+       RET
 SYM_FUNC_END(asm_load_gs_index)
 EXPORT_SYMBOL(asm_load_gs_index)
 
         * is needed here.
         */
        SAVE_AND_SET_GSBASE scratch_reg=%rax save_reg=%rbx
-       ret
+       RET
 
 .Lparanoid_entry_checkgs:
        /* EBX = 1 -> kernel GSBASE active, no restore required */
 .Lparanoid_kernel_gsbase:
 
        FENCE_SWAPGS_KERNEL_ENTRY
-       ret
+       RET
 SYM_CODE_END(paranoid_entry)
 
 /*
        movq    %rax, %rsp                      /* switch stack */
        ENCODE_FRAME_POINTER
        pushq   %r12
-       ret
+       RET
 
        /*
         * There are two places in the kernel that can potentially fault with
         */
 .Lerror_entry_done_lfence:
        FENCE_SWAPGS_KERNEL_ENTRY
-       ret
+       RET
 
 .Lbstep_iret:
        /* Fix truncated RIP */
 
        popl %edx
        popl %ecx
        popl %eax
-       ret
+       RET
        _ASM_NOKPROBE(\name)
 SYM_CODE_END(\name)
        .endm
 
        popq %rsi
        popq %rdi
        popq %rbp
-       ret
+       RET
        _ASM_NOKPROBE(__thunk_restore)
 SYM_CODE_END(__thunk_restore)
 #endif
 
        popl    %ecx
        CFI_RESTORE             ecx
        CFI_ADJUST_CFA_OFFSET   -4
-       ret
+       RET
        CFI_ENDPROC
 
        .size __kernel_vsyscall,.-__kernel_vsyscall
 
        pop     %rbx
        leave
        .cfi_def_cfa            %rsp, 8
-       ret
+       RET
 
        /* The out-of-line code runs with the pre-leave stack frame. */
        .cfi_def_cfa            %rbp, 16
 
 
        mov $__NR_gettimeofday, %rax
        syscall
-       ret
+       RET
 
        .balign 1024, 0xcc
        mov $__NR_time, %rax
        syscall
-       ret
+       RET
 
        .balign 1024, 0xcc
        mov $__NR_getcpu, %rax
        syscall
-       ret
+       RET
 
        .balign 4096, 0xcc
 
 
        popl    saved_context_eflags
 
        movl    $ret_point, saved_eip
-       ret
+       RET
 
 
 restore_registers:
        movl    saved_context_edi, %edi
        pushl   saved_context_eflags
        popfl
-       ret
+       RET
 
 SYM_CODE_START(do_suspend_lowlevel)
        call    save_processor_state
 ret_point:
        call    restore_registers
        call    restore_processor_state
-       ret
+       RET
 SYM_CODE_END(do_suspend_lowlevel)
 
 .data
 
 #endif
 
 SYM_FUNC_START(__fentry__)
-       ret
+       RET
 SYM_FUNC_END(__fentry__)
 EXPORT_SYMBOL(__fentry__)
 
 
 /* This is weak to keep gas from relaxing the jumps */
 SYM_INNER_LABEL_ALIGN(ftrace_stub, SYM_L_WEAK)
-       ret
+       RET
 SYM_CODE_END(ftrace_caller)
 
 SYM_CODE_START(ftrace_regs_caller)
        popl    %edx
        popl    %ecx
        popl    %eax
-       ret
+       RET
 SYM_CODE_END(ftrace_graph_caller)
 
 .globl return_to_handler
 
 #ifdef CONFIG_DYNAMIC_FTRACE
 
 SYM_FUNC_START(__fentry__)
-       retq
+       RET
 SYM_FUNC_END(__fentry__)
 EXPORT_SYMBOL(__fentry__)
 
 SYM_FUNC_START(ftrace_epilogue)
 /*
  * This is weak to keep gas from relaxing the jumps.
- * It is also used to copy the retq for trampolines.
+ * It is also used to copy the RET for trampolines.
  */
 SYM_INNER_LABEL_ALIGN(ftrace_stub, SYM_L_WEAK)
        UNWIND_HINT_FUNC
-       retq
+       RET
 SYM_FUNC_END(ftrace_epilogue)
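
Every hunk in this patch makes the same mechanical substitution: a bare
ret/retq (including the one copied into ftrace trampolines via ftrace_stub
above) becomes the RET macro, so that what a function return expands to is
controlled in one place. The macro definition itself is not part of these
hunks; as a rough, hypothetical sketch assuming the CONFIG_SLS
straight-line-speculation option, it amounts to:

	/* sketch only -- the real definition lives outside this diff */
	#ifdef CONFIG_SLS
	#define RET	ret; int3	/* trap speculation past the return */
	#else
	#define RET	ret
	#endif

Without the conversion, any site still spelling a bare ret would silently
miss whatever the central RET definition adds.
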
 
 SYM_FUNC_START(ftrace_regs_caller)
        jnz trace
 
 SYM_INNER_LABEL(ftrace_stub, SYM_L_GLOBAL)
-       retq
+       RET
 
 trace:
        /* save_mcount_regs fills in first two parameters */
 
 __INIT
 setup_once:
        andl $0,setup_once_ref  /* Once is enough, thanks */
-       ret
+       RET
 
 SYM_FUNC_START(early_idt_handler_array)
        # 36(%esp) %eflags
 
 SYM_FUNC_START(native_save_fl)
        pushf
        pop %_ASM_AX
-       ret
+       RET
 SYM_FUNC_END(native_save_fl)
 .popsection
 EXPORT_SYMBOL(native_save_fl)
 
        movl    %edi, %eax
        addl    $(identity_mapped - relocate_kernel), %eax
        pushl   %eax
-       ret
+       RET
 SYM_CODE_END(relocate_kernel)
 
 SYM_CODE_START_LOCAL_NOALIGN(identity_mapped)
        xorl    %edx, %edx
        xorl    %esi, %esi
        xorl    %ebp, %ebp
-       ret
+       RET
 1:
        popl    %edx
        movl    CP_PA_SWAP_PAGE(%edi), %esp
        movl    %edi, %eax
        addl    $(virtual_mapped - relocate_kernel), %eax
        pushl   %eax
-       ret
+       RET
 SYM_CODE_END(identity_mapped)
 
 SYM_CODE_START_LOCAL_NOALIGN(virtual_mapped)
        popl    %edi
        popl    %esi
        popl    %ebx
-       ret
+       RET
 SYM_CODE_END(virtual_mapped)
 
        /* Do the copies */
        popl    %edi
        popl    %ebx
        popl    %ebp
-       ret
+       RET
 SYM_CODE_END(swap_pages)
 
        .globl kexec_control_code_size
 
        /* jump to identity mapped page */
        addq    $(identity_mapped - relocate_kernel), %r8
        pushq   %r8
-       ret
+       RET
 SYM_CODE_END(relocate_kernel)
 
 SYM_CODE_START_LOCAL_NOALIGN(identity_mapped)
        xorl    %r14d, %r14d
        xorl    %r15d, %r15d
 
-       ret
+       RET
 
 1:
        popq    %rdx
        call    swap_pages
        movq    $virtual_mapped, %rax
        pushq   %rax
-       ret
+       RET
 SYM_CODE_END(identity_mapped)
 
 SYM_CODE_START_LOCAL_NOALIGN(virtual_mapped)
        popq    %r12
        popq    %rbp
        popq    %rbx
-       ret
+       RET
 SYM_CODE_END(virtual_mapped)
 
        /* Do the copies */
        lea     PAGE_SIZE(%rax), %rsi
        jmp     0b
 3:
-       ret
+       RET
 SYM_CODE_END(swap_pages)
 
        .globl kexec_control_code_size
 
 #endif
        /* Return page-table pointer */
        movq    %rdi, %rax
-       ret
+       RET
 SYM_FUNC_END(sev_verify_cbit)
 
 .Lverify_cpu_no_longmode:
        popf                            # Restore caller passed flags
        movl $1,%eax
-       ret
+       RET
 .Lverify_cpu_sse_ok:
        popf                            # Restore caller passed flags
        xorl %eax, %eax
-       ret
+       RET
 SYM_FUNC_END(verify_cpu)
 
        pop %edi
 #endif
        pop %_ASM_BP
-       ret
+       RET
 
 3:     cmpb $0, kvm_rebooting
        jne 2b
        pop %edi
 #endif
        pop %_ASM_BP
-       ret
+       RET
 
 3:     cmpb $0, kvm_rebooting
        jne 2b
 
        je 2f
 
 1:     vmresume
-       ret
+       RET
 
 2:     vmlaunch
-       ret
+       RET
 
 3:     cmpb $0, kvm_rebooting
        je 4f
-       ret
+       RET
 4:     ud2
 
        _ASM_EXTABLE(1b, 3b)
        pop %_ASM_AX
 .Lvmexit_skip_rsb:
 #endif
-       ret
+       RET
 SYM_FUNC_END(vmx_vmexit)
 
 /**
        pop %edi
 #endif
        pop %_ASM_BP
-       ret
+       RET
 
        /* VM-Fail.  Out-of-line to avoid a taken Jcc after VM-Exit. */
 2:     mov $1, %eax
        pop %_ASM_AX
        pop %_ASM_BP
 
-       ret
+       RET
 SYM_FUNC_END(vmread_error_trampoline)
 
 SYM_FUNC_START(vmx_do_interrupt_nmi_irqoff)
         */
        mov %_ASM_BP, %_ASM_SP
        pop %_ASM_BP
-       ret
+       RET
 SYM_FUNC_END(vmx_do_interrupt_nmi_irqoff)
 
 
 #define RET_IRQ_RESTORE \
        IRQ_RESTORE v; \
-       ret
+       RET
 
 #define v %ecx
 BEGIN_IRQ_SAVE(read)
 
 
 SYM_FUNC_START(atomic64_read_cx8)
        read64 %ecx
-       ret
+       RET
 SYM_FUNC_END(atomic64_read_cx8)
 
 SYM_FUNC_START(atomic64_set_cx8)
        cmpxchg8b (%esi)
        jne 1b
 
-       ret
+       RET
 SYM_FUNC_END(atomic64_set_cx8)
 
 SYM_FUNC_START(atomic64_xchg_cx8)
        cmpxchg8b (%esi)
        jne 1b
 
-       ret
+       RET
 SYM_FUNC_END(atomic64_xchg_cx8)
 
 .macro addsub_return func ins insc
        popl %esi
        popl %ebx
        popl %ebp
-       ret
+       RET
 SYM_FUNC_END(atomic64_\func\()_return_cx8)
 .endm
 
        movl %ebx, %eax
        movl %ecx, %edx
        popl %ebx
-       ret
+       RET
 SYM_FUNC_END(atomic64_\func\()_return_cx8)
 .endm
 
        movl %ebx, %eax
        movl %ecx, %edx
        popl %ebx
-       ret
+       RET
 SYM_FUNC_END(atomic64_dec_if_positive_cx8)
 
 SYM_FUNC_START(atomic64_add_unless_cx8)
        addl $8, %esp
        popl %ebx
        popl %ebp
-       ret
+       RET
 4:
        cmpl %edx, 4(%esp)
        jne 2b
        movl $1, %eax
 3:
        popl %ebx
-       ret
+       RET
 SYM_FUNC_END(atomic64_inc_not_zero_cx8)
 
 8:
        popl %ebx
        popl %esi
-       ret
+       RET
 SYM_FUNC_END(csum_partial)
 
 #else
 90: 
        popl %ebx
        popl %esi
-       ret
+       RET
 SYM_FUNC_END(csum_partial)
                                
 #endif
        popl %esi
        popl %edi
        popl %ecx                       # equivalent to addl $4,%esp
-       ret     
+       RET
 SYM_FUNC_END(csum_partial_copy_generic)
 
 #else
        popl %esi
        popl %edi
        popl %ebx
-       ret
+       RET
 SYM_FUNC_END(csum_partial_copy_generic)
                                
 #undef ROUND
 
        movl $4096/8,%ecx
        xorl %eax,%eax
        rep stosq
-       ret
+       RET
 SYM_FUNC_END(clear_page_rep)
 EXPORT_SYMBOL_GPL(clear_page_rep)
 
        leaq    64(%rdi),%rdi
        jnz     .Lloop
        nop
-       ret
+       RET
 SYM_FUNC_END(clear_page_orig)
 EXPORT_SYMBOL_GPL(clear_page_orig)
 
        movl $4096,%ecx
        xorl %eax,%eax
        rep stosb
-       ret
+       RET
 SYM_FUNC_END(clear_page_erms)
 EXPORT_SYMBOL_GPL(clear_page_erms)
 
 
        popfq
        mov $1, %al
-       ret
+       RET
 
 .Lnot_same:
        popfq
        xor %al,%al
-       ret
+       RET
 
 SYM_FUNC_END(this_cpu_cmpxchg16b_emu)
 
        movl %ecx, 4(%esi)
 
        popfl
-       ret
+       RET
 
 .Lnot_same:
        movl  (%esi), %eax
        movl 4(%esi), %edx
 
        popfl
-       ret
+       RET
 
 SYM_FUNC_END(cmpxchg8b_emu)
 EXPORT_SYMBOL(cmpxchg8b_emu)
 
 .L_done_memcpy_trap:
        xorl %eax, %eax
 .L_done:
-       ret
+       RET
 SYM_FUNC_END(copy_mc_fragile)
 
        .section .fixup, "ax"
        rep movsb
        /* Copy successful. Return zero */
        xorl %eax, %eax
-       ret
+       RET
 SYM_FUNC_END(copy_mc_enhanced_fast_string)
 
        .section .fixup, "ax"
         * user-copy routines.
         */
        movq %rcx, %rax
-       ret
+       RET
 
        .previous
 
 
        ALTERNATIVE "jmp copy_page_regs", "", X86_FEATURE_REP_GOOD
        movl    $4096/8, %ecx
        rep     movsq
-       ret
+       RET
 SYM_FUNC_END(copy_page)
 EXPORT_SYMBOL(copy_page)
 
        movq    (%rsp), %rbx
        movq    1*8(%rsp), %r12
        addq    $2*8, %rsp
-       ret
+       RET
 SYM_FUNC_END(copy_page_regs)
 
        jnz 21b
 23:    xor %eax,%eax
        ASM_CLAC
-       ret
+       RET
 
        .section .fixup,"ax"
 30:    shll $6,%ecx
        movsb
        xorl %eax,%eax
        ASM_CLAC
-       ret
+       RET
 
        .section .fixup,"ax"
 11:    leal (%rdx,%rcx,8),%ecx
        movsb
        xorl %eax,%eax
        ASM_CLAC
-       ret
+       RET
 
        .section .fixup,"ax"
 12:    movl %ecx,%edx          /* ecx is zerorest also */
 1:     rep movsb
 2:     mov %ecx,%eax
        ASM_CLAC
-       ret
+       RET
 
        _ASM_EXTABLE_CPY(1b, 2b)
 SYM_CODE_END(.Lcopy_user_handle_tail)
        xorl %eax,%eax
        ASM_CLAC
        sfence
-       ret
+       RET
 
        .section .fixup,"ax"
 .L_fixup_4x8b_copy:
 
        movq 3*8(%rsp), %r13
        movq 4*8(%rsp), %r15
        addq $5*8, %rsp
-       ret
+       RET
 .Lshort:
        movl %ecx, %r10d
        jmp  .L1
 
 1:     movzbl (%_ASM_AX),%edx
        xor %eax,%eax
        ASM_CLAC
-       ret
+       RET
 SYM_FUNC_END(__get_user_1)
 EXPORT_SYMBOL(__get_user_1)
 
 2:     movzwl (%_ASM_AX),%edx
        xor %eax,%eax
        ASM_CLAC
-       ret
+       RET
 SYM_FUNC_END(__get_user_2)
 EXPORT_SYMBOL(__get_user_2)
 
 3:     movl (%_ASM_AX),%edx
        xor %eax,%eax
        ASM_CLAC
-       ret
+       RET
 SYM_FUNC_END(__get_user_4)
 EXPORT_SYMBOL(__get_user_4)
 
 4:     movq (%_ASM_AX),%rdx
        xor %eax,%eax
        ASM_CLAC
-       ret
+       RET
 #else
        LOAD_TASK_SIZE_MINUS_N(7)
        cmp %_ASM_DX,%_ASM_AX
 5:     movl 4(%_ASM_AX),%ecx
        xor %eax,%eax
        ASM_CLAC
-       ret
+       RET
 #endif
 SYM_FUNC_END(__get_user_8)
 EXPORT_SYMBOL(__get_user_8)
 6:     movzbl (%_ASM_AX),%edx
        xor %eax,%eax
        ASM_CLAC
-       ret
+       RET
 SYM_FUNC_END(__get_user_nocheck_1)
 EXPORT_SYMBOL(__get_user_nocheck_1)
 
 7:     movzwl (%_ASM_AX),%edx
        xor %eax,%eax
        ASM_CLAC
-       ret
+       RET
 SYM_FUNC_END(__get_user_nocheck_2)
 EXPORT_SYMBOL(__get_user_nocheck_2)
 
 8:     movl (%_ASM_AX),%edx
        xor %eax,%eax
        ASM_CLAC
-       ret
+       RET
 SYM_FUNC_END(__get_user_nocheck_4)
 EXPORT_SYMBOL(__get_user_nocheck_4)
 
 #endif
        xor %eax,%eax
        ASM_CLAC
-       ret
+       RET
 SYM_FUNC_END(__get_user_nocheck_8)
 EXPORT_SYMBOL(__get_user_nocheck_8)
 
 bad_get_user:
        xor %edx,%edx
        mov $(-EFAULT),%_ASM_AX
-       ret
+       RET
 SYM_CODE_END(.Lbad_get_user_clac)
 
 #ifdef CONFIG_X86_32
        xor %edx,%edx
        xor %ecx,%ecx
        mov $(-EFAULT),%_ASM_AX
-       ret
+       RET
 SYM_CODE_END(.Lbad_get_user_8_clac)
 #endif
 
 
        imull $0x01010101, %eax, %eax           # w_tmp *= 0x01010101
        shrl $24, %eax                          # w = w_tmp >> 24
        __ASM_SIZE(pop,) %__ASM_REG(dx)
-       ret
+       RET
 SYM_FUNC_END(__sw_hweight32)
 EXPORT_SYMBOL(__sw_hweight32)
 
 
        popq    %rdx
        popq    %rdi
-       ret
+       RET
 #else /* CONFIG_X86_32 */
        /* We're getting an u64 arg in (%eax,%edx): unsigned long hweight64(__u64 w) */
        pushl   %ecx
        addl    %ecx, %eax                      # result
 
        popl    %ecx
-       ret
+       RET
 #endif
 SYM_FUNC_END(__sw_hweight64)
 EXPORT_SYMBOL(__sw_hweight64)
 
 SYM_FUNC_START(__iowrite32_copy)
        movl %edx,%ecx
        rep movsd
-       ret
+       RET
 SYM_FUNC_END(__iowrite32_copy)
 
        rep movsq
        movl %edx, %ecx
        rep movsb
-       ret
+       RET
 SYM_FUNC_END(memcpy)
 SYM_FUNC_END_ALIAS(__memcpy)
 EXPORT_SYMBOL(memcpy)
        movq %rdi, %rax
        movq %rdx, %rcx
        rep movsb
-       ret
+       RET
 SYM_FUNC_END(memcpy_erms)
 
 SYM_FUNC_START_LOCAL(memcpy_orig)
        movq %r9,       1*8(%rdi)
        movq %r10,      -2*8(%rdi, %rdx)
        movq %r11,      -1*8(%rdi, %rdx)
-       retq
+       RET
        .p2align 4
 .Lless_16bytes:
        cmpl $8,        %edx
        movq -1*8(%rsi, %rdx),  %r9
        movq %r8,       0*8(%rdi)
        movq %r9,       -1*8(%rdi, %rdx)
-       retq
+       RET
        .p2align 4
 .Lless_8bytes:
        cmpl $4,        %edx
        movl -4(%rsi, %rdx), %r8d
        movl %ecx, (%rdi)
        movl %r8d, -4(%rdi, %rdx)
-       retq
+       RET
        .p2align 4
 .Lless_3bytes:
        subl $1, %edx
        movb %cl, (%rdi)
 
 .Lend:
-       retq
+       RET
 SYM_FUNC_END(memcpy_orig)
 
 .popsection
 
        /* FSRM implies ERMS => no length checks, do the copy directly */
 .Lmemmove_begin_forward:
        ALTERNATIVE "cmp $0x20, %rdx; jb 1f", "", X86_FEATURE_FSRM
-       ALTERNATIVE "", "movq %rdx, %rcx; rep movsb; retq", X86_FEATURE_ERMS
+       ALTERNATIVE "", "movq %rdx, %rcx; rep movsb; RET", X86_FEATURE_ERMS
 
        /*
         * movsq instruction have many startup latency
        movb (%rsi), %r11b
        movb %r11b, (%rdi)
 13:
-       retq
+       RET
 SYM_FUNC_END(__memmove)
 SYM_FUNC_END_ALIAS(memmove)
 EXPORT_SYMBOL(__memmove)
 
        movl %edx,%ecx
        rep stosb
        movq %r9,%rax
-       ret
+       RET
 SYM_FUNC_END(__memset)
 SYM_FUNC_END_ALIAS(memset)
 EXPORT_SYMBOL(memset)
        movq %rdx,%rcx
        rep stosb
        movq %r9,%rax
-       ret
+       RET
 SYM_FUNC_END(memset_erms)
 
 SYM_FUNC_START_LOCAL(memset_orig)
 
 .Lende:
        movq    %r10,%rax
-       ret
+       RET
 
 .Lbad_alignment:
        cmpq $7,%rdx
 
        movl    %edi, 28(%r10)
        popq %r12
        popq %rbx
-       ret
+       RET
 3:
        movl    $-EIO, %r11d
        jmp     2b
        popl %esi
        popl %ebp
        popl %ebx
-       ret
+       RET
 3:
        movl    $-EIO, 4(%esp)
        jmp     2b
 
 1:     movb %al,(%_ASM_CX)
        xor %ecx,%ecx
        ASM_CLAC
-       ret
+       RET
 SYM_FUNC_END(__put_user_1)
 EXPORT_SYMBOL(__put_user_1)
 EXPORT_SYMBOL(__put_user_nocheck_1)
 2:     movw %ax,(%_ASM_CX)
        xor %ecx,%ecx
        ASM_CLAC
-       ret
+       RET
 SYM_FUNC_END(__put_user_2)
 EXPORT_SYMBOL(__put_user_2)
 EXPORT_SYMBOL(__put_user_nocheck_2)
 3:     movl %eax,(%_ASM_CX)
        xor %ecx,%ecx
        ASM_CLAC
-       ret
+       RET
 SYM_FUNC_END(__put_user_4)
 EXPORT_SYMBOL(__put_user_4)
 EXPORT_SYMBOL(__put_user_nocheck_4)
 
 .Ldo_rop_\@:
        mov     %\reg, (%_ASM_SP)
        UNWIND_HINT_FUNC
-       ret
+       RET
 .endm
 
 .macro THUNK reg
 
        popl    %esi
 
        leave
-       ret
+       RET
 
 
 #ifdef PARANOID
 
        popl    %esi
 
        leave
-       ret
+       RET
 SYM_FUNC_END(FPU_div_small)
 
 
        popl %esi
        leave
-       ret
+       RET
 SYM_FUNC_END(mul32_Xsig)
 
 
 
        popl %esi
        leave
-       ret
+       RET
 SYM_FUNC_END(mul64_Xsig)
 
 
 
        popl %esi
        leave
-       ret
+       RET
 SYM_FUNC_END(mul_Xsig_Xsig)
 
        popl    %edi
        popl    %esi
        leave
-       ret
+       RET
 SYM_FUNC_END(polynomial_Xsig)
 
 L_exit:
        popl    %ebx
        leave
-       ret
+       RET
 
 
 L_zero:
 
        popl    %ebx
        leave
-       ret
+       RET
 
 L_exit_nuo_zero:
        movl    TAG_Zero,%eax
 
        popl    %ebx
        leave
-       ret
+       RET
 SYM_FUNC_END(FPU_normalize_nuo)
 
        popl    %edi
        popl    %esi
        leave
-       ret
+       RET
 
 
 /*
 
        popl    %edi
        popl    %esi
        leave
-       ret
+       RET
 #endif /* PARANOID */
 SYM_FUNC_END(FPU_u_add)
 
        popl    %esi
 
        leave
-       ret
+       RET
 #endif /* PARANOID */ 
 
 SYM_FUNC_END(FPU_u_div)
 
        popl    %edi
        popl    %esi
        leave
-       ret
+       RET
 #endif /* PARANOID */ 
 
 SYM_FUNC_END(FPU_u_mul)
 
        popl    %edi
        popl    %esi
        leave
-       ret
+       RET
 SYM_FUNC_END(FPU_u_sub)
 
        popl    %esi
        popl    %ebx
        leave
-       ret
+       RET
 SYM_FUNC_END(round_Xsig)
 
 
        popl    %esi
        popl    %ebx
        leave
-       ret
+       RET
 SYM_FUNC_END(norm_Xsig)
 
        popl    %ebx
        popl    %esi
        leave
-       ret
+       RET
 
 L_more_than_31:
        cmpl    $64,%ecx
        movl    $0,8(%esi)
        popl    %esi
        leave
-       ret
+       RET
 
 L_more_than_63:
        cmpl    $96,%ecx
        movl    %edx,8(%esi)
        popl    %esi
        leave
-       ret
+       RET
 
 L_more_than_95:
        xorl    %eax,%eax
        movl    %eax,8(%esi)
        popl    %esi
        leave
-       ret
+       RET
 SYM_FUNC_END(shr_Xsig)
 
        popl    %ebx
        popl    %esi
        leave
-       ret
+       RET
 
 L_more_than_31:
        cmpl    $64,%ecx
        movl    $0,4(%esi)
        popl    %esi
        leave
-       ret
+       RET
 
 L_more_than_63:
        cmpl    $96,%ecx
        movl    %edx,4(%esi)
        popl    %esi
        leave
-       ret
+       RET
 
 L_more_than_95:
        xorl    %eax,%eax
        movl    %eax,4(%esi)
        popl    %esi
        leave
-       ret
+       RET
 SYM_FUNC_END(FPU_shrx)
 
 
        popl    %ebx
        popl    %esi
        leave
-       ret
+       RET
 
 /* Shift by [0..31] bits */
 Ls_less_than_32:
        popl    %ebx
        popl    %esi
        leave
-       ret
+       RET
 
 /* Shift by [64..95] bits */
 Ls_more_than_63:
        popl    %ebx
        popl    %esi
        leave
-       ret
+       RET
 
 Ls_more_than_95:
 /* Shift by [96..inf) bits */
        popl    %ebx
        popl    %esi
        leave
-       ret
+       RET
 SYM_FUNC_END(FPU_shrxs)
 
        movq    %rbp, %rsp              /* Restore original stack pointer */
        pop     %rbp
 
-       ret
+       RET
 SYM_FUNC_END(sme_encrypt_execute)
 
 SYM_FUNC_START(__enc_copy)
        pop     %r12
        pop     %r15
 
-       ret
+       RET
 .L__enc_copy_end:
 SYM_FUNC_END(__enc_copy)
 
 
        movl    16(%esp), %ebx
        leave
-       ret
+       RET
 SYM_FUNC_END(efi_call_svam)
 
        mov %rsi, %rcx
        CALL_NOSPEC rdi
        leave
-       ret
+       RET
 SYM_FUNC_END(__efi_call)
 
 1:     movq    24(%rsp), %rsp
        pop     %rbx
        pop     %rbp
-       retq
+       RET
 
        .code32
 2:     pushl   $__KERNEL_CS
 
        pushfl
        popl saved_context_eflags
 
-       ret
+       RET
 
 restore_registers:
        movl saved_context_ebp, %ebp
        pushl saved_context_eflags
        popfl
 
-       ret
+       RET
 
 SYM_CODE_START(do_olpc_suspend_lowlevel)
        call    save_processor_state
 
        call    restore_registers
        call    restore_processor_state
-       ret
+       RET
 SYM_CODE_END(do_olpc_suspend_lowlevel)
 
 .data
 
        FRAME_BEGIN
        call swsusp_save
        FRAME_END
-       ret
+       RET
 SYM_FUNC_END(swsusp_arch_suspend)
 
 SYM_CODE_START(restore_image)
        /* tell the hibernation core that we've just restored the memory */
        movl    %eax, in_suspend
 
-       ret
+       RET
 SYM_FUNC_END(restore_registers)
 
        /* tell the hibernation core that we've just restored the memory */
        movq    %rax, in_suspend(%rip)
 
-       ret
+       RET
 SYM_FUNC_END(restore_registers)
 
 SYM_FUNC_START(swsusp_arch_suspend)
        FRAME_BEGIN
        call swsusp_save
        FRAME_END
-       ret
+       RET
 SYM_FUNC_END(swsusp_arch_suspend)
 
 SYM_FUNC_START(restore_image)
 
 7:     
        popl %ebx
        popl %esi
-       ret
+       RET
 
 #else
 
 80: 
        popl %ebx
        popl %esi
-       ret
+       RET
                                
 #endif
        EXPORT_SYMBOL(csum_partial)
 
        movl %esi,12(%edx)
        movl %edi,16(%edx)
        movl %ecx,20(%edx)              # Return address
-       ret
+       RET
 
        .size kernel_setjmp,.-kernel_setjmp
 
 
        movq %r14,40(%rdi)
        movq %r15,48(%rdi)
        movq %rsi,56(%rdi)              # Return address
-       ret
+       RET
 
        .size kernel_setjmp,.-kernel_setjmp
 
 
  */
 SYM_FUNC_START(xen_irq_disable_direct)
        movb $1, PER_CPU_VAR(xen_vcpu_info) + XEN_vcpu_info_mask
-       ret
+       RET
 SYM_FUNC_END(xen_irq_disable_direct)
 
 /*
        pop %rcx
        pop %rax
        FRAME_END
-       ret
+       RET
 SYM_FUNC_END(check_events)
 
 /*
        call check_events
 1:
        FRAME_END
-       ret
+       RET
 SYM_FUNC_END(xen_irq_enable_direct)
 
 /*
        testb $0xff, PER_CPU_VAR(xen_vcpu_info) + XEN_vcpu_info_mask
        setz %ah
        addb %ah, %ah
-       ret
+       RET
 SYM_FUNC_END(xen_save_fl_direct)
 
 SYM_FUNC_START(xen_read_cr2)
        _ASM_MOV PER_CPU_VAR(xen_vcpu), %_ASM_AX
        _ASM_MOV XEN_vcpu_info_arch_cr2(%_ASM_AX), %_ASM_AX
        FRAME_END
-       ret
+       RET
 SYM_FUNC_END(xen_read_cr2);
 
 SYM_FUNC_START(xen_read_cr2_direct)
        FRAME_BEGIN
        _ASM_MOV PER_CPU_VAR(xen_vcpu_info) + XEN_vcpu_info_arch_cr2, %_ASM_AX
        FRAME_END
-       ret
+       RET
 SYM_FUNC_END(xen_read_cr2_direct);
 .popsection
 
 
        .rept (PAGE_SIZE / 32)
                UNWIND_HINT_FUNC
                .skip 31, 0x90
-               ret
+               RET
        .endr
 
 #define HYPERCALL(n) \