         .word           0x90befffa, 0xa4506ceb, 0xbef9a3f7, 0xc67178f2
 
        /*
-        * void sha2_ce_transform(struct sha256_ce_state *sst, u8 const *src,
-        *                        int blocks)
+        * int __sha256_ce_transform(struct sha256_ce_state *sst, u8 const *src,
+        *                           int blocks)
         */
        .text
-SYM_FUNC_START(sha2_ce_transform)
+SYM_FUNC_START(__sha256_ce_transform)
        /* load round constants */
        adr_l           x8, .Lsha2_rcon
        ld1             { v0.4s- v3.4s}, [x8], #64
 3:     st1             {dgav.4s, dgbv.4s}, [x0]
        mov             w0, w2
        ret
-SYM_FUNC_END(sha2_ce_transform)
+SYM_FUNC_END(__sha256_ce_transform)
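
The context lines retained in this hunk show why the prototype in the comment changes from void to int: the epilogue moves w2, the count of blocks left unprocessed, into w0 as the C return value. The hunks that follow are from the C glue code, which is renamed to match.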
 
 extern const u32 sha256_ce_offsetof_count;
 extern const u32 sha256_ce_offsetof_finalize;
 
-asmlinkage int sha2_ce_transform(struct sha256_ce_state *sst, u8 const *src,
-                                int blocks);
+asmlinkage int __sha256_ce_transform(struct sha256_ce_state *sst, u8 const *src,
+                                    int blocks);
 
-static void __sha2_ce_transform(struct sha256_state *sst, u8 const *src,
+static void sha256_ce_transform(struct sha256_state *sst, u8 const *src,
                                int blocks)
 {
        while (blocks) {
                int rem;
 
                kernel_neon_begin();
-               rem = sha2_ce_transform(container_of(sst, struct sha256_ce_state,
-                                                    sst), src, blocks);
+               rem = __sha256_ce_transform(container_of(sst,
+                                                        struct sha256_ce_state,
+                                                        sst), src, blocks);
                kernel_neon_end();
                src += (blocks - rem) * SHA256_BLOCK_SIZE;
                blocks = rem;
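
Note the shape of this loop: the assembly routine is allowed to return before it has consumed every block, so that NEON is not held across a needed reschedule, and the glue code simply re-enters it until nothing remains. A minimal user-space sketch of the same partial-progress pattern (step_transform, process_all, and BLOCK_SIZE are illustrative stand-ins, not kernel API):

#include <stddef.h>

#define BLOCK_SIZE 64   /* SHA-256 block size in bytes */

/* Stand-in worker: consumes at most four blocks per call and
 * returns the number of blocks it left unprocessed. */
static int step_transform(const unsigned char *src, int blocks)
{
        int done = blocks < 4 ? blocks : 4;

        (void)src;      /* a real worker would hash src here */
        return blocks - done;
}

/* Same control flow as sha256_ce_transform() above: advance the
 * source by what was actually consumed and loop on the rest. */
static void process_all(const unsigned char *src, int blocks)
{
        while (blocks) {
                int rem = step_transform(src, blocks);

                src += (size_t)(blocks - rem) * BLOCK_SIZE;
                blocks = rem;
        }
}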
 
 asmlinkage void sha256_block_data_order(u32 *digest, u8 const *src, int blocks);
 
-static void __sha256_block_data_order(struct sha256_state *sst, u8 const *src,
-                                     int blocks)
+static void sha256_arm64_transform(struct sha256_state *sst, u8 const *src,
+                                  int blocks)
 {
        sha256_block_data_order(sst->state, src, blocks);
 }
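
For reference, both wrappers exist to fit the block-function prototype that the sha256_base helpers take as their final argument; include/crypto/sha256_base.h declares it as:

typedef void (sha256_block_fn)(struct sha256_state *sst, u8 const *src,
                               int blocks);

This is why sha256_ce_transform() above swallows the int result of the assembly routine, and why sha256_arm64_transform() adapts sha256_block_data_order(), which takes a bare u32 digest array rather than a struct sha256_state.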
 
        if (!crypto_simd_usable())
                return sha256_base_do_update(desc, data, len,
-                               __sha256_block_data_order);
+                                            sha256_arm64_transform);
 
        sctx->finalize = 0;
-       sha256_base_do_update(desc, data, len, __sha2_ce_transform);
+       sha256_base_do_update(desc, data, len, sha256_ce_transform);
 
        return 0;
 }
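
The hunks above and below belong to different functions: the update path ends here having cleared sctx->finalize, so the assembly (which presumably reads the flag via the exported sha256_ce_offsetof_finalize) never performs padding during a plain update. The next hunk is from the finup path, where the flag can be set.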
        if (!crypto_simd_usable()) {
                if (len)
                        sha256_base_do_update(desc, data, len,
-                               __sha256_block_data_order);
-               sha256_base_do_finalize(desc, __sha256_block_data_order);
+                                             sha256_arm64_transform);
+               sha256_base_do_finalize(desc, sha256_arm64_transform);
                return sha256_base_finish(desc, out);
        }
 
         */
        sctx->finalize = finalize;
 
-       sha256_base_do_update(desc, data, len, __sha2_ce_transform);
+       sha256_base_do_update(desc, data, len, sha256_ce_transform);
        if (!finalize)
-               sha256_base_do_finalize(desc, __sha2_ce_transform);
+               sha256_base_do_finalize(desc, sha256_ce_transform);
        return sha256_base_finish(desc, out);
 }
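
The `if (!finalize)` test above is the fast path's payoff: when finalize is set, the assembly appends the SHA-256 padding itself, and the separate sha256_base_do_finalize() pass is skipped. The last hunk below applies the same renames to the final() method.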
 
        struct sha256_ce_state *sctx = shash_desc_ctx(desc);
 
        if (!crypto_simd_usable()) {
-               sha256_base_do_finalize(desc, __sha256_block_data_order);
+               sha256_base_do_finalize(desc, sha256_arm64_transform);
                return sha256_base_finish(desc, out);
        }
 
        sctx->finalize = 0;
-       sha256_base_do_finalize(desc, __sha2_ce_transform);
+       sha256_base_do_finalize(desc, sha256_ce_transform);
        return sha256_base_finish(desc, out);
 }
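
After this rename the naming convention is consistent throughout the file: every !crypto_simd_usable() fallback goes through sha256_arm64_transform(), every Crypto Extensions path goes through sha256_ce_transform(), and the double-underscore prefix is reserved for the raw assembly entry point __sha256_ce_transform().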