.word  0                               @ terminator
 #if __ARM_MAX_ARCH__>=7 && !defined(__KERNEL__)
 .LOPENSSL_armcap:
-.word  OPENSSL_armcap_P-sha256_block_data_order
+.word  OPENSSL_armcap_P-sha256_blocks_arch
 #endif
 .align 5
 
-.global        sha256_block_data_order
-.type  sha256_block_data_order,%function
-sha256_block_data_order:
-.Lsha256_block_data_order:
+.global        sha256_blocks_arch
+.type  sha256_blocks_arch,%function
+sha256_blocks_arch:
+.Lsha256_blocks_arch:
 #if __ARM_ARCH__<7
-       sub     r3,pc,#8                @ sha256_block_data_order
+       sub     r3,pc,#8                @ sha256_blocks_arch
 #else
-       adr     r3,.Lsha256_block_data_order
+       adr     r3,.Lsha256_blocks_arch
 #endif
 #if __ARM_MAX_ARCH__>=7 && !defined(__KERNEL__)
        ldr     r12,.LOPENSSL_armcap
        moveq   pc,lr                   @ be binary compatible with V4, yet
        bx      lr                      @ interoperable with Thumb ISA:-)
 #endif
-.size  sha256_block_data_order,.-sha256_block_data_order
+.size  sha256_blocks_arch,.-sha256_blocks_arch
 ___
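A note on the `__ARM_ARCH__<7` branch renamed above: in ARM state the program counter reads eight bytes ahead of the current instruction (a pipeline artifact), so if the `sub r3,pc,#8` at the function entry sits at address A, the value read from pc is A + 8 and the result is A + 8 - 8 = A, the entry point itself. On ARMv7+ the file may be assembled as Thumb-2, where the pc offset differs, so `adr r3,.Lsha256_blocks_arch` is used instead; that is why the local label has to follow the rename as well.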
 ######################################################################
 # NEON stuff
        stmdb   sp!,{r4-r12,lr}
 
        sub     $H,sp,#16*4+16
-       adr     $Ktbl,.Lsha256_block_data_order
-       sub     $Ktbl,$Ktbl,#.Lsha256_block_data_order-K256
+       adr     $Ktbl,.Lsha256_blocks_arch
+       sub     $Ktbl,$Ktbl,#.Lsha256_blocks_arch-K256
        bic     $H,$H,#15               @ align for 128-bit stores
        mov     $t2,sp
        mov     sp,$H                   @ alloca
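The prologue above is an aligned alloca: it reserves 16*4 + 16 = 80 bytes below sp, and `bic $H,$H,#15` clears the low four bits so that rounding down to the 16-byte boundary required by the 128-bit NEON stores never cuts into the reservation; the extra 16 bytes are exactly the slack the rounding can consume. A minimal C sketch of the same trick, with illustrative names that are not from the patch:

	#include <stdint.h>

	/* Round a pointer down to a 16-byte boundary so quadword
	 * (128-bit) stores through it remain naturally aligned. */
	static inline void *align_down_16(void *p)
	{
		return (void *)((uintptr_t)p & ~(uintptr_t)15);
	}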
 
  */
 #include <asm/neon.h>
 #include <crypto/internal/sha2.h>
-#include <crypto/internal/simd.h>
 #include <linux/kernel.h>
 #include <linux/module.h>
 
-asmlinkage void sha256_block_data_order(u32 state[SHA256_STATE_WORDS],
-                                       const u8 *data, size_t nblocks);
+asmlinkage void sha256_blocks_arch(u32 state[SHA256_STATE_WORDS],
+                                  const u8 *data, size_t nblocks);
+EXPORT_SYMBOL_GPL(sha256_blocks_arch);
 asmlinkage void sha256_block_data_order_neon(u32 state[SHA256_STATE_WORDS],
                                             const u8 *data, size_t nblocks);
 asmlinkage void sha256_ce_transform(u32 state[SHA256_STATE_WORDS],
 				    const u8 *data, size_t nblocks);
 static __ro_after_init DEFINE_STATIC_KEY_FALSE(have_neon);
 static __ro_after_init DEFINE_STATIC_KEY_FALSE(have_ce);
 
-void sha256_blocks_arch(u32 state[SHA256_STATE_WORDS],
+void sha256_blocks_simd(u32 state[SHA256_STATE_WORDS],
                        const u8 *data, size_t nblocks)
 {
        if (IS_ENABLED(CONFIG_KERNEL_MODE_NEON) &&
-           static_branch_likely(&have_neon) && crypto_simd_usable()) {
+           static_branch_likely(&have_neon)) {
                kernel_neon_begin();
                if (static_branch_likely(&have_ce))
 			sha256_ce_transform(state, data, nblocks);
 		else
 			sha256_block_data_order_neon(state, data, nblocks);
                kernel_neon_end();
        } else {
-               sha256_block_data_order(state, data, nblocks);
+               sha256_blocks_arch(state, data, nblocks);
        }
 }
-EXPORT_SYMBOL_GPL(sha256_blocks_arch);
+EXPORT_SYMBOL_GPL(sha256_blocks_simd);
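With the crypto_simd_usable() check removed from this glue code, deciding whether SIMD may be used becomes the caller's responsibility, which is also why the #include <crypto/internal/simd.h> is dropped above. A minimal sketch of the caller-side dispatch this split enables, assuming a generic helper; the name sha256_blocks is an assumption, not part of this patch:

	#include <crypto/internal/simd.h>

	/* Hypothetical generic dispatcher: take the SIMD entry point only
	 * when kernel-mode NEON is currently usable, otherwise fall back
	 * to the scalar arch entry point, which is safe in any context. */
	static void sha256_blocks(u32 state[SHA256_STATE_WORDS],
				  const u8 *data, size_t nblocks)
	{
		if (crypto_simd_usable())
			sha256_blocks_simd(state, data, nblocks);
		else
			sha256_blocks_arch(state, data, nblocks);
	}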
 
 bool sha256_is_arch_optimized(void)
 {