 #include <crypto/aes.h>
 #include <crypto/scatterwalk.h>
 #include <crypto/internal/aead.h>
+#include <crypto/internal/simd.h>
 #include <crypto/internal/skcipher.h>
 #include <linux/module.h>
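
For reference, crypto_simd_usable() lives in the newly included <crypto/internal/simd.h>. Unlike the raw may_use_simd() check it replaces, it can be forced off by the crypto self-tests; its definition is essentially the following (paraphrased for context; the exact config guards may vary by kernel version):

/*
 * May the kernel crypto API use SIMD here? Identical to may_use_simd(),
 * except the extra self-tests can override it per CPU in order to
 * exercise the scalar fallback paths.
 */
#ifdef CONFIG_CRYPTO_MANAGER_EXTRA_TESTS
DECLARE_PER_CPU(bool, crypto_simd_disabled_for_test);

static __must_check inline bool crypto_simd_usable(void)
{
	return may_use_simd() && !this_cpu_read(crypto_simd_disabled_for_test);
}
#else
static __must_check inline bool crypto_simd_usable(void)
{
	return may_use_simd();	/* no functional change in this config */
}
#endif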
 
 static void ccm_update_mac(struct crypto_aes_ctx *key, u8 mac[], u8 const in[],
                           u32 abytes, u32 *macp)
 {
-       if (may_use_simd()) {
+       if (crypto_simd_usable()) {
                kernel_neon_begin();
                ce_aes_ccm_auth_data(mac, in, abytes, macp, key->key_enc,
                                     num_rounds(key));
 
        err = skcipher_walk_aead_encrypt(&walk, req, false);
 
-       if (may_use_simd()) {
+       if (crypto_simd_usable()) {
                while (walk.nbytes) {
                        u32 tail = walk.nbytes % AES_BLOCK_SIZE;
 
 
        err = skcipher_walk_aead_decrypt(&walk, req, false);
 
-       if (may_use_simd()) {
+       if (crypto_simd_usable()) {
                while (walk.nbytes) {
                        u32 tail = walk.nbytes % AES_BLOCK_SIZE;
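
Every hunk in this conversion follows the same shape: a SIMD-gated fast path bracketed by kernel_neon_begin()/kernel_neon_end(), with a scalar fallback taken when the NEON register file must not be touched. A minimal sketch of the pattern, not lifted from any one function:

	if (crypto_simd_usable()) {
		kernel_neon_begin();	/* claim the NEON register file */
		/* ... NEON/Crypto Extensions asm implementation ... */
		kernel_neon_end();	/* release it, restoring task state */
	} else {
		/* ... portable scalar fallback, safe in any context ... */
	}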
 
 
 #include <asm/simd.h>
 #include <asm/unaligned.h>
 #include <crypto/aes.h>
+#include <crypto/internal/simd.h>
 #include <linux/cpufeature.h>
 #include <linux/crypto.h>
 #include <linux/module.h>
 {
        struct crypto_aes_ctx *ctx = crypto_tfm_ctx(tfm);
 
-       if (!may_use_simd()) {
+       if (!crypto_simd_usable()) {
                __aes_arm64_encrypt(ctx->key_enc, dst, src, num_rounds(ctx));
                return;
        }
 {
        struct crypto_aes_ctx *ctx = crypto_tfm_ctx(tfm);
 
-       if (!may_use_simd()) {
+       if (!crypto_simd_usable()) {
                __aes_arm64_decrypt(ctx->key_dec, dst, src, num_rounds(ctx));
                return;
        }
 
        struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
        struct crypto_aes_ctx *ctx = crypto_skcipher_ctx(tfm);
 
-       if (!may_use_simd())
+       if (!crypto_simd_usable())
                return aes_ctr_encrypt_fallback(ctx, req);
 
        return ctr_encrypt(req);
 {
        int rounds = 6 + ctx->key_length / 4;
 
-       if (may_use_simd()) {
+       if (crypto_simd_usable()) {
                kernel_neon_begin();
                aes_mac_update(in, ctx->key_enc, rounds, blocks, dg, enc_before,
                               enc_after);
 
        struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
        struct aesbs_ctr_ctx *ctx = crypto_skcipher_ctx(tfm);
 
-       if (!may_use_simd())
+       if (!crypto_simd_usable())
                return aes_ctr_encrypt_fallback(&ctx->fallback, req);
 
        return ctr_encrypt(req);
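
Note that these synchronous CTR entry points test crypto_simd_usable() once per request, but the NEON section itself is still entered and exited per walk step, so preemption is only held off for one chunk at a time. A sketch of such a walk loop, assuming an aes_ctr_encrypt asm helper in the style of aes-glue.c:

	int blocks;

	while ((blocks = walk.nbytes / AES_BLOCK_SIZE) > 0) {
		kernel_neon_begin();
		aes_ctr_encrypt(walk.dst.virt.addr, walk.src.virt.addr,
				ctx->key_enc, rounds, blocks, walk.iv);
		kernel_neon_end();
		err = skcipher_walk_done(&walk, walk.nbytes % AES_BLOCK_SIZE);
	}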
 
 
 #include <crypto/algapi.h>
 #include <crypto/chacha.h>
+#include <crypto/internal/simd.h>
 #include <crypto/internal/skcipher.h>
 #include <linux/kernel.h>
 #include <linux/module.h>
        struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
        struct chacha_ctx *ctx = crypto_skcipher_ctx(tfm);
 
-       if (req->cryptlen <= CHACHA_BLOCK_SIZE || !may_use_simd())
+       if (req->cryptlen <= CHACHA_BLOCK_SIZE || !crypto_simd_usable())
                return crypto_chacha_crypt(req);
 
        return chacha_neon_stream_xor(req, ctx, req->iv);
        u32 state[16];
        u8 real_iv[16];
 
-       if (req->cryptlen <= CHACHA_BLOCK_SIZE || !may_use_simd())
+       if (req->cryptlen <= CHACHA_BLOCK_SIZE || !crypto_simd_usable())
                return crypto_xchacha_crypt(req);
 
        crypto_chacha_init(state, ctx, req->iv);
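
For the XChaCha path, the hunk above continues by deriving the HChaCha subkey under NEON and re-keying before the stream XOR, roughly as follows (names per chacha-neon-glue.c; treat as illustrative):

	struct chacha_ctx subctx;

	kernel_neon_begin();
	hchacha_block_neon(state, subctx.key, ctx->nrounds);
	kernel_neon_end();
	subctx.nrounds = ctx->nrounds;

	memcpy(&real_iv[0], req->iv + 24, 8);
	memcpy(&real_iv[8], req->iv + 16, 8);
	return chacha_neon_stream_xor(req, &subctx, real_iv);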
 
 #include <linux/string.h>
 
 #include <crypto/internal/hash.h>
+#include <crypto/internal/simd.h>
 
 #include <asm/neon.h>
 #include <asm/simd.h>
 {
        u16 *crc = shash_desc_ctx(desc);
 
-       if (length >= CRC_T10DIF_PMULL_CHUNK_SIZE && may_use_simd()) {
+       if (length >= CRC_T10DIF_PMULL_CHUNK_SIZE && crypto_simd_usable()) {
                kernel_neon_begin();
                *crc = crc_t10dif_pmull_p8(*crc, data, length);
                kernel_neon_end();
 {
        u16 *crc = shash_desc_ctx(desc);
 
-       if (length >= CRC_T10DIF_PMULL_CHUNK_SIZE && may_use_simd()) {
+       if (length >= CRC_T10DIF_PMULL_CHUNK_SIZE && crypto_simd_usable()) {
                kernel_neon_begin();
                *crc = crc_t10dif_pmull_p64(*crc, data, length);
                kernel_neon_end();
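
Below the CRC_T10DIF_PMULL_CHUNK_SIZE threshold (16 bytes in this driver), or when SIMD is unusable, both update functions fall back to the generic table-driven CRC; the elided else branch is essentially:

	} else {
		*crc = crc_t10dif_generic(*crc, data, length);
	}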
 
 #include <crypto/gf128mul.h>
 #include <crypto/internal/aead.h>
 #include <crypto/internal/hash.h>
+#include <crypto/internal/simd.h>
 #include <crypto/internal/skcipher.h>
 #include <crypto/scatterwalk.h>
 #include <linux/cpufeature.h>
                                                struct ghash_key const *k,
                                                const char *head))
 {
-       if (likely(may_use_simd())) {
+       if (likely(crypto_simd_usable())) {
                kernel_neon_begin();
                simd_update(blocks, dg, src, key, head);
                kernel_neon_end();
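
When SIMD is not usable, ghash_do_update() falls back to a scalar GF(2^128) multiply via gf128mul_lle(), which is why <crypto/gf128mul.h> is included above. A simplified sketch of that fallback (the real branch also folds in the optional 'head' block):

	} else {
		be128 dst = { cpu_to_be64(dg[1]), cpu_to_be64(dg[0]) };

		do {
			crypto_xor((u8 *)&dst, src, GHASH_BLOCK_SIZE);
			gf128mul_lle(&dst, &key->k);
			src += GHASH_BLOCK_SIZE;
		} while (--blocks);

		dg[0] = be64_to_cpu(dst.b);
		dg[1] = be64_to_cpu(dst.a);
	}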
 
        err = skcipher_walk_aead_encrypt(&walk, req, false);
 
-       if (likely(may_use_simd() && walk.total >= 2 * AES_BLOCK_SIZE)) {
+       if (likely(crypto_simd_usable() && walk.total >= 2 * AES_BLOCK_SIZE)) {
                u32 const *rk = NULL;
 
                kernel_neon_begin();
 
        err = skcipher_walk_aead_decrypt(&walk, req, false);
 
-       if (likely(may_use_simd() && walk.total >= 2 * AES_BLOCK_SIZE)) {
+       if (likely(crypto_simd_usable() && walk.total >= 2 * AES_BLOCK_SIZE)) {
                u32 const *rk = NULL;
 
                kernel_neon_begin();
 
 #include <asm/neon.h>
 #include <asm/simd.h>
 #include <crypto/internal/hash.h>
+#include <crypto/internal/simd.h>
 #include <crypto/nhpoly1305.h>
 #include <linux/module.h>
 
 static int nhpoly1305_neon_update(struct shash_desc *desc,
                                  const u8 *src, unsigned int srclen)
 {
-       if (srclen < 64 || !may_use_simd())
+       if (srclen < 64 || !crypto_simd_usable())
                return crypto_nhpoly1305_update(desc, src, srclen);
 
        do {
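
The do/while loop opened above re-enters the NEON section per bounded chunk rather than holding it across the whole request; its body is approximately the following (helper names as in the arm64 NHPoly1305 glue; the SZ_4K cap bounds preemption-off latency):

	do {
		unsigned int n = min_t(unsigned int, srclen, SZ_4K);

		kernel_neon_begin();
		crypto_nhpoly1305_update_helper(desc, src, n, _nh_neon);
		kernel_neon_end();
		src += n;
		srclen -= n;
	} while (srclen);
	return 0;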
 
 #include <asm/simd.h>
 #include <asm/unaligned.h>
 #include <crypto/internal/hash.h>
+#include <crypto/internal/simd.h>
 #include <crypto/sha.h>
 #include <crypto/sha1_base.h>
 #include <linux/cpufeature.h>
 {
        struct sha1_ce_state *sctx = shash_desc_ctx(desc);
 
-       if (!may_use_simd())
+       if (!crypto_simd_usable())
                return crypto_sha1_update(desc, data, len);
 
        sctx->finalize = 0;
        struct sha1_ce_state *sctx = shash_desc_ctx(desc);
        bool finalize = !sctx->sst.count && !(len % SHA1_BLOCK_SIZE);
 
-       if (!may_use_simd())
+       if (!crypto_simd_usable())
                return crypto_sha1_finup(desc, data, len, out);
 
        /*
 {
        struct sha1_ce_state *sctx = shash_desc_ctx(desc);
 
-       if (!may_use_simd())
+       if (!crypto_simd_usable())
                return crypto_sha1_finup(desc, NULL, 0, out);
 
        sctx->finalize = 0;
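
The finalize flag used by this driver (set in the finup hunk above) lets the asm transform fold the padding block into the same NEON invocation when the input length is a block multiple; when SIMD is unusable, that optimization is simply bypassed via crypto_sha1_finup(). The fused path looks roughly like this (names per sha1-ce-glue.c; illustrative only):

	sctx->finalize = finalize;

	kernel_neon_begin();
	sha1_base_do_update(desc, data, len,
			    (sha1_block_fn *)sha1_ce_transform);
	if (!finalize)
		sha1_base_do_finalize(desc, (sha1_block_fn *)sha1_ce_transform);
	kernel_neon_end();
	return sha1_base_finish(desc, out);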
 
 #include <asm/simd.h>
 #include <asm/unaligned.h>
 #include <crypto/internal/hash.h>
+#include <crypto/internal/simd.h>
 #include <crypto/sha.h>
 #include <crypto/sha256_base.h>
 #include <linux/cpufeature.h>
 {
        struct sha256_ce_state *sctx = shash_desc_ctx(desc);
 
-       if (!may_use_simd())
+       if (!crypto_simd_usable())
                return sha256_base_do_update(desc, data, len,
                                (sha256_block_fn *)sha256_block_data_order);
 
        struct sha256_ce_state *sctx = shash_desc_ctx(desc);
        bool finalize = !sctx->sst.count && !(len % SHA256_BLOCK_SIZE);
 
-       if (!may_use_simd()) {
+       if (!crypto_simd_usable()) {
                if (len)
                        sha256_base_do_update(desc, data, len,
                                (sha256_block_fn *)sha256_block_data_order);
 {
        struct sha256_ce_state *sctx = shash_desc_ctx(desc);
 
-       if (!may_use_simd()) {
+       if (!crypto_simd_usable()) {
                sha256_base_do_finalize(desc,
                                (sha256_block_fn *)sha256_block_data_order);
                return sha256_base_finish(desc, out);
 
 #include <asm/neon.h>
 #include <asm/simd.h>
 #include <crypto/internal/hash.h>
+#include <crypto/internal/simd.h>
 #include <crypto/sha.h>
 #include <crypto/sha256_base.h>
 #include <linux/cryptohash.h>
 {
        struct sha256_state *sctx = shash_desc_ctx(desc);
 
-       if (!may_use_simd())
+       if (!crypto_simd_usable())
                return sha256_base_do_update(desc, data, len,
                                (sha256_block_fn *)sha256_block_data_order);
 
 static int sha256_finup_neon(struct shash_desc *desc, const u8 *data,
                             unsigned int len, u8 *out)
 {
-       if (!may_use_simd()) {
+       if (!crypto_simd_usable()) {
                if (len)
                        sha256_base_do_update(desc, data, len,
                                (sha256_block_fn *)sha256_block_data_order);
 
 #include <asm/simd.h>
 #include <asm/unaligned.h>
 #include <crypto/internal/hash.h>
+#include <crypto/internal/simd.h>
 #include <crypto/sha3.h>
 #include <linux/cpufeature.h>
 #include <linux/crypto.h>
        struct sha3_state *sctx = shash_desc_ctx(desc);
        unsigned int digest_size = crypto_shash_digestsize(desc->tfm);
 
-       if (!may_use_simd())
+       if (!crypto_simd_usable())
                return crypto_sha3_update(desc, data, len);
 
        if ((sctx->partial + len) >= sctx->rsiz) {
        __le64 *digest = (__le64 *)out;
        int i;
 
-       if (!may_use_simd())
+       if (!crypto_simd_usable())
                return crypto_sha3_final(desc, out);
 
        sctx->buf[sctx->partial++] = 0x06;
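
The 0x06 written above is SHA-3's domain-separation byte ('01' suffix bits plus the first '1' of the pad10*1 rule); the NEON final path completes the padding before the last Keccak permutation, roughly:

	sctx->buf[sctx->partial++] = 0x06;
	memset(sctx->buf + sctx->partial, 0, sctx->rsiz - sctx->partial);
	sctx->buf[sctx->rsiz - 1] |= 0x80;	/* closing '1' bit of pad10*1 */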
 
 #include <asm/simd.h>
 #include <asm/unaligned.h>
 #include <crypto/internal/hash.h>
+#include <crypto/internal/simd.h>
 #include <crypto/sha.h>
 #include <crypto/sha512_base.h>
 #include <linux/cpufeature.h>
 static int sha512_ce_update(struct shash_desc *desc, const u8 *data,
                            unsigned int len)
 {
-       if (!may_use_simd())
+       if (!crypto_simd_usable())
                return sha512_base_do_update(desc, data, len,
                                (sha512_block_fn *)sha512_block_data_order);
 
 static int sha512_ce_finup(struct shash_desc *desc, const u8 *data,
                           unsigned int len, u8 *out)
 {
-       if (!may_use_simd()) {
+       if (!crypto_simd_usable()) {
                if (len)
                        sha512_base_do_update(desc, data, len,
                                (sha512_block_fn *)sha512_block_data_order);
 
 static int sha512_ce_final(struct shash_desc *desc, u8 *out)
 {
-       if (!may_use_simd()) {
+       if (!crypto_simd_usable()) {
                sha512_base_do_finalize(desc,
                                (sha512_block_fn *)sha512_block_data_order);
                return sha512_base_finish(desc, out);
 
 #include <asm/simd.h>
 #include <asm/unaligned.h>
 #include <crypto/internal/hash.h>
+#include <crypto/internal/simd.h>
 #include <crypto/sm3.h>
 #include <crypto/sm3_base.h>
 #include <linux/cpufeature.h>
 static int sm3_ce_update(struct shash_desc *desc, const u8 *data,
                         unsigned int len)
 {
-       if (!may_use_simd())
+       if (!crypto_simd_usable())
                return crypto_sm3_update(desc, data, len);
 
        kernel_neon_begin();
 
 static int sm3_ce_final(struct shash_desc *desc, u8 *out)
 {
-       if (!may_use_simd())
+       if (!crypto_simd_usable())
                return crypto_sm3_finup(desc, NULL, 0, out);
 
        kernel_neon_begin();
 static int sm3_ce_finup(struct shash_desc *desc, const u8 *data,
                        unsigned int len, u8 *out)
 {
-       if (!may_use_simd())
+       if (!crypto_simd_usable())
                return crypto_sm3_finup(desc, data, len, out);
 
        kernel_neon_begin();
 
 #include <asm/neon.h>
 #include <asm/simd.h>
 #include <crypto/sm4.h>
+#include <crypto/internal/simd.h>
 #include <linux/module.h>
 #include <linux/cpufeature.h>
 #include <linux/crypto.h>
 {
        const struct crypto_sm4_ctx *ctx = crypto_tfm_ctx(tfm);
 
-       if (!may_use_simd()) {
+       if (!crypto_simd_usable()) {
                crypto_sm4_encrypt(tfm, out, in);
        } else {
                kernel_neon_begin();
 {
        const struct crypto_sm4_ctx *ctx = crypto_tfm_ctx(tfm);
 
-       if (!may_use_simd()) {
+       if (!crypto_simd_usable()) {
                crypto_sm4_decrypt(tfm, out, in);
        } else {
                kernel_neon_begin();
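
The payoff of this rename is on the testing side: with CONFIG_CRYPTO_MANAGER_EXTRA_TESTS, testmgr can flip the per-CPU flag so that every !crypto_simd_usable() branch above becomes reachable under test. The testmgr helpers look roughly like this (a sketch; see crypto/testmgr.c from the same series):

static void crypto_disable_simd_for_test(void)
{
	/* keep the flag on the CPU that is running the test */
	preempt_disable();
	__this_cpu_write(crypto_simd_disabled_for_test, true);
}

static void crypto_reenable_simd_for_test(void)
{
	__this_cpu_write(crypto_simd_disabled_for_test, false);
	preempt_enable();
}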