crypto: arch/lib - limit simd usage to 4k chunks
author Jason A. Donenfeld <Jason@zx2c4.com>
Wed, 22 Apr 2020 23:18:53 +0000 (17:18 -0600)
committer Herbert Xu <herbert@gondor.apana.org.au>
Thu, 30 Apr 2020 05:16:59 +0000 (15:16 +1000)
The initial Zinc patchset, after some mailing list discussion, contained
code to ensure that the FPU context enabled by kernel_fpu_begin() would
not be kept on for more than a 4k chunk at a time, since it disables
preemption. The choice of 4k isn't totally scientific, but it's not a
bad guess either, and it's what the x86 poly1305, blake2s, and
nhpoly1305 code already uses (in the form of PAGE_SIZE, which this
commit corrects to an explicit 4k for the former two).

Ard did some back-of-the-envelope calculations and found that at
5 cycles/byte (an overestimate) on a 1 GHz processor (pretty slow), a 4k
chunk means preemption is disabled for at most about 20us, which
Sebastian confirmed was probably a good limit.
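
For reference, a minimal userspace sketch of that back-of-the-envelope
bound (the 4k chunk size is from the patch; the 5 cycles/byte and 1 GHz
figures are the estimates quoted above, not measurements):

  #include <stdio.h>

  int main(void)
  {
          const unsigned long chunk_bytes = 4096;       /* SZ_4K */
          const unsigned long cycles_per_byte = 5;      /* deliberate overestimate */
          const unsigned long cpu_hz = 1000000000UL;    /* 1 GHz, deliberately slow */

          /* 4096 * 5 = 20480 cycles per chunk */
          unsigned long cycles = chunk_bytes * cycles_per_byte;
          /* at 1 GHz, one cycle is 1 ns, so ~20.5 us per chunk */
          double us = (double)cycles * 1e6 / cpu_hz;

          printf("%lu cycles -> about %.1f us with preemption disabled\n",
                 cycles, us);
          return 0;
  }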

Unfortunately the chunking appears to have been left out of the final
patchset that added the glue code. So, this commit adds it back in.
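
The shape of the fix is the same in every glue routine; a sketch of the
pattern (not a verbatim hunk), capping each kernel_fpu_begin()/end() or
kernel_neon_begin()/end() section at 4k, as in the hunks below:

  while (bytes) {
          unsigned int todo = min_t(unsigned int, bytes, SZ_4K);

          kernel_fpu_begin();
          /* run the SIMD routine on 'todo' bytes */
          kernel_fpu_end();

          bytes -= todo;
          src += todo;
          dst += todo;
  }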

Fixes: 84e03fa39fbe ("crypto: x86/chacha - expose SIMD ChaCha routine as library function")
Fixes: b3aad5bad26a ("crypto: arm64/chacha - expose arm64 ChaCha routine as library function")
Fixes: a44a3430d71b ("crypto: arm/chacha - expose ARM ChaCha routine as library function")
Fixes: d7d7b8535662 ("crypto: x86/poly1305 - wire up faster implementations for kernel")
Fixes: f569ca164751 ("crypto: arm64/poly1305 - incorporate OpenSSL/CRYPTOGAMS NEON implementation")
Fixes: a6b803b3ddc7 ("crypto: arm/poly1305 - incorporate OpenSSL/CRYPTOGAMS NEON implementation")
Fixes: ed0356eda153 ("crypto: blake2s - x86_64 SIMD implementation")
Cc: Eric Biggers <ebiggers@google.com>
Cc: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Cc: stable@vger.kernel.org
Signed-off-by: Jason A. Donenfeld <Jason@zx2c4.com>
Reviewed-by: Ard Biesheuvel <ardb@kernel.org>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
arch/arm/crypto/chacha-glue.c
arch/arm/crypto/poly1305-glue.c
arch/arm64/crypto/chacha-neon-glue.c
arch/arm64/crypto/poly1305-glue.c
arch/x86/crypto/blake2s-glue.c
arch/x86/crypto/chacha_glue.c
arch/x86/crypto/poly1305_glue.c

diff --git a/arch/arm/crypto/chacha-glue.c b/arch/arm/crypto/chacha-glue.c
index 6fdb0ac62b3d85076824ecbb93eae1139bd79a57..59da6c0b63b62bc0e6015fa1b1ccfc8d74e8b9d6 100644
--- a/arch/arm/crypto/chacha-glue.c
+++ b/arch/arm/crypto/chacha-glue.c
@@ -91,9 +91,17 @@ void chacha_crypt_arch(u32 *state, u8 *dst, const u8 *src, unsigned int bytes,
                return;
        }
 
-       kernel_neon_begin();
-       chacha_doneon(state, dst, src, bytes, nrounds);
-       kernel_neon_end();
+       do {
+               unsigned int todo = min_t(unsigned int, bytes, SZ_4K);
+
+               kernel_neon_begin();
+               chacha_doneon(state, dst, src, todo, nrounds);
+               kernel_neon_end();
+
+               bytes -= todo;
+               src += todo;
+               dst += todo;
+       } while (bytes);
 }
 EXPORT_SYMBOL(chacha_crypt_arch);
 
diff --git a/arch/arm/crypto/poly1305-glue.c b/arch/arm/crypto/poly1305-glue.c
index ceec04ec2f4054eed9f2d8eea12dd87ce0585e72..13cfef4ae22e312dcb9f6b032f9d5c6bfbb27ee9 100644
--- a/arch/arm/crypto/poly1305-glue.c
+++ b/arch/arm/crypto/poly1305-glue.c
@@ -160,13 +160,20 @@ void poly1305_update_arch(struct poly1305_desc_ctx *dctx, const u8 *src,
                unsigned int len = round_down(nbytes, POLY1305_BLOCK_SIZE);
 
                if (static_branch_likely(&have_neon) && do_neon) {
-                       kernel_neon_begin();
-                       poly1305_blocks_neon(&dctx->h, src, len, 1);
-                       kernel_neon_end();
+                       do {
+                               unsigned int todo = min_t(unsigned int, len, SZ_4K);
+
+                               kernel_neon_begin();
+                               poly1305_blocks_neon(&dctx->h, src, todo, 1);
+                               kernel_neon_end();
+
+                               len -= todo;
+                               src += todo;
+                       } while (len);
                } else {
                        poly1305_blocks_arm(&dctx->h, src, len, 1);
+                       src += len;
                }
-               src += len;
                nbytes %= POLY1305_BLOCK_SIZE;
        }
 
diff --git a/arch/arm64/crypto/chacha-neon-glue.c b/arch/arm64/crypto/chacha-neon-glue.c
index 37ca3e8898484572d37ae0222cf13100c4dce952..af2bbca38e70fb3514bf594cafffbf907c8eeffa 100644
--- a/arch/arm64/crypto/chacha-neon-glue.c
+++ b/arch/arm64/crypto/chacha-neon-glue.c
@@ -87,9 +87,17 @@ void chacha_crypt_arch(u32 *state, u8 *dst, const u8 *src, unsigned int bytes,
            !crypto_simd_usable())
                return chacha_crypt_generic(state, dst, src, bytes, nrounds);
 
-       kernel_neon_begin();
-       chacha_doneon(state, dst, src, bytes, nrounds);
-       kernel_neon_end();
+       do {
+               unsigned int todo = min_t(unsigned int, bytes, SZ_4K);
+
+               kernel_neon_begin();
+               chacha_doneon(state, dst, src, todo, nrounds);
+               kernel_neon_end();
+
+               bytes -= todo;
+               src += todo;
+               dst += todo;
+       } while (bytes);
 }
 EXPORT_SYMBOL(chacha_crypt_arch);
 
diff --git a/arch/arm64/crypto/poly1305-glue.c b/arch/arm64/crypto/poly1305-glue.c
index e97b092f56b8fcae5531be3db610737b22f7a057..f33ada70c4ed8f84171b202e3f447880e0e7696f 100644
--- a/arch/arm64/crypto/poly1305-glue.c
+++ b/arch/arm64/crypto/poly1305-glue.c
@@ -143,13 +143,20 @@ void poly1305_update_arch(struct poly1305_desc_ctx *dctx, const u8 *src,
                unsigned int len = round_down(nbytes, POLY1305_BLOCK_SIZE);
 
                if (static_branch_likely(&have_neon) && crypto_simd_usable()) {
-                       kernel_neon_begin();
-                       poly1305_blocks_neon(&dctx->h, src, len, 1);
-                       kernel_neon_end();
+                       do {
+                               unsigned int todo = min_t(unsigned int, len, SZ_4K);
+
+                               kernel_neon_begin();
+                               poly1305_blocks_neon(&dctx->h, src, todo, 1);
+                               kernel_neon_end();
+
+                               len -= todo;
+                               src += todo;
+                       } while (len);
                } else {
                        poly1305_blocks(&dctx->h, src, len, 1);
+                       src += len;
                }
-               src += len;
                nbytes %= POLY1305_BLOCK_SIZE;
        }
 
diff --git a/arch/x86/crypto/blake2s-glue.c b/arch/x86/crypto/blake2s-glue.c
index 06ef2d4a470171aecc249d7bbddedccebf67e04e..6737bcea1fa148a129cbc334c92778889dc41963 100644
--- a/arch/x86/crypto/blake2s-glue.c
+++ b/arch/x86/crypto/blake2s-glue.c
@@ -32,16 +32,16 @@ void blake2s_compress_arch(struct blake2s_state *state,
                           const u32 inc)
 {
        /* SIMD disables preemption, so relax after processing each page. */
-       BUILD_BUG_ON(PAGE_SIZE / BLAKE2S_BLOCK_SIZE < 8);
+       BUILD_BUG_ON(SZ_4K / BLAKE2S_BLOCK_SIZE < 8);
 
        if (!static_branch_likely(&blake2s_use_ssse3) || !crypto_simd_usable()) {
                blake2s_compress_generic(state, block, nblocks, inc);
                return;
        }
 
-       for (;;) {
+       do {
                const size_t blocks = min_t(size_t, nblocks,
-                                           PAGE_SIZE / BLAKE2S_BLOCK_SIZE);
+                                           SZ_4K / BLAKE2S_BLOCK_SIZE);
 
                kernel_fpu_begin();
                if (IS_ENABLED(CONFIG_AS_AVX512) &&
@@ -52,10 +52,8 @@ void blake2s_compress_arch(struct blake2s_state *state,
                kernel_fpu_end();
 
                nblocks -= blocks;
-               if (!nblocks)
-                       break;
                block += blocks * BLAKE2S_BLOCK_SIZE;
-       }
+       } while (nblocks);
 }
 EXPORT_SYMBOL(blake2s_compress_arch);
 
diff --git a/arch/x86/crypto/chacha_glue.c b/arch/x86/crypto/chacha_glue.c
index b412c21ee06e29a7d29aa94b6ada32e4da09484d..22250091cdbec81e24d9a9e2345e2966b511a2a0 100644
--- a/arch/x86/crypto/chacha_glue.c
+++ b/arch/x86/crypto/chacha_glue.c
@@ -153,9 +153,17 @@ void chacha_crypt_arch(u32 *state, u8 *dst, const u8 *src, unsigned int bytes,
            bytes <= CHACHA_BLOCK_SIZE)
                return chacha_crypt_generic(state, dst, src, bytes, nrounds);
 
-       kernel_fpu_begin();
-       chacha_dosimd(state, dst, src, bytes, nrounds);
-       kernel_fpu_end();
+       do {
+               unsigned int todo = min_t(unsigned int, bytes, SZ_4K);
+
+               kernel_fpu_begin();
+               chacha_dosimd(state, dst, src, todo, nrounds);
+               kernel_fpu_end();
+
+               bytes -= todo;
+               src += todo;
+               dst += todo;
+       } while (bytes);
 }
 EXPORT_SYMBOL(chacha_crypt_arch);
 
diff --git a/arch/x86/crypto/poly1305_glue.c b/arch/x86/crypto/poly1305_glue.c
index 6dfec19f7d579aa7b717dc511861324e3d3a86e8..dfe921efa9b25cf2816564a84bd50412a5d73770 100644
--- a/arch/x86/crypto/poly1305_glue.c
+++ b/arch/x86/crypto/poly1305_glue.c
@@ -91,8 +91,8 @@ static void poly1305_simd_blocks(void *ctx, const u8 *inp, size_t len,
        struct poly1305_arch_internal *state = ctx;
 
        /* SIMD disables preemption, so relax after processing each page. */
-       BUILD_BUG_ON(PAGE_SIZE < POLY1305_BLOCK_SIZE ||
-                    PAGE_SIZE % POLY1305_BLOCK_SIZE);
+       BUILD_BUG_ON(SZ_4K < POLY1305_BLOCK_SIZE ||
+                    SZ_4K % POLY1305_BLOCK_SIZE);
 
        if (!static_branch_likely(&poly1305_use_avx) ||
            (len < (POLY1305_BLOCK_SIZE * 18) && !state->is_base2_26) ||
@@ -102,8 +102,8 @@ static void poly1305_simd_blocks(void *ctx, const u8 *inp, size_t len,
                return;
        }
 
-       for (;;) {
-               const size_t bytes = min_t(size_t, len, PAGE_SIZE);
+       do {
+               const size_t bytes = min_t(size_t, len, SZ_4K);
 
                kernel_fpu_begin();
                if (IS_ENABLED(CONFIG_AS_AVX512) && static_branch_likely(&poly1305_use_avx512))
@@ -113,11 +113,10 @@ static void poly1305_simd_blocks(void *ctx, const u8 *inp, size_t len,
                else
                        poly1305_blocks_avx(ctx, inp, bytes, padbit);
                kernel_fpu_end();
+
                len -= bytes;
-               if (!len)
-                       break;
                inp += bytes;
-       }
+       } while (len);
 }
 
 static void poly1305_simd_emit(void *ctx, u8 mac[POLY1305_DIGEST_SIZE],