From adcb9e32e5e28935ec1148e1a314282a7367428d Mon Sep 17 00:00:00 2001
From: Herbert Xu
Date: Fri, 2 May 2025 13:31:03 +0800
Subject: [PATCH 01/16] crypto: arm64/sha256 - Add simd block function

Add CRYPTO_ARCH_HAVE_LIB_SHA256_SIMD and a SIMD block function
so that the caller can decide whether to use SIMD.

Signed-off-by: Herbert Xu
---
 arch/arm64/crypto/sha512-glue.c     |  6 +++---
 arch/arm64/lib/crypto/Kconfig       |  1 +
 arch/arm64/lib/crypto/sha2-armv8.pl |  2 +-
 arch/arm64/lib/crypto/sha256.c      | 14 +++++++-------
 4 files changed, 12 insertions(+), 11 deletions(-)

diff --git a/arch/arm64/crypto/sha512-glue.c b/arch/arm64/crypto/sha512-glue.c
index ab2e1c13dfad..15aa9d8b7b2c 100644
--- a/arch/arm64/crypto/sha512-glue.c
+++ b/arch/arm64/crypto/sha512-glue.c
@@ -18,13 +18,13 @@ MODULE_LICENSE("GPL v2");
 MODULE_ALIAS_CRYPTO("sha384");
 MODULE_ALIAS_CRYPTO("sha512");
 
-asmlinkage void sha512_block_data_order(u64 *digest, const void *data,
-					unsigned int num_blks);
+asmlinkage void sha512_blocks_arch(u64 *digest, const void *data,
+				   unsigned int num_blks);
 
 static void sha512_arm64_transform(struct sha512_state *sst, u8 const *src,
 				   int blocks)
 {
-	sha512_block_data_order(sst->state, src, blocks);
+	sha512_blocks_arch(sst->state, src, blocks);
 }
 
 static int sha512_update(struct shash_desc *desc, const u8 *data,
diff --git a/arch/arm64/lib/crypto/Kconfig b/arch/arm64/lib/crypto/Kconfig
index 49e57bfdb5b5..129a7685cb4c 100644
--- a/arch/arm64/lib/crypto/Kconfig
+++ b/arch/arm64/lib/crypto/Kconfig
@@ -17,3 +17,4 @@ config CRYPTO_SHA256_ARM64
 	tristate
 	default CRYPTO_LIB_SHA256
 	select CRYPTO_ARCH_HAVE_LIB_SHA256
+	select CRYPTO_ARCH_HAVE_LIB_SHA256_SIMD
diff --git a/arch/arm64/lib/crypto/sha2-armv8.pl b/arch/arm64/lib/crypto/sha2-armv8.pl
index 35ec9ae99fe1..4aebd20c498b 100644
--- a/arch/arm64/lib/crypto/sha2-armv8.pl
+++ b/arch/arm64/lib/crypto/sha2-armv8.pl
@@ -95,7 +95,7 @@ if ($output =~ /512/) {
 	$reg_t="w";
 }
 
-$func="sha${BITS}_block_data_order";
+$func="sha${BITS}_blocks_arch";
 
 ($ctx,$inp,$num,$Ktbl)=map("x$_",(0..2,30));
 
diff --git a/arch/arm64/lib/crypto/sha256.c b/arch/arm64/lib/crypto/sha256.c
index fb9bff40357b..bcf7a3adc0c4 100644
--- a/arch/arm64/lib/crypto/sha256.c
+++ b/arch/arm64/lib/crypto/sha256.c
@@ -6,12 +6,12 @@
  */
 #include <asm/neon.h>
 #include <crypto/internal/sha2.h>
-#include <crypto/internal/simd.h>
 #include <linux/kernel.h>
 #include <linux/module.h>
 
-asmlinkage void sha256_block_data_order(u32 state[SHA256_STATE_WORDS],
-					const u8 *data, size_t nblocks);
+asmlinkage void sha256_blocks_arch(u32 state[SHA256_STATE_WORDS],
+				   const u8 *data, size_t nblocks);
+EXPORT_SYMBOL_GPL(sha256_blocks_arch);
 asmlinkage void sha256_block_neon(u32 state[SHA256_STATE_WORDS],
 				  const u8 *data, size_t nblocks);
 asmlinkage size_t __sha256_ce_transform(u32 state[SHA256_STATE_WORDS],
@@ -20,11 +20,11 @@ asmlinkage size_t __sha256_ce_transform(u32 state[SHA256_STATE_WORDS],
 static __ro_after_init DEFINE_STATIC_KEY_FALSE(have_neon);
 static __ro_after_init DEFINE_STATIC_KEY_FALSE(have_ce);
 
-void sha256_blocks_arch(u32 state[SHA256_STATE_WORDS],
+void sha256_blocks_simd(u32 state[SHA256_STATE_WORDS],
 			const u8 *data, size_t nblocks)
 {
 	if (IS_ENABLED(CONFIG_KERNEL_MODE_NEON) &&
-	    static_branch_likely(&have_neon) && crypto_simd_usable()) {
+	    static_branch_likely(&have_neon)) {
 		if (static_branch_likely(&have_ce)) {
 			do {
 				size_t rem;
@@ -42,10 +42,10 @@ void sha256_blocks_arch(u32 state[SHA256_STATE_WORDS],
 			kernel_neon_end();
 		}
 	} else {
-		sha256_block_data_order(state, data, nblocks);
+		sha256_blocks_arch(state, data, nblocks);
 	}
 }
-EXPORT_SYMBOL_GPL(sha256_blocks_arch);
+EXPORT_SYMBOL_GPL(sha256_blocks_simd);
 
 bool sha256_is_arch_optimized(void)
 {
-- 
2.51.0
From 491d6024f2820c78216b07cec1cb47c87dcae077 Mon Sep 17 00:00:00 2001
From: Herbert Xu
Date: Fri, 2 May 2025 13:31:05 +0800
Subject: [PATCH 02/16] crypto: riscv/sha256 - Add simd block function

Add CRYPTO_ARCH_HAVE_LIB_SHA256_SIMD and a SIMD block function
so that the caller can decide whether to use SIMD.

Signed-off-by: Herbert Xu
---
 arch/riscv/lib/crypto/Kconfig  |  1 +
 arch/riscv/lib/crypto/sha256.c | 13 +++++++++----
 2 files changed, 10 insertions(+), 4 deletions(-)

diff --git a/arch/riscv/lib/crypto/Kconfig b/arch/riscv/lib/crypto/Kconfig
index c100571feb7e..47c99ea97ce2 100644
--- a/arch/riscv/lib/crypto/Kconfig
+++ b/arch/riscv/lib/crypto/Kconfig
@@ -12,4 +12,5 @@ config CRYPTO_SHA256_RISCV64
 	depends on 64BIT && RISCV_ISA_V && TOOLCHAIN_HAS_VECTOR_CRYPTO
 	default CRYPTO_LIB_SHA256
 	select CRYPTO_ARCH_HAVE_LIB_SHA256
+	select CRYPTO_ARCH_HAVE_LIB_SHA256_SIMD
 	select CRYPTO_LIB_SHA256_GENERIC
diff --git a/arch/riscv/lib/crypto/sha256.c b/arch/riscv/lib/crypto/sha256.c
index 4ad3ffb8e0a9..71808397dff4 100644
--- a/arch/riscv/lib/crypto/sha256.c
+++ b/arch/riscv/lib/crypto/sha256.c
@@ -9,10 +9,8 @@
  * Author: Jerry Shih
  */
 
-#include <asm/simd.h>
 #include <asm/vector.h>
 #include <crypto/internal/sha2.h>
-#include <crypto/internal/simd.h>
 #include <linux/kernel.h>
 #include <linux/module.h>
 
@@ -21,10 +19,10 @@ asmlinkage void sha256_transform_zvknha_or_zvknhb_zvkb(
 
 static __ro_after_init DEFINE_STATIC_KEY_FALSE(have_extensions);
 
-void sha256_blocks_arch(u32 state[SHA256_STATE_WORDS],
+void sha256_blocks_simd(u32 state[SHA256_STATE_WORDS],
 			const u8 *data, size_t nblocks)
 {
-	if (static_branch_likely(&have_extensions) && crypto_simd_usable()) {
+	if (static_branch_likely(&have_extensions)) {
 		kernel_vector_begin();
 		sha256_transform_zvknha_or_zvknhb_zvkb(state, data, nblocks);
 		kernel_vector_end();
@@ -32,6 +30,13 @@ void sha256_blocks_arch(u32 state[SHA256_STATE_WORDS],
 		sha256_blocks_generic(state, data, nblocks);
 	}
 }
+EXPORT_SYMBOL_GPL(sha256_blocks_simd);
+
+void sha256_blocks_arch(u32 state[SHA256_STATE_WORDS],
+			const u8 *data, size_t nblocks)
+{
+	sha256_blocks_generic(state, data, nblocks);
+}
 EXPORT_SYMBOL_GPL(sha256_blocks_arch);
 
 bool sha256_is_arch_optimized(void)
-- 
2.51.0

From ee8a720e39ceb7495ab639c1eb6d4987fb6a52bf Mon Sep 17 00:00:00 2001
From: Herbert Xu
Date: Fri, 2 May 2025 13:31:07 +0800
Subject: [PATCH 03/16] crypto: x86/sha256 - Add simd block function

Add CRYPTO_ARCH_HAVE_LIB_SHA256_SIMD and a SIMD block function
so that the caller can decide whether to use SIMD.
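
With the SIMD decision moved out of the architecture code, a caller
can now gate the SIMD path itself.  A minimal sketch of such a caller
(illustrative only; the wrapper name is made up, crypto_simd_usable()
is the existing test from <crypto/internal/simd.h>):

	static void sha256_blocks(u32 state[SHA256_STATE_WORDS],
				  const u8 *data, size_t nblocks)
	{
		/* SIMD only when the FPU/vector unit may be touched. */
		if (crypto_simd_usable())
			sha256_blocks_simd(state, data, nblocks);
		else
			sha256_blocks_arch(state, data, nblocks);
	}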
Signed-off-by: Herbert Xu
---
 arch/x86/lib/crypto/Kconfig  |  1 +
 arch/x86/lib/crypto/sha256.c | 12 +++++++++---
 2 files changed, 10 insertions(+), 3 deletions(-)

diff --git a/arch/x86/lib/crypto/Kconfig b/arch/x86/lib/crypto/Kconfig
index e344579db3d8..5e94cdee492c 100644
--- a/arch/x86/lib/crypto/Kconfig
+++ b/arch/x86/lib/crypto/Kconfig
@@ -30,4 +30,5 @@ config CRYPTO_SHA256_X86_64
 	depends on 64BIT
 	default CRYPTO_LIB_SHA256
 	select CRYPTO_ARCH_HAVE_LIB_SHA256
+	select CRYPTO_ARCH_HAVE_LIB_SHA256_SIMD
 	select CRYPTO_LIB_SHA256_GENERIC
diff --git a/arch/x86/lib/crypto/sha256.c b/arch/x86/lib/crypto/sha256.c
index baba74d7d26f..80380f8fdcee 100644
--- a/arch/x86/lib/crypto/sha256.c
+++ b/arch/x86/lib/crypto/sha256.c
@@ -6,7 +6,6 @@
  */
 #include <asm/fpu/api.h>
 #include <crypto/internal/sha2.h>
-#include <crypto/internal/simd.h>
 #include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/static_call.h>
@@ -24,10 +23,10 @@ static __ro_after_init DEFINE_STATIC_KEY_FALSE(have_sha256_x86);
 
 DEFINE_STATIC_CALL(sha256_blocks_x86, sha256_transform_ssse3);
 
-void sha256_blocks_arch(u32 state[SHA256_STATE_WORDS],
+void sha256_blocks_simd(u32 state[SHA256_STATE_WORDS],
 			const u8 *data, size_t nblocks)
 {
-	if (static_branch_likely(&have_sha256_x86) && crypto_simd_usable()) {
+	if (static_branch_likely(&have_sha256_x86)) {
 		kernel_fpu_begin();
 		static_call(sha256_blocks_x86)(state, data, nblocks);
 		kernel_fpu_end();
@@ -35,6 +34,13 @@ void sha256_blocks_arch(u32 state[SHA256_STATE_WORDS],
 		sha256_blocks_generic(state, data, nblocks);
 	}
 }
+EXPORT_SYMBOL_GPL(sha256_blocks_simd);
+
+void sha256_blocks_arch(u32 state[SHA256_STATE_WORDS],
+			const u8 *data, size_t nblocks)
+{
+	sha256_blocks_generic(state, data, nblocks);
+}
 EXPORT_SYMBOL_GPL(sha256_blocks_arch);
 
 bool sha256_is_arch_optimized(void)
-- 
2.51.0

From 3007e90572d0c5fd409c3d2fa8cedcbd5cb06d4b Mon Sep 17 00:00:00 2001
From: Herbert Xu
Date: Fri, 2 May 2025 13:31:09 +0800
Subject: [PATCH 04/16] crypto: lib/sha256 - Use generic block helper

Use the BLOCK_HASH_UPDATE_BLOCKS helper instead of duplicating
partial block handling.

Also remove the unused lib/sha256 force-generic interface.
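
The helper buffers any partial block and feeds whole blocks to the
supplied block function.  Roughly, the update path it replaces looks
like this (a sketch, not the exact macro expansion):

	if (partial) {
		size_t l = min(len, SHA256_BLOCK_SIZE - partial);

		memcpy(&sctx->buf[partial], data, l);
		data += l;
		len -= l;
		partial += l;
		if (partial == SHA256_BLOCK_SIZE) {
			sha256_blocks(sctx->ctx.state, sctx->buf, 1);
			partial = 0;
		}
	}
	if (len >= SHA256_BLOCK_SIZE) {
		size_t nblocks = len / SHA256_BLOCK_SIZE;

		sha256_blocks(sctx->ctx.state, data, nblocks);
		data += nblocks * SHA256_BLOCK_SIZE;
		len %= SHA256_BLOCK_SIZE;
	}
	memcpy(&sctx->buf[partial], data, len);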
Signed-off-by: Herbert Xu
---
 include/crypto/internal/sha2.h |  7 ----
 lib/crypto/sha256.c            | 71 +++++-----------------------------
 2 files changed, 10 insertions(+), 68 deletions(-)

diff --git a/include/crypto/internal/sha2.h b/include/crypto/internal/sha2.h
index fff156f66edc..b9bccd3ff57f 100644
--- a/include/crypto/internal/sha2.h
+++ b/include/crypto/internal/sha2.h
@@ -10,13 +10,6 @@
 #include <crypto/sha2.h>
 #include <linux/types.h>
 
-void sha256_update_generic(struct sha256_state *sctx,
-			   const u8 *data, size_t len);
-void sha256_final_generic(struct sha256_state *sctx,
-			  u8 out[SHA256_DIGEST_SIZE]);
-void sha224_final_generic(struct sha256_state *sctx,
-			  u8 out[SHA224_DIGEST_SIZE]);
-
 #if IS_ENABLED(CONFIG_CRYPTO_ARCH_HAVE_LIB_SHA256)
 bool sha256_is_arch_optimized(void);
 #else
diff --git a/lib/crypto/sha256.c b/lib/crypto/sha256.c
index 2ced29efa181..107e5162507a 100644
--- a/lib/crypto/sha256.c
+++ b/lib/crypto/sha256.c
@@ -11,6 +11,7 @@
  * Copyright (c) 2014 Red Hat Inc.
  */
 
+#include <crypto/internal/blockhash.h>
 #include <crypto/internal/sha2.h>
 #include <linux/kernel.h>
 #include <linux/module.h>
@@ -31,71 +32,40 @@ static inline bool sha256_purgatory(void)
 }
 
 static inline void sha256_blocks(u32 state[SHA256_STATE_WORDS], const u8 *data,
-				 size_t nblocks, bool force_generic)
+				 size_t nblocks)
 {
-	sha256_choose_blocks(state, data, nblocks,
-			     force_generic || sha256_purgatory(), false);
+	sha256_choose_blocks(state, data, nblocks, sha256_purgatory(), false);
 }
 
-static inline void __sha256_update(struct sha256_state *sctx, const u8 *data,
-				   size_t len, bool force_generic)
+void sha256_update(struct sha256_state *sctx, const u8 *data, size_t len)
 {
 	size_t partial = sctx->count % SHA256_BLOCK_SIZE;
 
 	sctx->count += len;
-
-	if (partial + len >= SHA256_BLOCK_SIZE) {
-		size_t nblocks;
-
-		if (partial) {
-			size_t l = SHA256_BLOCK_SIZE - partial;
-
-			memcpy(&sctx->buf[partial], data, l);
-			data += l;
-			len -= l;
-
-			sha256_blocks(sctx->state, sctx->buf, 1, force_generic);
-		}
-
-		nblocks = len / SHA256_BLOCK_SIZE;
-		len %= SHA256_BLOCK_SIZE;
-
-		if (nblocks) {
-			sha256_blocks(sctx->state, data, nblocks,
-				      force_generic);
-			data += nblocks * SHA256_BLOCK_SIZE;
-		}
-		partial = 0;
-	}
-	if (len)
-		memcpy(&sctx->buf[partial], data, len);
-}
-
-void sha256_update(struct sha256_state *sctx, const u8 *data, size_t len)
-{
-	__sha256_update(sctx, data, len, false);
+	BLOCK_HASH_UPDATE_BLOCKS(sha256_blocks, sctx->ctx.state, data, len,
+				 SHA256_BLOCK_SIZE, sctx->buf, partial);
 }
 EXPORT_SYMBOL(sha256_update);
 
 static inline void __sha256_final(struct sha256_state *sctx, u8 *out,
-				  size_t digest_size, bool force_generic)
+				  size_t digest_size)
 {
 	size_t partial = sctx->count % SHA256_BLOCK_SIZE;
 
 	sha256_finup(&sctx->ctx, sctx->buf, partial, out, digest_size,
-		     force_generic || sha256_purgatory(), false);
+		     sha256_purgatory(), false);
 	memzero_explicit(sctx, sizeof(*sctx));
 }
 
 void sha256_final(struct sha256_state *sctx, u8 out[SHA256_DIGEST_SIZE])
 {
-	__sha256_final(sctx, out, SHA256_DIGEST_SIZE, false);
+	__sha256_final(sctx, out, SHA256_DIGEST_SIZE);
 }
 EXPORT_SYMBOL(sha256_final);
 
 void sha224_final(struct sha256_state *sctx, u8 out[SHA224_DIGEST_SIZE])
 {
-	__sha256_final(sctx, out, SHA224_DIGEST_SIZE, false);
+	__sha256_final(sctx, out, SHA224_DIGEST_SIZE);
 }
 EXPORT_SYMBOL(sha224_final);
 
@@ -109,26 +79,5 @@ void sha256(const u8 *data, size_t len, u8 out[SHA256_DIGEST_SIZE])
 }
 EXPORT_SYMBOL(sha256);
 
-#if IS_ENABLED(CONFIG_CRYPTO_SHA256) && !defined(__DISABLE_EXPORTS)
-void sha256_update_generic(struct sha256_state *sctx,
-			   const u8 *data, size_t len)
-{
-	__sha256_update(sctx, data, len, true);
-}
-EXPORT_SYMBOL(sha256_update_generic);
-
-void sha256_final_generic(struct sha256_state *sctx, u8 out[SHA256_DIGEST_SIZE])
-{
-	__sha256_final(sctx, out, SHA256_DIGEST_SIZE, true);
-}
-EXPORT_SYMBOL(sha256_final_generic);
-
-void sha224_final_generic(struct sha256_state *sctx, u8 out[SHA224_DIGEST_SIZE])
-{
-	__sha256_final(sctx, out, SHA224_DIGEST_SIZE, true);
-}
-EXPORT_SYMBOL(sha224_final_generic);
-#endif
-
 MODULE_DESCRIPTION("SHA-256 Algorithm");
 MODULE_LICENSE("GPL");
-- 
2.51.0

From 3bf5337879101166dfacfbc2a780d1a379c288ba Mon Sep 17 00:00:00 2001
From: Herbert Xu
Date: Fri, 2 May 2025 13:31:12 +0800
Subject: [PATCH 05/16] crypto: sha256 - Use the partial block API

Use the shash partial block API by default.  Add a separate set of
lib shash algorithms to preserve testing coverage until lib/sha256
has its own tests.
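
Under CRYPTO_AHASH_ALG_BLOCK_ONLY, ->update() only consumes whole
blocks and reports the leftover byte count back to the API, which
buffers the tail and hands it to ->finup().  A sketch of the contract
(names are illustrative):

	static int example_update(struct shash_desc *desc, const u8 *data,
				  unsigned int len)
	{
		unsigned int n = round_down(len, EXAMPLE_BLOCK_SIZE);

		example_hash_blocks(shash_desc_ctx(desc), data, n);
		return len - n;	/* leftover bytes for the API to buffer */
	}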
Signed-off-by: Herbert Xu
---
 crypto/sha256.c | 81 +++++++++++++++++++++++++++++++++++--------------
 1 file changed, 58 insertions(+), 23 deletions(-)

diff --git a/crypto/sha256.c b/crypto/sha256.c
index cf190114574e..4aeb213bab11 100644
--- a/crypto/sha256.c
+++ b/crypto/sha256.c
@@ -52,14 +52,20 @@ static int crypto_sha256_update_generic(struct shash_desc *desc, const u8 *data,
 	return crypto_sha256_update(desc, data, len, true);
 }
 
-static int crypto_sha256_update_arch(struct shash_desc *desc, const u8 *data,
-				     unsigned int len)
+static int crypto_sha256_update_lib(struct shash_desc *desc, const u8 *data,
+				    unsigned int len)
 {
 	sha256_update(shash_desc_ctx(desc), data, len);
 	return 0;
 }
 
-static int crypto_sha256_final_arch(struct shash_desc *desc, u8 *out)
+static int crypto_sha256_update_arch(struct shash_desc *desc, const u8 *data,
+				     unsigned int len)
+{
+	return crypto_sha256_update(desc, data, len, false);
+}
+
+static int crypto_sha256_final_lib(struct shash_desc *desc, u8 *out)
 {
 	sha256_final(shash_desc_ctx(desc), out);
 	return 0;
@@ -93,11 +99,7 @@ static int crypto_sha256_finup_generic(struct shash_desc *desc, const u8 *data,
 static int crypto_sha256_finup_arch(struct shash_desc *desc, const u8 *data,
 				    unsigned int len, u8 *out)
 {
-	struct sha256_state *sctx = shash_desc_ctx(desc);
-
-	sha256_update(sctx, data, len);
-	sha256_final(sctx, out);
-	return 0;
+	return crypto_sha256_finup(desc, data, len, out, false);
 }
 
 static int crypto_sha256_digest_generic(struct shash_desc *desc, const u8 *data,
@@ -107,20 +109,27 @@ static int crypto_sha256_digest_generic(struct shash_desc *desc, const u8 *data,
 	return crypto_sha256_finup_generic(desc, data, len, out);
 }
 
-static int crypto_sha256_digest_arch(struct shash_desc *desc, const u8 *data,
-				     unsigned int len, u8 *out)
+static int crypto_sha256_digest_lib(struct shash_desc *desc, const u8 *data,
+				    unsigned int len, u8 *out)
 {
 	sha256(data, len, out);
 	return 0;
 }
 
+static int crypto_sha256_digest_arch(struct shash_desc *desc, const u8 *data,
+				     unsigned int len, u8 *out)
+{
+	crypto_sha256_init(desc);
+	return crypto_sha256_finup_arch(desc, data, len, out);
+}
+
 static int crypto_sha224_init(struct shash_desc *desc)
 {
 	sha224_block_init(shash_desc_ctx(desc));
 	return 0;
 }
 
-static int crypto_sha224_final_arch(struct shash_desc *desc, u8 *out)
+static int crypto_sha224_final_lib(struct shash_desc *desc, u8 *out)
 {
 	sha224_final(shash_desc_ctx(desc), out);
 	return 0;
@@ -184,16 +193,14 @@ static struct shash_alg algs[] = {
 	},
 	{
 		.base.cra_name		= "sha256",
-		.base.cra_driver_name	= "sha256-" __stringify(ARCH),
-		.base.cra_priority	= 300,
+		.base.cra_driver_name	= "sha256-lib",
 		.base.cra_blocksize	= SHA256_BLOCK_SIZE,
 		.base.cra_module	= THIS_MODULE,
 		.digestsize		= SHA256_DIGEST_SIZE,
 		.init			= crypto_sha256_init,
-		.update			= crypto_sha256_update_arch,
-		.final			= crypto_sha256_final_arch,
-		.finup			= crypto_sha256_finup_arch,
-		.digest			= crypto_sha256_digest_arch,
+		.update			= crypto_sha256_update_lib,
+		.final			= crypto_sha256_final_lib,
+		.digest			= crypto_sha256_digest_lib,
 		.descsize		= sizeof(struct sha256_state),
 		.statesize		= sizeof(struct crypto_sha256_state) +
 					  SHA256_BLOCK_SIZE + 1,
@@ -202,20 +209,48 @@
 	{
 		.base.cra_name		= "sha224",
-		.base.cra_driver_name	= "sha224-" __stringify(ARCH),
-		.base.cra_priority	= 300,
+		.base.cra_driver_name	= "sha224-lib",
 		.base.cra_blocksize	= SHA224_BLOCK_SIZE,
 		.base.cra_module	= THIS_MODULE,
 		.digestsize		= SHA224_DIGEST_SIZE,
 		.init			= crypto_sha224_init,
-		.update			= crypto_sha256_update_arch,
-		.final			= crypto_sha224_final_arch,
+		.update			= crypto_sha256_update_lib,
+		.final			= crypto_sha224_final_lib,
 		.descsize		= sizeof(struct sha256_state),
 		.statesize		= sizeof(struct crypto_sha256_state) +
 					  SHA256_BLOCK_SIZE + 1,
 		.import			= crypto_sha256_import_lib,
 		.export			= crypto_sha256_export_lib,
 	},
+	{
+		.base.cra_name		= "sha256",
+		.base.cra_driver_name	= "sha256-" __stringify(ARCH),
+		.base.cra_priority	= 300,
+		.base.cra_flags		= CRYPTO_AHASH_ALG_BLOCK_ONLY |
+					  CRYPTO_AHASH_ALG_FINUP_MAX,
+		.base.cra_blocksize	= SHA256_BLOCK_SIZE,
+		.base.cra_module	= THIS_MODULE,
+		.digestsize		= SHA256_DIGEST_SIZE,
+		.init			= crypto_sha256_init,
+		.update			= crypto_sha256_update_arch,
+		.finup			= crypto_sha256_finup_arch,
+		.digest			= crypto_sha256_digest_arch,
+		.descsize		= sizeof(struct crypto_sha256_state),
+	},
+	{
+		.base.cra_name		= "sha224",
+		.base.cra_driver_name	= "sha224-" __stringify(ARCH),
+		.base.cra_priority	= 300,
+		.base.cra_flags		= CRYPTO_AHASH_ALG_BLOCK_ONLY |
+					  CRYPTO_AHASH_ALG_FINUP_MAX,
+		.base.cra_blocksize	= SHA224_BLOCK_SIZE,
+		.base.cra_module	= THIS_MODULE,
+		.digestsize		= SHA224_DIGEST_SIZE,
+		.init			= crypto_sha224_init,
+		.update			= crypto_sha256_update_arch,
+		.finup			= crypto_sha256_finup_arch,
+		.descsize		= sizeof(struct crypto_sha256_state),
+	},
 };
 
 static unsigned int num_algs;
 
@@ -224,9 +259,9 @@ static int __init crypto_sha256_mod_init(void)
 {
 	/* register the arch flavours only if they differ from generic */
 	num_algs = ARRAY_SIZE(algs);
-	BUILD_BUG_ON(ARRAY_SIZE(algs) % 2 != 0);
+	BUILD_BUG_ON(ARRAY_SIZE(algs) <= 2);
 	if (!sha256_is_arch_optimized())
-		num_algs /= 2;
+		num_algs -= 2;
 	return crypto_register_shashes(algs, ARRAY_SIZE(algs));
 }
 module_init(crypto_sha256_mod_init);
-- 
2.51.0

From 08811169ac016a234765e23deb45a5c8dd8aee6b Mon Sep 17 00:00:00 2001
From: Herbert Xu
Date: Sun, 4 May 2025 17:00:43 +0800
Subject: [PATCH 06/16] crypto: s390/hmac - Use API partial block handling

Use the Crypto API partial block handling.

Also switch to the generic export format.

Signed-off-by: Herbert Xu
---
 arch/s390/crypto/hmac_s390.c | 155 ++++++++++++++++++++++++-----------
 1 file changed, 108 insertions(+), 47 deletions(-)

diff --git a/arch/s390/crypto/hmac_s390.c b/arch/s390/crypto/hmac_s390.c
index e6edf1013228..93a1098d9f8d 100644
--- a/arch/s390/crypto/hmac_s390.c
+++ b/arch/s390/crypto/hmac_s390.c
@@ -9,10 +9,14 @@
 #define pr_fmt(fmt)	KMSG_COMPONENT ": " fmt
 
 #include <asm/cpacf.h>
-#include <crypto/sha2.h>
 #include <crypto/internal/hash.h>
+#include <crypto/hmac.h>
+#include <crypto/sha2.h>
 #include <linux/cpufeature.h>
+#include <linux/errno.h>
+#include <linux/kernel.h>
 #include <linux/module.h>
+#include <linux/string.h>
 
 /*
  * KMAC param block layout for sha2 function codes:
@@ -71,7 +75,6 @@ union s390_kmac_gr0 {
 struct s390_kmac_sha2_ctx {
 	u8 param[MAX_DIGEST_SIZE + MAX_IMBL_SIZE + MAX_BLOCK_SIZE];
 	union s390_kmac_gr0 gr0;
-	u8 buf[MAX_BLOCK_SIZE];
 	u64 buflen[2];
 };
 
@@ -95,8 +98,8 @@ static inline void kmac_sha2_set_imbl(u8 *param, u64 buflen_lo,
 	}
 }
 
-static int hash_key(const u8 *in, unsigned int inlen,
-		    u8 *digest, unsigned int digestsize)
+static int hash_data(const u8 *in, unsigned int inlen,
+		     u8 *digest, unsigned int digestsize, bool final)
 {
 	unsigned long func;
 	union {
@@ -123,19 +126,23 @@ static int hash_key(const u8 *in, unsigned int inlen,
 
 	switch (digestsize) {
 	case SHA224_DIGEST_SIZE:
-		func = CPACF_KLMD_SHA_256;
+		func = final ? CPACF_KLMD_SHA_256 : CPACF_KIMD_SHA_256;
 		PARAM_INIT(256, 224, inlen * 8);
+		if (!final)
+			digestsize = SHA256_DIGEST_SIZE;
 		break;
 	case SHA256_DIGEST_SIZE:
-		func = CPACF_KLMD_SHA_256;
+		func = final ? CPACF_KLMD_SHA_256 : CPACF_KIMD_SHA_256;
 		PARAM_INIT(256, 256, inlen * 8);
 		break;
 	case SHA384_DIGEST_SIZE:
-		func = CPACF_KLMD_SHA_512;
+		func = final ? CPACF_KLMD_SHA_512 : CPACF_KIMD_SHA_512;
 		PARAM_INIT(512, 384, inlen * 8);
+		if (!final)
+			digestsize = SHA512_DIGEST_SIZE;
 		break;
 	case SHA512_DIGEST_SIZE:
-		func = CPACF_KLMD_SHA_512;
+		func = final ? CPACF_KLMD_SHA_512 : CPACF_KIMD_SHA_512;
 		PARAM_INIT(512, 512, inlen * 8);
 		break;
 	default:
@@ -151,6 +158,12 @@ static int hash_key(const u8 *in, unsigned int inlen,
 	return 0;
 }
 
+static int hash_key(const u8 *in, unsigned int inlen,
+		    u8 *digest, unsigned int digestsize)
+{
+	return hash_data(in, inlen, digest, digestsize, true);
+}
+
 static int s390_hmac_sha2_setkey(struct crypto_shash *tfm,
 				 const u8 *key, unsigned int keylen)
 {
@@ -204,50 +217,31 @@ static int s390_hmac_sha2_update(struct shash_desc *desc,
 {
 	struct s390_kmac_sha2_ctx *ctx = shash_desc_ctx(desc);
 	unsigned int bs = crypto_shash_blocksize(desc->tfm);
-	unsigned int offset, n;
+	unsigned int n = round_down(len, bs);
 
-	/* check current buffer */
-	offset = ctx->buflen[0] % bs;
-	ctx->buflen[0] += len;
-	if (ctx->buflen[0] < len)
+	ctx->buflen[0] += n;
+	if (ctx->buflen[0] < n)
 		ctx->buflen[1]++;
-	if (offset + len < bs)
-		goto store;
 
-	/* process one stored block */
-	if (offset) {
-		n = bs - offset;
-		memcpy(ctx->buf + offset, data, n);
-		ctx->gr0.iimp = 1;
-		_cpacf_kmac(&ctx->gr0.reg, ctx->param, ctx->buf, bs);
-		data += n;
-		len -= n;
-		offset = 0;
-	}
-	/* process as many blocks as possible */
-	if (len >= bs) {
-		n = (len / bs) * bs;
-		ctx->gr0.iimp = 1;
-		_cpacf_kmac(&ctx->gr0.reg, ctx->param, data, n);
-		data += n;
-		len -= n;
-	}
-store:
-	/* store incomplete block in buffer */
-	if (len)
-		memcpy(ctx->buf + offset, data, len);
-	return 0;
+	/* process as many blocks as possible */
+	ctx->gr0.iimp = 1;
+	_cpacf_kmac(&ctx->gr0.reg, ctx->param, data, n);
+	return len - n;
 }
 
-static int s390_hmac_sha2_final(struct shash_desc *desc, u8 *out)
+static int s390_hmac_sha2_finup(struct shash_desc *desc, const u8 *src,
+				unsigned int len, u8 *out)
 {
 	struct s390_kmac_sha2_ctx *ctx = shash_desc_ctx(desc);
 	unsigned int bs = crypto_shash_blocksize(desc->tfm);
 
+	ctx->buflen[0] += len;
+	if (ctx->buflen[0] < len)
+		ctx->buflen[1]++;
+
 	ctx->gr0.iimp = 0;
 	kmac_sha2_set_imbl(ctx->param, ctx->buflen[0], ctx->buflen[1], bs);
-	_cpacf_kmac(&ctx->gr0.reg, ctx->param, ctx->buf, ctx->buflen[0] % bs);
+	_cpacf_kmac(&ctx->gr0.reg, ctx->param, src, len);
 	memcpy(out, ctx->param, crypto_shash_digestsize(desc->tfm));
 
 	return 0;
@@ -273,22 +267,89 @@ static int s390_hmac_sha2_digest(struct shash_desc *desc,
 	return 0;
 }
 
-#define S390_HMAC_SHA2_ALG(x) {					\
+static int s390_hmac_export_zero(struct shash_desc *desc, void *out)
+{
+	struct crypto_shash *tfm = desc->tfm;
+	u8 ipad[SHA512_BLOCK_SIZE];
+	struct s390_hmac_ctx *ctx;
+	unsigned int bs;
+	int err, i;
+
+	ctx = crypto_shash_ctx(tfm);
+	bs = crypto_shash_blocksize(tfm);
+	for (i = 0; i < bs; i++)
+		ipad[i] = ctx->key[i] ^ HMAC_IPAD_VALUE;
+
+	err = hash_data(ipad, bs, out, crypto_shash_digestsize(tfm), false);
+	memzero_explicit(ipad, sizeof(ipad));
+	return err;
+}
+
+static int s390_hmac_export(struct shash_desc *desc, void *out)
+{
+	struct s390_kmac_sha2_ctx *ctx = shash_desc_ctx(desc);
+	unsigned int bs = crypto_shash_blocksize(desc->tfm);
+	unsigned int ds = bs / 2;
+	union {
+		u8 *u8;
+		u64 *u64;
+	} p = { .u8 = out };
+	int err = 0;
+
+	if (!ctx->gr0.ikp)
+		err = s390_hmac_export_zero(desc, out);
+	else
+		memcpy(p.u8, ctx->param, ds);
+	p.u8 += ds;
+	put_unaligned(ctx->buflen[0], p.u64++);
+	if (ds == SHA512_DIGEST_SIZE)
+		put_unaligned(ctx->buflen[1], p.u64);
+	return err;
+}
+
+static int s390_hmac_import(struct shash_desc *desc, const void *in)
+{
+	struct s390_kmac_sha2_ctx *ctx = shash_desc_ctx(desc);
+	unsigned int bs = crypto_shash_blocksize(desc->tfm);
+	unsigned int ds = bs / 2;
+	union {
+		const u8 *u8;
+		const u64 *u64;
+	} p = { .u8 = in };
+	int err;
+
+	err = s390_hmac_sha2_init(desc);
+	memcpy(ctx->param, p.u8, ds);
+	p.u8 += ds;
+	ctx->buflen[0] = get_unaligned(p.u64++);
+	if (ds == SHA512_DIGEST_SIZE)
+		ctx->buflen[1] = get_unaligned(p.u64);
+	if (ctx->buflen[0] | ctx->buflen[1])
+		ctx->gr0.ikp = 1;
+	return err;
+}
+
+#define S390_HMAC_SHA2_ALG(x, ss) {				\
 	.fc = CPACF_KMAC_HMAC_SHA_##x,				\
 	.alg = {						\
 		.init = s390_hmac_sha2_init,			\
 		.update = s390_hmac_sha2_update,		\
-		.final = s390_hmac_sha2_final,			\
+		.finup = s390_hmac_sha2_finup,			\
 		.digest = s390_hmac_sha2_digest,		\
 		.setkey = s390_hmac_sha2_setkey,		\
+		.export = s390_hmac_export,			\
+		.import = s390_hmac_import,			\
 		.descsize = sizeof(struct s390_kmac_sha2_ctx),	\
 		.halg = {					\
+			.statesize = ss,			\
 			.digestsize = SHA##x##_DIGEST_SIZE,	\
 			.base = {				\
 				.cra_name = "hmac(sha" #x ")",	\
 				.cra_driver_name = "hmac_s390_sha" #x,	\
 				.cra_blocksize = SHA##x##_BLOCK_SIZE,	\
 				.cra_priority = 400,		\
+				.cra_flags = CRYPTO_AHASH_ALG_BLOCK_ONLY |	\
+					     CRYPTO_AHASH_ALG_FINUP_MAX,	\
 				.cra_ctxsize = sizeof(struct s390_hmac_ctx),	\
 				.cra_module = THIS_MODULE,	\
 			},					\
@@ -301,10 +362,10 @@ static struct s390_hmac_alg {
 	unsigned int fc;
 	struct shash_alg alg;
 } s390_hmac_algs[] = {
-	S390_HMAC_SHA2_ALG(224),
-	S390_HMAC_SHA2_ALG(256),
-	S390_HMAC_SHA2_ALG(384),
-	S390_HMAC_SHA2_ALG(512),
+	S390_HMAC_SHA2_ALG(224, sizeof(struct crypto_sha256_state)),
+	S390_HMAC_SHA2_ALG(256, sizeof(struct crypto_sha256_state)),
+	S390_HMAC_SHA2_ALG(384, SHA512_STATE_SIZE),
+	S390_HMAC_SHA2_ALG(512, SHA512_STATE_SIZE),
 };
 
 static __always_inline void _s390_hmac_algs_unregister(void)
-- 
2.51.0

From f1fb16efe50bb7953ef3c224e1b3802559d26cc3 Mon Sep 17 00:00:00 2001
From: Ethan Carter Edwards
Date: Sat, 3 May 2025 16:21:27 -0400
Subject: [PATCH 07/16] crypto: hisilicon/qm - remove sizeof(char)

sizeof(char) evaluates to 1.  Remove the redundant multiplication.

Signed-off-by: Ethan Carter Edwards
Signed-off-by: Herbert Xu
---
 drivers/crypto/hisilicon/qm.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/drivers/crypto/hisilicon/qm.c b/drivers/crypto/hisilicon/qm.c
index d3f5d108b898..80d57f0dbf26 100644
--- a/drivers/crypto/hisilicon/qm.c
+++ b/drivers/crypto/hisilicon/qm.c
@@ -862,7 +862,7 @@ int hisi_qm_set_algs(struct hisi_qm *qm, u64 alg_msk, const struct qm_dev_alg *d
 		return -EINVAL;
 	}
 
-	algs = devm_kzalloc(dev, QM_DEV_ALG_MAX_LEN * sizeof(char), GFP_KERNEL);
+	algs = devm_kzalloc(dev, QM_DEV_ALG_MAX_LEN, GFP_KERNEL);
 	if (!algs)
 		return -ENOMEM;
 
-- 
2.51.0

From 7c234e138c67c1157b17f1fab8cc95d62f8f4444 Mon Sep 17 00:00:00 2001
From: Ethan Carter Edwards
Date: Sat, 3 May 2025 16:21:28 -0400
Subject: [PATCH 08/16] crypto: hisilicon/qm - replace devm_kzalloc with
 devm_kcalloc

Replace devm_kzalloc that has an internal multiplication with
devm_kcalloc to improve code readability and safety from overflows.
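
In general form the overflow-safe pattern is (illustrative):

	/*
	 * devm_kcalloc() checks the multiplication for overflow and
	 * returns NULL instead of silently wrapping around.
	 */
	objs = devm_kcalloc(dev, nr_objs, sizeof(*objs), GFP_KERNEL);
	if (!objs)
		return -ENOMEM;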
Signed-off-by: Ethan Carter Edwards
Signed-off-by: Herbert Xu
---
 drivers/crypto/hisilicon/qm.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/drivers/crypto/hisilicon/qm.c b/drivers/crypto/hisilicon/qm.c
index 80d57f0dbf26..7c41f9593d03 100644
--- a/drivers/crypto/hisilicon/qm.c
+++ b/drivers/crypto/hisilicon/qm.c
@@ -5224,7 +5224,7 @@ static int qm_pre_store_caps(struct hisi_qm *qm)
 	size_t i, size;
 
 	size = ARRAY_SIZE(qm_cap_query_info);
-	qm_cap = devm_kzalloc(&pdev->dev, sizeof(*qm_cap) * size, GFP_KERNEL);
+	qm_cap = devm_kcalloc(&pdev->dev, sizeof(*qm_cap), size, GFP_KERNEL);
 	if (!qm_cap)
 		return -ENOMEM;
 
-- 
2.51.0

From ecd71c95a60e7298acfabe81189439f350bd0e18 Mon Sep 17 00:00:00 2001
From: Herbert Xu
Date: Sun, 4 May 2025 14:13:50 +0800
Subject: [PATCH 09/16] crypto: zynqmp-sha - Fix partial block implementation

The zynqmp-sha partial block implementation was based on an old design
of the partial block API, where the leftover calculation was done in
the Crypto API.  As the leftover calculation is now done by the
algorithm, fix this by passing the partial blocks to the fallback.

Also zero the stack descriptors.
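
Condensed, each operation now wraps the fallback like this (see the
diff below for the full version):

	SHASH_DESC_ON_STACK(fbdesc, fbtfm);
	int err;

	fbdesc->tfm = fbtfm;
	err = crypto_shash_import(fbdesc, shash_desc_ctx(desc)) ?:
	      crypto_shash_update(fbdesc, data, length);
	err = err ?: crypto_shash_export(fbdesc, shash_desc_ctx(desc));
	shash_desc_zero(fbdesc);	/* no hash state left on the stack */
	return err;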
Fixes: 201e9ec3b621 ("crypto: zynqmp-sha - Use API partial block handling")
Signed-off-by: Herbert Xu
---
 drivers/crypto/xilinx/zynqmp-sha.c | 30 +++++++++++++++++++-----------
 include/crypto/sha3.h              |  4 ++++
 2 files changed, 23 insertions(+), 11 deletions(-)

diff --git a/drivers/crypto/xilinx/zynqmp-sha.c b/drivers/crypto/xilinx/zynqmp-sha.c
index 67cf8d990a1d..5813017b6b79 100644
--- a/drivers/crypto/xilinx/zynqmp-sha.c
+++ b/drivers/crypto/xilinx/zynqmp-sha.c
@@ -59,7 +59,7 @@ static int zynqmp_sha_init_tfm(struct crypto_shash *hash)
 		return PTR_ERR(fallback_tfm);
 
 	if (crypto_shash_descsize(hash) <
-	    crypto_shash_descsize(tfm_ctx->fbk_tfm)) {
+	    crypto_shash_statesize(tfm_ctx->fbk_tfm)) {
 		crypto_free_shash(fallback_tfm);
 		return -EINVAL;
 	}
@@ -76,15 +76,24 @@ static void zynqmp_sha_exit_tfm(struct crypto_shash *hash)
 	crypto_free_shash(tfm_ctx->fbk_tfm);
 }
 
+static int zynqmp_sha_continue(struct shash_desc *desc,
+			       struct shash_desc *fbdesc, int err)
+{
+	err = err ?: crypto_shash_export(fbdesc, shash_desc_ctx(desc));
+	shash_desc_zero(fbdesc);
+	return err;
+}
+
 static int zynqmp_sha_init(struct shash_desc *desc)
 {
 	struct zynqmp_sha_tfm_ctx *tctx = crypto_shash_ctx(desc->tfm);
 	struct crypto_shash *fbtfm = tctx->fbk_tfm;
 	SHASH_DESC_ON_STACK(fbdesc, fbtfm);
+	int err;
 
 	fbdesc->tfm = fbtfm;
-	return crypto_shash_init(fbdesc) ?:
-	       crypto_shash_export_core(fbdesc, shash_desc_ctx(desc));
+	err = crypto_shash_init(fbdesc);
+	return zynqmp_sha_continue(desc, fbdesc, err);
 }
 
 static int zynqmp_sha_update(struct shash_desc *desc, const u8 *data, unsigned int length)
@@ -92,11 +101,12 @@ static int zynqmp_sha_update(struct shash_desc *desc, const u8 *data, unsigned i
 	struct zynqmp_sha_tfm_ctx *tctx = crypto_shash_ctx(desc->tfm);
 	struct crypto_shash *fbtfm = tctx->fbk_tfm;
 	SHASH_DESC_ON_STACK(fbdesc, fbtfm);
+	int err;
 
 	fbdesc->tfm = fbtfm;
-	return crypto_shash_import_core(fbdesc, shash_desc_ctx(desc)) ?:
-	       crypto_shash_update(fbdesc, data, length) ?:
-	       crypto_shash_export_core(fbdesc, shash_desc_ctx(desc));
+	err = crypto_shash_import(fbdesc, shash_desc_ctx(desc)) ?:
+	      crypto_shash_update(fbdesc, data, length);
+	return zynqmp_sha_continue(desc, fbdesc, err);
 }
 
 static int zynqmp_sha_finup(struct shash_desc *desc, const u8 *data, unsigned int length, u8 *out)
@@ -106,7 +116,7 @@ static int zynqmp_sha_finup(struct shash_desc *desc, const u8 *data, unsigned in
 	SHASH_DESC_ON_STACK(fbdesc, fbtfm);
 
 	fbdesc->tfm = fbtfm;
-	return crypto_shash_import_core(fbdesc, shash_desc_ctx(desc)) ?:
+	return crypto_shash_import(fbdesc, shash_desc_ctx(desc)) ?:
 	       crypto_shash_finup(fbdesc, data, length, out);
 }
 
@@ -160,16 +170,14 @@ static struct zynqmp_sha_drv_ctx sha3_drv_ctx = {
 	.digest = zynqmp_sha_digest,
 	.init_tfm = zynqmp_sha_init_tfm,
 	.exit_tfm = zynqmp_sha_exit_tfm,
-	.descsize = sizeof(struct sha3_state),
+	.descsize = SHA3_384_EXPORT_SIZE,
 	.digestsize = SHA3_384_DIGEST_SIZE,
 	.base = {
 		.cra_name = "sha3-384",
 		.cra_driver_name = "zynqmp-sha3-384",
 		.cra_priority = 300,
 		.cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY |
-			     CRYPTO_ALG_NEED_FALLBACK |
-			     CRYPTO_AHASH_ALG_BLOCK_ONLY |
-			     CRYPTO_AHASH_ALG_FINUP_MAX,
+			     CRYPTO_ALG_NEED_FALLBACK,
 		.cra_blocksize = SHA3_384_BLOCK_SIZE,
 		.cra_ctxsize = sizeof(struct zynqmp_sha_tfm_ctx),
 		.cra_module = THIS_MODULE,
diff --git a/include/crypto/sha3.h b/include/crypto/sha3.h
index 3c2559f51ada..41e1b83a6d91 100644
--- a/include/crypto/sha3.h
+++ b/include/crypto/sha3.h
@@ -9,15 +9,19 @@
 
 #define SHA3_224_DIGEST_SIZE	(224 / 8)
 #define SHA3_224_BLOCK_SIZE	(200 - 2 * SHA3_224_DIGEST_SIZE)
+#define SHA3_224_EXPORT_SIZE	SHA3_STATE_SIZE + SHA3_224_BLOCK_SIZE + 1
 
 #define SHA3_256_DIGEST_SIZE	(256 / 8)
 #define SHA3_256_BLOCK_SIZE	(200 - 2 * SHA3_256_DIGEST_SIZE)
+#define SHA3_256_EXPORT_SIZE	SHA3_STATE_SIZE + SHA3_256_BLOCK_SIZE + 1
 
 #define SHA3_384_DIGEST_SIZE	(384 / 8)
 #define SHA3_384_BLOCK_SIZE	(200 - 2 * SHA3_384_DIGEST_SIZE)
+#define SHA3_384_EXPORT_SIZE	SHA3_STATE_SIZE + SHA3_384_BLOCK_SIZE + 1
 
 #define SHA3_512_DIGEST_SIZE	(512 / 8)
 #define SHA3_512_BLOCK_SIZE	(200 - 2 * SHA3_512_DIGEST_SIZE)
+#define SHA3_512_EXPORT_SIZE	SHA3_STATE_SIZE + SHA3_512_BLOCK_SIZE + 1
 
 #define SHA3_STATE_SIZE		200
 
-- 
2.51.0

From 2b1a29ce3360570aeff053d1315cd504d94eab31 Mon Sep 17 00:00:00 2001
From: Herbert Xu
Date: Sun, 4 May 2025 21:33:14 +0800
Subject: [PATCH 10/16] crypto: shash - Cap state size to HASH_MAX_STATESIZE

Now that all shash algorithms have converted over to the generic
export format, limit the shash state size to HASH_MAX_STATESIZE.

Signed-off-by: Herbert Xu
---
 crypto/shash.c        | 2 ++
 include/crypto/hash.h | 5 ++++-
 2 files changed, 6 insertions(+), 1 deletion(-)

diff --git a/crypto/shash.c b/crypto/shash.c
index c4a724e55d7a..44a6df3132ad 100644
--- a/crypto/shash.c
+++ b/crypto/shash.c
@@ -479,6 +479,8 @@ static int shash_prepare_alg(struct shash_alg *alg)
 
 	if (alg->descsize > HASH_MAX_DESCSIZE)
 		return -EINVAL;
+	if (alg->statesize > HASH_MAX_STATESIZE)
+		return -EINVAL;
 
 	return 0;
 }
diff --git a/include/crypto/hash.h b/include/crypto/hash.h
index c2497c300a28..e0321b5ec363 100644
--- a/include/crypto/hash.h
+++ b/include/crypto/hash.h
@@ -167,8 +167,11 @@ struct shash_desc {
 
 #define HASH_MAX_DIGESTSIZE	 64
 
+/* Worst case is sha3-224. */
+#define HASH_MAX_STATESIZE	 200 + 144 + 1
+
 /*
- * Worst case is hmac(sha-224-s390).  Its context is a nested 'shash_desc'
+ * Worst case is hmac(sha3-224-s390).  Its context is a nested 'shash_desc'
  * containing a 'struct s390_sha_ctx'.
  */
 #define HASH_MAX_DESCSIZE	(sizeof(struct shash_desc) + 360)
-- 
2.51.0

From f4e365d5ca38941708fbe8356719e3436cef7627 Mon Sep 17 00:00:00 2001
From: Herbert Xu
Date: Sun, 4 May 2025 21:33:16 +0800
Subject: [PATCH 11/16] crypto: shash - Mark shash algorithms as REQ_VIRT

Mark shash algorithms with the REQ_VIRT bit as they can handle
virtual addresses as is.
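
This lets a caller hand a kernel virtual address straight to an ahash
request backed by an shash algorithm, without building a scatterlist
(illustrative; the padlock-sha patch below is a real user of
ahash_request_set_virt()):

	ahash_request_set_callback(req, 0, NULL, NULL);
	ahash_request_set_virt(req, data, out, len);
	err = crypto_ahash_digest(req);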
Signed-off-by: Herbert Xu
---
 crypto/shash.c | 1 +
 1 file changed, 1 insertion(+)

diff --git a/crypto/shash.c b/crypto/shash.c
index 44a6df3132ad..dee391d47f51 100644
--- a/crypto/shash.c
+++ b/crypto/shash.c
@@ -450,6 +450,7 @@ static int shash_prepare_alg(struct shash_alg *alg)
 
 	base->cra_type = &crypto_shash_type;
 	base->cra_flags |= CRYPTO_ALG_TYPE_SHASH;
+	base->cra_flags |= CRYPTO_ALG_REQ_VIRT;
 
 	/*
 	 * Handle missing optional functions.  For each one we can either
-- 
2.51.0

From fd66f2ab09b8305006764887cc47eeeb1ca5704b Mon Sep 17 00:00:00 2001
From: Herbert Xu
Date: Sun, 4 May 2025 21:33:18 +0800
Subject: [PATCH 12/16] crypto: ahash - Enforce MAX_SYNC_HASH_REQSIZE for sync
 ahash

As sync ahash algorithms (currently there are none) are used without
a fallback, ensure that they obey the MAX_SYNC_HASH_REQSIZE rule just
like shash algorithms.

Signed-off-by: Herbert Xu
---
 crypto/ahash.c | 27 ++++++++++++++++++---------
 1 file changed, 18 insertions(+), 9 deletions(-)

diff --git a/crypto/ahash.c b/crypto/ahash.c
index 57c131a13067..736e9fb5d0a4 100644
--- a/crypto/ahash.c
+++ b/crypto/ahash.c
@@ -760,23 +760,28 @@ static int crypto_ahash_init_tfm(struct crypto_tfm *tfm)
 
 	tfm->exit = crypto_ahash_exit_tfm;
 
-	if (!alg->init_tfm) {
-		if (!tfm->__crt_alg->cra_init)
-			return 0;
-
+	if (alg->init_tfm)
+		err = alg->init_tfm(hash);
+	else if (tfm->__crt_alg->cra_init)
 		err = tfm->__crt_alg->cra_init(tfm);
-		if (err)
-			goto out_free_sync_hash;
-
+	else
 		return 0;
-	}
 
-	err = alg->init_tfm(hash);
 	if (err)
 		goto out_free_sync_hash;
 
+	if (!ahash_is_async(hash) && crypto_ahash_reqsize(hash) >
+				     MAX_SYNC_HASH_REQSIZE)
+		goto out_exit_tfm;
+
 	return 0;
 
+out_exit_tfm:
+	if (alg->exit_tfm)
+		alg->exit_tfm(hash);
+	else if (tfm->__crt_alg->cra_exit)
+		tfm->__crt_alg->cra_exit(tfm);
+	err = -EINVAL;
 out_free_sync_hash:
 	crypto_free_ahash(fb);
 	return err;
@@ -954,6 +959,10 @@ static int ahash_prepare_alg(struct ahash_alg *alg)
 	if (base->cra_reqsize && base->cra_reqsize < alg->halg.statesize)
 		return -EINVAL;
 
+	if (!(base->cra_flags & CRYPTO_ALG_ASYNC) &&
+	    base->cra_reqsize > MAX_SYNC_HASH_REQSIZE)
+		return -EINVAL;
+
 	err = hash_prepare_alg(&alg->halg);
 	if (err)
 		return err;
-- 
2.51.0

From 1052671ca118b79fa3f5de281bba850aaf20bbf5 Mon Sep 17 00:00:00 2001
From: Herbert Xu
Date: Sun, 4 May 2025 21:33:21 +0800
Subject: [PATCH 13/16] crypto: ahash - Add core export and import

Add crypto_ahash_export_core and crypto_ahash_import_core.  For now
they only differ from the normal export/import functions when going
through shash.
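
Block-only drivers that track partial data themselves can move just
the core state between requests (illustrative):

	u8 state[HASH_MAX_STATESIZE];
	int err;

	err = crypto_ahash_export_core(req, state);	/* no partial block */
	if (!err)
		err = crypto_ahash_import_core(req2, state);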
Signed-off-by: Herbert Xu
---
 crypto/ahash.c        | 25 ++++++++++++++++++++++++-
 include/crypto/hash.h | 24 ++++++++++++++++++++++++
 2 files changed, 48 insertions(+), 1 deletion(-)

diff --git a/crypto/ahash.c b/crypto/ahash.c
index 736e9fb5d0a4..344bf1b43e71 100644
--- a/crypto/ahash.c
+++ b/crypto/ahash.c
@@ -698,6 +698,16 @@ static int ahash_def_finup(struct ahash_request *req)
 	return ahash_def_finup_finish1(req, err);
 }
 
+int crypto_ahash_export_core(struct ahash_request *req, void *out)
+{
+	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+
+	if (likely(tfm->using_shash))
+		return crypto_shash_export_core(ahash_request_ctx(req), out);
+	return crypto_ahash_alg(tfm)->export(req, out);
+}
+EXPORT_SYMBOL_GPL(crypto_ahash_export_core);
+
 int crypto_ahash_export(struct ahash_request *req, void *out)
 {
 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
@@ -708,6 +718,19 @@ int crypto_ahash_export(struct ahash_request *req, void *out)
 }
 EXPORT_SYMBOL_GPL(crypto_ahash_export);
 
+int crypto_ahash_import_core(struct ahash_request *req, const void *in)
+{
+	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+
+	if (likely(tfm->using_shash))
+		return crypto_shash_import_core(prepare_shash_desc(req, tfm),
+						in);
+	if (crypto_ahash_get_flags(tfm) & CRYPTO_TFM_NEED_KEY)
+		return -ENOKEY;
+	return crypto_ahash_alg(tfm)->import(req, in);
+}
+EXPORT_SYMBOL_GPL(crypto_ahash_import_core);
+
 int crypto_ahash_import(struct ahash_request *req, const void *in)
 {
 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
@@ -716,7 +739,7 @@ int crypto_ahash_import(struct ahash_request *req, const void *in)
 		return crypto_shash_import(prepare_shash_desc(req, tfm), in);
 	if (crypto_ahash_get_flags(tfm) & CRYPTO_TFM_NEED_KEY)
 		return -ENOKEY;
-	return crypto_ahash_alg(tfm)->import(req, in);
+	return crypto_ahash_import_core(req, in);
 }
 EXPORT_SYMBOL_GPL(crypto_ahash_import);
 
diff --git a/include/crypto/hash.h b/include/crypto/hash.h
index e0321b5ec363..1760662ad70a 100644
--- a/include/crypto/hash.h
+++ b/include/crypto/hash.h
@@ -506,6 +506,18 @@ int crypto_ahash_digest(struct ahash_request *req);
  */
 int crypto_ahash_export(struct ahash_request *req, void *out);
 
+/**
+ * crypto_ahash_export_core() - extract core state for message digest
+ * @req: reference to the ahash_request handle whose state is exported
+ * @out: output buffer of sufficient size that can hold the hash state
+ *
+ * Export the hash state without the partial block buffer.
+ *
+ * Context: Softirq or process context.
+ * Return: 0 if the export creation was successful; < 0 if an error occurred
+ */
+int crypto_ahash_export_core(struct ahash_request *req, void *out);
+
 /**
  * crypto_ahash_import() - import message digest state
  * @req: reference to ahash_request handle the state is imported into
@@ -519,6 +531,18 @@ int crypto_ahash_export(struct ahash_request *req, void *out);
  */
 int crypto_ahash_import(struct ahash_request *req, const void *in);
 
+/**
+ * crypto_ahash_import_core() - import core state
+ * @req: reference to ahash_request handle the state is imported into
+ * @in: buffer holding the state
+ *
+ * Import the hash state without the partial block buffer.
+ *
+ * Context: Softirq or process context.
+ * Return: 0 if the import was successful; < 0 if an error occurred
+ */
+int crypto_ahash_import_core(struct ahash_request *req, const void *in);
+
 /**
  * crypto_ahash_init() - (re)initialize message digest handle
  * @req: ahash_request handle that already is initialized with all necessary
-- 
2.51.0

From 88bca957e87e9a8a274213db257a744170b7248a Mon Sep 17 00:00:00 2001
From: Herbert Xu
Date: Sun, 4 May 2025 21:33:23 +0800
Subject: [PATCH 14/16] crypto: ahash - Add HASH_REQUEST_ZERO

Add a helper to zero hash stack requests that were never cloned
off the stack.

Signed-off-by: Herbert Xu
---
 include/crypto/internal/hash.h | 4 ++++
 1 file changed, 4 insertions(+)

diff --git a/include/crypto/internal/hash.h b/include/crypto/internal/hash.h
index e911f32f46dc..f2bbdb74e11a 100644
--- a/include/crypto/internal/hash.h
+++ b/include/crypto/internal/hash.h
@@ -301,5 +301,9 @@ static inline unsigned int crypto_shash_coresize(struct crypto_shash *tfm)
 	return crypto_shash_statesize(tfm) - crypto_shash_blocksize(tfm) - 1;
 }
 
+/* This can only be used if the request was never cloned. */
+#define HASH_REQUEST_ZERO(name) \
+	memzero_explicit(__##name##_req, sizeof(__##name##_req))
+
 #endif	/* _CRYPTO_INTERNAL_HASH_H */
-- 
2.51.0

From 870c1f0dc2a5e67eb0697ae14365b930684ddfa5 Mon Sep 17 00:00:00 2001
From: Herbert Xu
Date: Sun, 4 May 2025 21:33:25 +0800
Subject: [PATCH 15/16] crypto: padlock-sha - Use core import and export for
 fallback

As padlock-sha is block-only, it needs to use core import and export
on the fallback.

Also call sha256_block_init instead of sha256_init, although this is
harmless as sha256_init doesn't write into the partial block area.

Fixes: 63dc06cd12f9 ("crypto: padlock-sha - Use API partial block handling")
Signed-off-by: Herbert Xu
---
 drivers/crypto/padlock-sha.c | 34 ++++++++++++++++++++--------------
 1 file changed, 20 insertions(+), 14 deletions(-)

diff --git a/drivers/crypto/padlock-sha.c b/drivers/crypto/padlock-sha.c
index c89b9c6b5f4c..329f60ad422e 100644
--- a/drivers/crypto/padlock-sha.c
+++ b/drivers/crypto/padlock-sha.c
@@ -42,27 +42,33 @@ static int padlock_sha1_init(struct shash_desc *desc)
 
 static int padlock_sha256_init(struct shash_desc *desc)
 {
-	struct sha256_state *sctx = padlock_shash_desc_ctx(desc);
+	struct crypto_sha256_state *sctx = padlock_shash_desc_ctx(desc);
 
-	sha256_init(sctx);
+	sha256_block_init(sctx);
 	return 0;
 }
 
 static int padlock_sha_update(struct shash_desc *desc,
 			      const u8 *data, unsigned int length)
 {
-	struct padlock_sha_ctx *ctx = crypto_shash_ctx(desc->tfm);
 	u8 *state = padlock_shash_desc_ctx(desc);
-	HASH_REQUEST_ON_STACK(req, ctx->fallback);
-	int remain;
+	struct crypto_shash *tfm = desc->tfm;
+	int err, remain;
+
+	remain = length - round_down(length, crypto_shash_blocksize(tfm));
+	{
+		struct padlock_sha_ctx *ctx = crypto_shash_ctx(tfm);
+		HASH_REQUEST_ON_STACK(req, ctx->fallback);
+
+		ahash_request_set_callback(req, 0, NULL, NULL);
+		ahash_request_set_virt(req, data, NULL, length - remain);
+		err = crypto_ahash_import_core(req, state) ?:
+		      crypto_ahash_update(req) ?:
+		      crypto_ahash_export_core(req, state);
+		HASH_REQUEST_ZERO(req);
+	}
 
-	ahash_request_set_callback(req, 0, NULL, NULL);
-	ahash_request_set_virt(req, data, NULL, length);
-	remain = crypto_ahash_import(req, state) ?:
-		 crypto_ahash_update(req);
-	if (remain < 0)
-		return remain;
-	return crypto_ahash_export(req, state) ?: remain;
+	return err ?: remain;
 }
 
 static int padlock_sha_export(struct shash_desc *desc, void *out)
@@ -101,7 +107,7 @@ static int padlock_sha_finup(struct shash_desc *desc, const u8 *in,
 
 	ahash_request_set_callback(req, 0, NULL, NULL);
 	ahash_request_set_virt(req, in, out, count);
-	return crypto_ahash_import(req, padlock_shash_desc_ctx(desc)) ?:
+	return crypto_ahash_import_core(req, padlock_shash_desc_ctx(desc)) ?:
 	       crypto_ahash_finup(req);
 }
 
@@ -165,7 +171,7 @@ static int padlock_init_tfm(struct crypto_shash *hash)
 		return PTR_ERR(fallback_tfm);
 	}
 
-	if (crypto_shash_statesize(hash) <
+	if (crypto_shash_statesize(hash) !=
 	    crypto_ahash_statesize(fallback_tfm)) {
 		crypto_free_ahash(fallback_tfm);
 		return -EINVAL;
 	}
-- 
2.51.0

From 64745a9ca890ed60d78162ec511e1983e1946d73 Mon Sep 17 00:00:00 2001
From: Herbert Xu
Date: Mon, 5 May 2025 20:54:41 +0800
Subject: [PATCH 16/16] crypto: s390/sha512 - Initialise upper counter to zero
 for sha384

Initialise the upper 64 bits of the bit counter (count_hi) to zero
in sha384_init.

Also change the state initialisation to use ctx->sha512.state instead
of ctx->state for consistency.

Fixes: 572b5c4682c7 ("crypto: s390/sha512 - Use API partial block handling")
Reported-by: Ingo Franzki
Reported-by: Harald Freudenberger
Signed-off-by: Herbert Xu
---
 arch/s390/crypto/sha512_s390.c | 17 +++++++++--------
 1 file changed, 9 insertions(+), 8 deletions(-)

diff --git a/arch/s390/crypto/sha512_s390.c b/arch/s390/crypto/sha512_s390.c
index 3c5175e6dda6..33711a29618c 100644
--- a/arch/s390/crypto/sha512_s390.c
+++ b/arch/s390/crypto/sha512_s390.c
@@ -86,15 +86,16 @@ static int sha384_init(struct shash_desc *desc)
 {
 	struct s390_sha_ctx *ctx = shash_desc_ctx(desc);
 
-	*(__u64 *)&ctx->state[0] = SHA384_H0;
-	*(__u64 *)&ctx->state[2] = SHA384_H1;
-	*(__u64 *)&ctx->state[4] = SHA384_H2;
-	*(__u64 *)&ctx->state[6] = SHA384_H3;
-	*(__u64 *)&ctx->state[8] = SHA384_H4;
-	*(__u64 *)&ctx->state[10] = SHA384_H5;
-	*(__u64 *)&ctx->state[12] = SHA384_H6;
-	*(__u64 *)&ctx->state[14] = SHA384_H7;
+	ctx->sha512.state[0] = SHA384_H0;
+	ctx->sha512.state[1] = SHA384_H1;
+	ctx->sha512.state[2] = SHA384_H2;
+	ctx->sha512.state[3] = SHA384_H3;
+	ctx->sha512.state[4] = SHA384_H4;
+	ctx->sha512.state[5] = SHA384_H5;
+	ctx->sha512.state[6] = SHA384_H6;
+	ctx->sha512.state[7] = SHA384_H7;
 	ctx->count = 0;
+	ctx->sha512.count_hi = 0;
 	ctx->func = CPACF_KIMD_SHA_512;
 
 	return 0;
-- 
2.51.0