From 036454de0fec8d9789f3730f701e8df6049b5635 Mon Sep 17 00:00:00 2001 From: "Rob Herring (Arm)" Date: Tue, 13 May 2025 13:41:10 -0500 Subject: [PATCH 01/16] dt-bindings: crypto: Convert hisilicon,hip0{6,7}-sec to DT schema Convert the HiSilicon HIP06/7 Security Accelerator binding to DT schema format. It's a straight forward conversion. Signed-off-by: Rob Herring (Arm) Signed-off-by: Herbert Xu --- .../bindings/crypto/hisilicon,hip06-sec.yaml | 134 ++++++++++++++++++ .../bindings/crypto/hisilicon,hip07-sec.txt | 67 --------- 2 files changed, 134 insertions(+), 67 deletions(-) create mode 100644 Documentation/devicetree/bindings/crypto/hisilicon,hip06-sec.yaml delete mode 100644 Documentation/devicetree/bindings/crypto/hisilicon,hip07-sec.txt diff --git a/Documentation/devicetree/bindings/crypto/hisilicon,hip06-sec.yaml b/Documentation/devicetree/bindings/crypto/hisilicon,hip06-sec.yaml new file mode 100644 index 000000000000..2bfac9d1c020 --- /dev/null +++ b/Documentation/devicetree/bindings/crypto/hisilicon,hip06-sec.yaml @@ -0,0 +1,134 @@ +# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) +%YAML 1.2 +--- +$id: http://devicetree.org/schemas/crypto/hisilicon,hip06-sec.yaml# +$schema: http://devicetree.org/meta-schemas/core.yaml# + +title: Hisilicon hip06/hip07 Security Accelerator + +maintainers: + - Jonathan Cameron + +properties: + compatible: + enum: + - hisilicon,hip06-sec + - hisilicon,hip07-sec + + reg: + items: + - description: Registers for backend processing engines + - description: Registers for common functionality + - description: Registers for queue 0 + - description: Registers for queue 1 + - description: Registers for queue 2 + - description: Registers for queue 3 + - description: Registers for queue 4 + - description: Registers for queue 5 + - description: Registers for queue 6 + - description: Registers for queue 7 + - description: Registers for queue 8 + - description: Registers for queue 9 + - description: Registers for queue 10 + - description: Registers for queue 11 + - description: Registers for queue 12 + - description: Registers for queue 13 + - description: Registers for queue 14 + - description: Registers for queue 15 + + interrupts: + items: + - description: SEC unit error queue interrupt + - description: Completion interrupt for queue 0 + - description: Error interrupt for queue 0 + - description: Completion interrupt for queue 1 + - description: Error interrupt for queue 1 + - description: Completion interrupt for queue 2 + - description: Error interrupt for queue 2 + - description: Completion interrupt for queue 3 + - description: Error interrupt for queue 3 + - description: Completion interrupt for queue 4 + - description: Error interrupt for queue 4 + - description: Completion interrupt for queue 5 + - description: Error interrupt for queue 5 + - description: Completion interrupt for queue 6 + - description: Error interrupt for queue 6 + - description: Completion interrupt for queue 7 + - description: Error interrupt for queue 7 + - description: Completion interrupt for queue 8 + - description: Error interrupt for queue 8 + - description: Completion interrupt for queue 9 + - description: Error interrupt for queue 9 + - description: Completion interrupt for queue 10 + - description: Error interrupt for queue 10 + - description: Completion interrupt for queue 11 + - description: Error interrupt for queue 11 + - description: Completion interrupt for queue 12 + - description: Error interrupt for queue 12 + - description: Completion interrupt for queue 
13 + - description: Error interrupt for queue 13 + - description: Completion interrupt for queue 14 + - description: Error interrupt for queue 14 + - description: Completion interrupt for queue 15 + - description: Error interrupt for queue 15 + + dma-coherent: true + + iommus: + maxItems: 1 + +required: + - compatible + - reg + - interrupts + - dma-coherent + +additionalProperties: false + +examples: + - | + bus { + #address-cells = <2>; + #size-cells = <2>; + + crypto@400d2000000 { + compatible = "hisilicon,hip07-sec"; + reg = <0x400 0xd0000000 0x0 0x10000 + 0x400 0xd2000000 0x0 0x10000 + 0x400 0xd2010000 0x0 0x10000 + 0x400 0xd2020000 0x0 0x10000 + 0x400 0xd2030000 0x0 0x10000 + 0x400 0xd2040000 0x0 0x10000 + 0x400 0xd2050000 0x0 0x10000 + 0x400 0xd2060000 0x0 0x10000 + 0x400 0xd2070000 0x0 0x10000 + 0x400 0xd2080000 0x0 0x10000 + 0x400 0xd2090000 0x0 0x10000 + 0x400 0xd20a0000 0x0 0x10000 + 0x400 0xd20b0000 0x0 0x10000 + 0x400 0xd20c0000 0x0 0x10000 + 0x400 0xd20d0000 0x0 0x10000 + 0x400 0xd20e0000 0x0 0x10000 + 0x400 0xd20f0000 0x0 0x10000 + 0x400 0xd2100000 0x0 0x10000>; + interrupts = <576 4>, + <577 1>, <578 4>, + <579 1>, <580 4>, + <581 1>, <582 4>, + <583 1>, <584 4>, + <585 1>, <586 4>, + <587 1>, <588 4>, + <589 1>, <590 4>, + <591 1>, <592 4>, + <593 1>, <594 4>, + <595 1>, <596 4>, + <597 1>, <598 4>, + <599 1>, <600 4>, + <601 1>, <602 4>, + <603 1>, <604 4>, + <605 1>, <606 4>, + <607 1>, <608 4>; + dma-coherent; + iommus = <&p1_smmu_alg_a 0x600>; + }; + }; diff --git a/Documentation/devicetree/bindings/crypto/hisilicon,hip07-sec.txt b/Documentation/devicetree/bindings/crypto/hisilicon,hip07-sec.txt deleted file mode 100644 index d28fd1af01b4..000000000000 --- a/Documentation/devicetree/bindings/crypto/hisilicon,hip07-sec.txt +++ /dev/null @@ -1,67 +0,0 @@ -* Hisilicon hip07 Security Accelerator (SEC) - -Required properties: -- compatible: Must contain one of - - "hisilicon,hip06-sec" - - "hisilicon,hip07-sec" -- reg: Memory addresses and lengths of the memory regions through which - this device is controlled. - Region 0 has registers to control the backend processing engines. - Region 1 has registers for functionality common to all queues. - Regions 2-18 have registers for the 16 individual queues which are isolated - both in hardware and within the driver. -- interrupts: Interrupt specifiers. - Refer to interrupt-controller/interrupts.txt for generic interrupt client node - bindings. - Interrupt 0 is for the SEC unit error queue. - Interrupt 2N + 1 is the completion interrupt for queue N. - Interrupt 2N + 2 is the error interrupt for queue N. -- dma-coherent: The driver assumes coherent dma is possible. - -Optional properties: -- iommus: The SEC units are behind smmu-v3 iommus. - Refer to iommu/arm,smmu-v3.txt for more information. 
- -Example: - -p1_sec_a: crypto@400d2000000 { - compatible = "hisilicon,hip07-sec"; - reg = <0x400 0xd0000000 0x0 0x10000 - 0x400 0xd2000000 0x0 0x10000 - 0x400 0xd2010000 0x0 0x10000 - 0x400 0xd2020000 0x0 0x10000 - 0x400 0xd2030000 0x0 0x10000 - 0x400 0xd2040000 0x0 0x10000 - 0x400 0xd2050000 0x0 0x10000 - 0x400 0xd2060000 0x0 0x10000 - 0x400 0xd2070000 0x0 0x10000 - 0x400 0xd2080000 0x0 0x10000 - 0x400 0xd2090000 0x0 0x10000 - 0x400 0xd20a0000 0x0 0x10000 - 0x400 0xd20b0000 0x0 0x10000 - 0x400 0xd20c0000 0x0 0x10000 - 0x400 0xd20d0000 0x0 0x10000 - 0x400 0xd20e0000 0x0 0x10000 - 0x400 0xd20f0000 0x0 0x10000 - 0x400 0xd2100000 0x0 0x10000>; - interrupt-parent = <&p1_mbigen_sec_a>; - iommus = <&p1_smmu_alg_a 0x600>; - dma-coherent; - interrupts = <576 4>, - <577 1>, <578 4>, - <579 1>, <580 4>, - <581 1>, <582 4>, - <583 1>, <584 4>, - <585 1>, <586 4>, - <587 1>, <588 4>, - <589 1>, <590 4>, - <591 1>, <592 4>, - <593 1>, <594 4>, - <595 1>, <596 4>, - <597 1>, <598 4>, - <599 1>, <600 4>, - <601 1>, <602 4>, - <603 1>, <604 4>, - <605 1>, <606 4>, - <607 1>, <608 4>; -}; -- 2.51.0 From 7eff621c460249fbbd1775faa7d4245af1b23d8d Mon Sep 17 00:00:00 2001 From: "Rob Herring (Arm)" Date: Tue, 13 May 2025 13:41:16 -0500 Subject: [PATCH 02/16] dt-bindings: crypto: Convert img,hash-accelerator to DT schema Convert the Imagination Technologies hardware hash accelerator binding to DT schema format. It's a straight forward conversion. Signed-off-by: Rob Herring (Arm) Signed-off-by: Herbert Xu --- .../bindings/crypto/img,hash-accelerator.yaml | 69 +++++++++++++++++++ .../devicetree/bindings/crypto/img-hash.txt | 27 -------- 2 files changed, 69 insertions(+), 27 deletions(-) create mode 100644 Documentation/devicetree/bindings/crypto/img,hash-accelerator.yaml delete mode 100644 Documentation/devicetree/bindings/crypto/img-hash.txt diff --git a/Documentation/devicetree/bindings/crypto/img,hash-accelerator.yaml b/Documentation/devicetree/bindings/crypto/img,hash-accelerator.yaml new file mode 100644 index 000000000000..46617561ef94 --- /dev/null +++ b/Documentation/devicetree/bindings/crypto/img,hash-accelerator.yaml @@ -0,0 +1,69 @@ +# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) +%YAML 1.2 +--- +$id: http://devicetree.org/schemas/img,hash-accelerator.yaml# +$schema: http://devicetree.org/meta-schemas/core.yaml# + +title: Imagination Technologies hardware hash accelerator + +maintainers: + - James Hartley + +description: + The hash accelerator provides hardware hashing acceleration for + SHA1, SHA224, SHA256 and MD5 hashes. 
+ +properties: + compatible: + const: img,hash-accelerator + + reg: + items: + - description: Register base address and size + - description: DMA port specifier + + interrupts: + maxItems: 1 + + dmas: + maxItems: 1 + + dma-names: + items: + - const: tx + + clocks: + items: + - description: System clock for hash block registers + - description: Hash clock for data path + + clock-names: + items: + - const: sys + - const: hash + +additionalProperties: false + +required: + - compatible + - reg + - interrupts + - dmas + - dma-names + - clocks + - clock-names + +examples: + - | + #include + #include + + hash@18149600 { + compatible = "img,hash-accelerator"; + reg = <0x18149600 0x100>, <0x18101100 0x4>; + interrupts = ; + dmas = <&dma 8 0xffffffff 0>; + dma-names = "tx"; + clocks = <&cr_periph SYS_CLK_HASH>, <&clk_periph PERIPH_CLK_ROM>; + clock-names = "sys", "hash"; + }; diff --git a/Documentation/devicetree/bindings/crypto/img-hash.txt b/Documentation/devicetree/bindings/crypto/img-hash.txt deleted file mode 100644 index 91a3d757d641..000000000000 --- a/Documentation/devicetree/bindings/crypto/img-hash.txt +++ /dev/null @@ -1,27 +0,0 @@ -Imagination Technologies hardware hash accelerator - -The hash accelerator provides hardware hashing acceleration for -SHA1, SHA224, SHA256 and MD5 hashes - -Required properties: - -- compatible : "img,hash-accelerator" -- reg : Offset and length of the register set for the module, and the DMA port -- interrupts : The designated IRQ line for the hashing module. -- dmas : DMA specifier as per Documentation/devicetree/bindings/dma/dma.txt -- dma-names : Should be "tx" -- clocks : Clock specifiers -- clock-names : "sys" Used to clock the hash block registers - "hash" Used to clock data through the accelerator - -Example: - - hash: hash@18149600 { - compatible = "img,hash-accelerator"; - reg = <0x18149600 0x100>, <0x18101100 0x4>; - interrupts = ; - dmas = <&dma 8 0xffffffff 0>; - dma-names = "tx"; - clocks = <&cr_periph SYS_CLK_HASH>, <&clk_periph PERIPH_CLK_ROM>; - clock-names = "sys", "hash"; - }; -- 2.51.0 From 7d39f32c8c77ad14bfdb49f41d87c97024d0eb63 Mon Sep 17 00:00:00 2001 From: "Rob Herring (Arm)" Date: Tue, 13 May 2025 13:41:23 -0500 Subject: [PATCH 03/16] dt-bindings: crypto: Convert Marvell CESA to DT schema Convert the Marvell CESA binding to DT schema format. The marvell-cesa.txt and mv_cesa.txt are duplicate bindings. The clock properties are quite varied for each platform hence the if/then schemas. The old binding was fairly accurate with reality. The original binding didn't mention there is 1 interrupt per CESA engine. Based on users, there's a maximum of 2 engines. 
Signed-off-by: Rob Herring (Arm) Signed-off-by: Herbert Xu --- .../bindings/crypto/marvell,orion-crypto.yaml | 133 ++++++++++++++++++ .../bindings/crypto/marvell-cesa.txt | 44 ------ .../devicetree/bindings/crypto/mv_cesa.txt | 32 ----- 3 files changed, 133 insertions(+), 76 deletions(-) create mode 100644 Documentation/devicetree/bindings/crypto/marvell,orion-crypto.yaml delete mode 100644 Documentation/devicetree/bindings/crypto/marvell-cesa.txt delete mode 100644 Documentation/devicetree/bindings/crypto/mv_cesa.txt diff --git a/Documentation/devicetree/bindings/crypto/marvell,orion-crypto.yaml b/Documentation/devicetree/bindings/crypto/marvell,orion-crypto.yaml new file mode 100644 index 000000000000..b44d36c50ec4 --- /dev/null +++ b/Documentation/devicetree/bindings/crypto/marvell,orion-crypto.yaml @@ -0,0 +1,133 @@ +# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) +%YAML 1.2 +--- +$id: http://devicetree.org/schemas/crypto/marvell,orion-crypto.yaml# +$schema: http://devicetree.org/meta-schemas/core.yaml# + +title: Marvell Cryptographic Engines And Security Accelerator + +maintainers: + - Andrew Lunn + - Boris Brezillon + +description: | + Marvell Cryptographic Engines And Security Accelerator + +properties: + compatible: + enum: + - marvell,armada-370-crypto + - marvell,armada-xp-crypto + - marvell,armada-375-crypto + - marvell,armada-38x-crypto + - marvell,dove-crypto + - marvell,kirkwood-crypto + - marvell,orion-crypto + + reg: + minItems: 1 + items: + - description: Registers region + - description: SRAM region + deprecated: true + + reg-names: + minItems: 1 + items: + - const: regs + - const: sram + deprecated: true + + interrupts: + description: One interrupt for each CESA engine + minItems: 1 + maxItems: 2 + + clocks: + description: One or two clocks for each CESA engine + minItems: 1 + maxItems: 4 + + clock-names: + minItems: 1 + items: + - const: cesa0 + - const: cesa1 + - const: cesaz0 + - const: cesaz1 + + marvell,crypto-srams: + description: Phandle(s) to crypto SRAM. + $ref: /schemas/types.yaml#/definitions/phandle-array + minItems: 1 + maxItems: 2 + items: + maxItems: 1 + + marvell,crypto-sram-size: + description: SRAM size reserved for crypto operations. 
+ $ref: /schemas/types.yaml#/definitions/uint32 + default: 0x800 + +required: + - compatible + - reg + - reg-names + - interrupts + - marvell,crypto-srams + +allOf: + - if: + not: + properties: + compatible: + enum: + - marvell,kirkwood-crypto + - marvell,orion-crypto + then: + required: + - clocks + - if: + properties: + compatible: + contains: + enum: + - marvell,armada-370-crypto + - marvell,armada-375-crypto + - marvell,armada-38x-crypto + - marvell,armada-xp-crypto + then: + required: + - clock-names + - if: + properties: + compatible: + contains: + enum: + - marvell,armada-375-crypto + - marvell,armada-38x-crypto + then: + properties: + clocks: + minItems: 4 + clock-names: + minItems: 4 + else: + properties: + clocks: + maxItems: 2 + clock-names: + maxItems: 2 + +additionalProperties: false + +examples: + - | + crypto@30000 { + compatible = "marvell,orion-crypto"; + reg = <0x30000 0x10000>; + reg-names = "regs"; + interrupts = <22>; + marvell,crypto-srams = <&crypto_sram>; + marvell,crypto-sram-size = <0x600>; + }; diff --git a/Documentation/devicetree/bindings/crypto/marvell-cesa.txt b/Documentation/devicetree/bindings/crypto/marvell-cesa.txt deleted file mode 100644 index 28d3f2496b89..000000000000 --- a/Documentation/devicetree/bindings/crypto/marvell-cesa.txt +++ /dev/null @@ -1,44 +0,0 @@ -Marvell Cryptographic Engines And Security Accelerator - -Required properties: -- compatible: should be one of the following string - "marvell,orion-crypto" - "marvell,kirkwood-crypto" - "marvell,dove-crypto" - "marvell,armada-370-crypto" - "marvell,armada-xp-crypto" - "marvell,armada-375-crypto" - "marvell,armada-38x-crypto" -- reg: base physical address of the engine and length of memory mapped - region. Can also contain an entry for the SRAM attached to the CESA, - but this representation is deprecated and marvell,crypto-srams should - be used instead -- reg-names: "regs". Can contain an "sram" entry, but this representation - is deprecated and marvell,crypto-srams should be used instead -- interrupts: interrupt number -- clocks: reference to the crypto engines clocks. This property is not - required for orion and kirkwood platforms -- clock-names: "cesaX" and "cesazX", X should be replaced by the crypto engine - id. - This property is not required for the orion and kirkwoord - platforms. - "cesazX" clocks are not required on armada-370 platforms -- marvell,crypto-srams: phandle to crypto SRAM definitions - -Optional properties: -- marvell,crypto-sram-size: SRAM size reserved for crypto operations, if not - specified the whole SRAM is used (2KB) - - -Examples: - - crypto@90000 { - compatible = "marvell,armada-xp-crypto"; - reg = <0x90000 0x10000>; - reg-names = "regs"; - interrupts = <48>, <49>; - clocks = <&gateclk 23>, <&gateclk 23>; - clock-names = "cesa0", "cesa1"; - marvell,crypto-srams = <&crypto_sram0>, <&crypto_sram1>; - marvell,crypto-sram-size = <0x600>; - }; diff --git a/Documentation/devicetree/bindings/crypto/mv_cesa.txt b/Documentation/devicetree/bindings/crypto/mv_cesa.txt deleted file mode 100644 index d9b92e2f3138..000000000000 --- a/Documentation/devicetree/bindings/crypto/mv_cesa.txt +++ /dev/null @@ -1,32 +0,0 @@ -Marvell Cryptographic Engines And Security Accelerator - -Required properties: -- compatible: should be one of the following string - "marvell,orion-crypto" - "marvell,kirkwood-crypto" - "marvell,dove-crypto" -- reg: base physical address of the engine and length of memory mapped - region. 
Can also contain an entry for the SRAM attached to the CESA, - but this representation is deprecated and marvell,crypto-srams should - be used instead -- reg-names: "regs". Can contain an "sram" entry, but this representation - is deprecated and marvell,crypto-srams should be used instead -- interrupts: interrupt number -- clocks: reference to the crypto engines clocks. This property is only - required for Dove platforms -- marvell,crypto-srams: phandle to crypto SRAM definitions - -Optional properties: -- marvell,crypto-sram-size: SRAM size reserved for crypto operations, if not - specified the whole SRAM is used (2KB) - -Examples: - - crypto@30000 { - compatible = "marvell,orion-crypto"; - reg = <0x30000 0x10000>; - reg-names = "regs"; - interrupts = <22>; - marvell,crypto-srams = <&crypto_sram>; - marvell,crypto-sram-size = <0x600>; - }; -- 2.51.0 From cd5a4d53069ccd7cd48d201c981d5a8a99c93fc4 Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Thu, 15 May 2025 13:54:33 +0800 Subject: [PATCH 04/16] crypto: hash - Move core export and import into internel/hash.h The core export and import functions are targeted at implementors so move them into internal/hash.h. Signed-off-by: Herbert Xu --- include/crypto/hash.h | 48 ---------------------------------- include/crypto/internal/hash.h | 48 ++++++++++++++++++++++++++++++++++ 2 files changed, 48 insertions(+), 48 deletions(-) diff --git a/include/crypto/hash.h b/include/crypto/hash.h index 1760662ad70a..9fc9daaaaab4 100644 --- a/include/crypto/hash.h +++ b/include/crypto/hash.h @@ -506,18 +506,6 @@ int crypto_ahash_digest(struct ahash_request *req); */ int crypto_ahash_export(struct ahash_request *req, void *out); -/** - * crypto_ahash_export_core() - extract core state for message digest - * @req: reference to the ahash_request handle whose state is exported - * @out: output buffer of sufficient size that can hold the hash state - * - * Export the hash state without the partial block buffer. - * - * Context: Softirq or process context. - * Return: 0 if the export creation was successful; < 0 if an error occurred - */ -int crypto_ahash_export_core(struct ahash_request *req, void *out); - /** * crypto_ahash_import() - import message digest state * @req: reference to ahash_request handle the state is imported into @@ -531,18 +519,6 @@ int crypto_ahash_export_core(struct ahash_request *req, void *out); */ int crypto_ahash_import(struct ahash_request *req, const void *in); -/** - * crypto_ahash_import_core() - import core state - * @req: reference to ahash_request handle the state is imported into - * @in: buffer holding the state - * - * Import the hash state without the partial block buffer. - * - * Context: Softirq or process context. - * Return: 0 if the import was successful; < 0 if an error occurred - */ -int crypto_ahash_import_core(struct ahash_request *req, const void *in); - /** * crypto_ahash_init() - (re)initialize message digest handle * @req: ahash_request handle that already is initialized with all necessary @@ -933,18 +909,6 @@ int crypto_hash_digest(struct crypto_ahash *tfm, const u8 *data, */ int crypto_shash_export(struct shash_desc *desc, void *out); -/** - * crypto_shash_export_core() - extract core state for message digest - * @desc: reference to the operational state handle whose state is exported - * @out: output buffer of sufficient size that can hold the hash state - * - * Export the hash state without the partial block buffer. - * - * Context: Softirq or process context. 
- * Return: 0 if the export creation was successful; < 0 if an error occurred - */ -int crypto_shash_export_core(struct shash_desc *desc, void *out); - /** * crypto_shash_import() - import operational state * @desc: reference to the operational state handle the state imported into @@ -959,18 +923,6 @@ int crypto_shash_export_core(struct shash_desc *desc, void *out); */ int crypto_shash_import(struct shash_desc *desc, const void *in); -/** - * crypto_shash_import_core() - import core state - * @desc: reference to the operational state handle the state imported into - * @in: buffer holding the state - * - * Import the hash state without the partial block buffer. - * - * Context: Softirq or process context. - * Return: 0 if the import was successful; < 0 if an error occurred - */ -int crypto_shash_import_core(struct shash_desc *desc, const void *in); - /** * crypto_shash_init() - (re)initialize message digest * @desc: operational state handle that is already filled diff --git a/include/crypto/internal/hash.h b/include/crypto/internal/hash.h index f2bbdb74e11a..ef5ea75ac5c8 100644 --- a/include/crypto/internal/hash.h +++ b/include/crypto/internal/hash.h @@ -305,5 +305,53 @@ static inline unsigned int crypto_shash_coresize(struct crypto_shash *tfm) #define HASH_REQUEST_ZERO(name) \ memzero_explicit(__##name##_req, sizeof(__##name##_req)) +/** + * crypto_ahash_export_core() - extract core state for message digest + * @req: reference to the ahash_request handle whose state is exported + * @out: output buffer of sufficient size that can hold the hash state + * + * Export the hash state without the partial block buffer. + * + * Context: Softirq or process context. + * Return: 0 if the export creation was successful; < 0 if an error occurred + */ +int crypto_ahash_export_core(struct ahash_request *req, void *out); + +/** + * crypto_ahash_import_core() - import core state + * @req: reference to ahash_request handle the state is imported into + * @in: buffer holding the state + * + * Import the hash state without the partial block buffer. + * + * Context: Softirq or process context. + * Return: 0 if the import was successful; < 0 if an error occurred + */ +int crypto_ahash_import_core(struct ahash_request *req, const void *in); + +/** + * crypto_shash_export_core() - extract core state for message digest + * @desc: reference to the operational state handle whose state is exported + * @out: output buffer of sufficient size that can hold the hash state + * + * Export the hash state without the partial block buffer. + * + * Context: Softirq or process context. + * Return: 0 if the export creation was successful; < 0 if an error occurred + */ +int crypto_shash_export_core(struct shash_desc *desc, void *out); + +/** + * crypto_shash_import_core() - import core state + * @desc: reference to the operational state handle the state imported into + * @in: buffer holding the state + * + * Import the hash state without the partial block buffer. + * + * Context: Softirq or process context. + * Return: 0 if the import was successful; < 0 if an error occurred + */ +int crypto_shash_import_core(struct shash_desc *desc, const void *in); + #endif /* _CRYPTO_INTERNAL_HASH_H */ -- 2.51.0 From c6a12f394c488cf6a7ca35c1ad51e0e88897de2e Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Thu, 15 May 2025 13:54:35 +0800 Subject: [PATCH 05/16] crypto: hash - Add export_core and import_core hooks Add export_core and import_core hooks. 
These are intended to be used by algorithms which are wrappers around block-only algorithms, but are not themselves block-only, e.g., hmac. Signed-off-by: Herbert Xu --- crypto/ahash.c | 22 ++++++++++++++--- crypto/shash.c | 44 +++++++++++++++++++++++++++------- include/crypto/hash.h | 10 ++++++++ include/crypto/internal/hash.h | 3 +++ 4 files changed, 68 insertions(+), 11 deletions(-) diff --git a/crypto/ahash.c b/crypto/ahash.c index 344bf1b43e71..7d96c76731ef 100644 --- a/crypto/ahash.c +++ b/crypto/ahash.c @@ -704,7 +704,7 @@ int crypto_ahash_export_core(struct ahash_request *req, void *out) if (likely(tfm->using_shash)) return crypto_shash_export_core(ahash_request_ctx(req), out); - return crypto_ahash_alg(tfm)->export(req, out); + return crypto_ahash_alg(tfm)->export_core(req, out); } EXPORT_SYMBOL_GPL(crypto_ahash_export_core); @@ -727,7 +727,7 @@ int crypto_ahash_import_core(struct ahash_request *req, const void *in) in); if (crypto_ahash_get_flags(tfm) & CRYPTO_TFM_NEED_KEY) return -ENOKEY; - return crypto_ahash_alg(tfm)->import(req, in); + return crypto_ahash_alg(tfm)->import_core(req, in); } EXPORT_SYMBOL_GPL(crypto_ahash_import_core); @@ -739,7 +739,7 @@ int crypto_ahash_import(struct ahash_request *req, const void *in) return crypto_shash_import(prepare_shash_desc(req, tfm), in); if (crypto_ahash_get_flags(tfm) & CRYPTO_TFM_NEED_KEY) return -ENOKEY; - return crypto_ahash_import_core(req, in); + return crypto_ahash_alg(tfm)->import(req, in); } EXPORT_SYMBOL_GPL(crypto_ahash_import); @@ -971,6 +971,16 @@ out_free_nhash: } EXPORT_SYMBOL_GPL(crypto_clone_ahash); +static int ahash_default_export_core(struct ahash_request *req, void *out) +{ + return -ENOSYS; +} + +static int ahash_default_import_core(struct ahash_request *req, const void *in) +{ + return -ENOSYS; +} + static int ahash_prepare_alg(struct ahash_alg *alg) { struct crypto_alg *base = &alg->halg.base; @@ -996,6 +1006,12 @@ static int ahash_prepare_alg(struct ahash_alg *alg) if (!alg->setkey) alg->setkey = ahash_nosetkey; + if (!alg->export_core || !alg->import_core) { + alg->export_core = ahash_default_export_core; + alg->import_core = ahash_default_import_core; + base->cra_flags |= CRYPTO_AHASH_ALG_NO_EXPORT_CORE; + } + return 0; } diff --git a/crypto/shash.c b/crypto/shash.c index dee391d47f51..5bc74a72d5ad 100644 --- a/crypto/shash.c +++ b/crypto/shash.c @@ -203,9 +203,10 @@ int crypto_shash_tfm_digest(struct crypto_shash *tfm, const u8 *data, } EXPORT_SYMBOL_GPL(crypto_shash_tfm_digest); -int crypto_shash_export_core(struct shash_desc *desc, void *out) +static int __crypto_shash_export(struct shash_desc *desc, void *out, + int (*export)(struct shash_desc *desc, + void *out)) { - int (*export)(struct shash_desc *desc, void *out); struct crypto_shash *tfm = desc->tfm; u8 *buf = shash_desc_ctx(desc); unsigned int plen, ss; @@ -214,7 +215,6 @@ int crypto_shash_export_core(struct shash_desc *desc, void *out) ss = crypto_shash_statesize(tfm); if (crypto_shash_block_only(tfm)) ss -= plen; - export = crypto_shash_alg(tfm)->export; if (!export) { memcpy(out, buf, ss); return 0; @@ -222,6 +222,12 @@ int crypto_shash_export_core(struct shash_desc *desc, void *out) return export(desc, out); } + +int crypto_shash_export_core(struct shash_desc *desc, void *out) +{ + return __crypto_shash_export(desc, out, + crypto_shash_alg(desc->tfm)->export_core); +} EXPORT_SYMBOL_GPL(crypto_shash_export_core); int crypto_shash_export(struct shash_desc *desc, void *out) @@ -236,13 +242,14 @@ int crypto_shash_export(struct shash_desc *desc, void 
*out) memcpy(out + ss - plen, buf + descsize - plen, plen); } - return crypto_shash_export_core(desc, out); + return __crypto_shash_export(desc, out, crypto_shash_alg(tfm)->export); } EXPORT_SYMBOL_GPL(crypto_shash_export); -int crypto_shash_import_core(struct shash_desc *desc, const void *in) +static int __crypto_shash_import(struct shash_desc *desc, const void *in, + int (*import)(struct shash_desc *desc, + const void *in)) { - int (*import)(struct shash_desc *desc, const void *in); struct crypto_shash *tfm = desc->tfm; unsigned int descsize, plen, ss; u8 *buf = shash_desc_ctx(desc); @@ -256,7 +263,6 @@ int crypto_shash_import_core(struct shash_desc *desc, const void *in) buf[descsize - 1] = 0; if (crypto_shash_block_only(tfm)) ss -= plen; - import = crypto_shash_alg(tfm)->import; if (!import) { memcpy(buf, in, ss); return 0; @@ -264,6 +270,12 @@ int crypto_shash_import_core(struct shash_desc *desc, const void *in) return import(desc, in); } + +int crypto_shash_import_core(struct shash_desc *desc, const void *in) +{ + return __crypto_shash_import(desc, in, + crypto_shash_alg(desc->tfm)->import_core); +} EXPORT_SYMBOL_GPL(crypto_shash_import_core); int crypto_shash_import(struct shash_desc *desc, const void *in) @@ -271,7 +283,7 @@ int crypto_shash_import(struct shash_desc *desc, const void *in) struct crypto_shash *tfm = desc->tfm; int err; - err = crypto_shash_import_core(desc, in); + err = __crypto_shash_import(desc, in, crypto_shash_alg(tfm)->import); if (crypto_shash_block_only(tfm)) { unsigned int plen = crypto_shash_blocksize(tfm) + 1; unsigned int descsize = crypto_shash_descsize(tfm); @@ -436,6 +448,16 @@ int hash_prepare_alg(struct hash_alg_common *alg) return 0; } +static int shash_default_export_core(struct shash_desc *desc, void *out) +{ + return -ENOSYS; +} + +static int shash_default_import_core(struct shash_desc *desc, const void *in) +{ + return -ENOSYS; +} + static int shash_prepare_alg(struct shash_alg *alg) { struct crypto_alg *base = &alg->halg.base; @@ -476,6 +498,12 @@ static int shash_prepare_alg(struct shash_alg *alg) BUILD_BUG_ON(MAX_ALGAPI_BLOCKSIZE >= 256); alg->descsize += base->cra_blocksize + 1; alg->statesize += base->cra_blocksize + 1; + alg->export_core = alg->export; + alg->import_core = alg->import; + } else if (!alg->export_core || !alg->import_core) { + alg->export_core = shash_default_export_core; + alg->import_core = shash_default_import_core; + base->cra_flags |= CRYPTO_AHASH_ALG_NO_EXPORT_CORE; } if (alg->descsize > HASH_MAX_DESCSIZE) diff --git a/include/crypto/hash.h b/include/crypto/hash.h index 9fc9daaaaab4..bf177cf9be10 100644 --- a/include/crypto/hash.h +++ b/include/crypto/hash.h @@ -129,6 +129,10 @@ struct ahash_request { * data so the transformation can continue from this point onward. No * data processing happens at this point. Driver must not use * req->result. + * @export_core: Export partial state without partial block. Only defined + * for algorithms that are not block-only. + * @import_core: Import partial state without partial block. Only defined + * for algorithms that are not block-only. * @init_tfm: Initialize the cryptographic transformation object. 
* This function is called only once at the instantiation * time, right after the transformation context was @@ -151,6 +155,8 @@ struct ahash_alg { int (*digest)(struct ahash_request *req); int (*export)(struct ahash_request *req, void *out); int (*import)(struct ahash_request *req, const void *in); + int (*export_core)(struct ahash_request *req, void *out); + int (*import_core)(struct ahash_request *req, const void *in); int (*setkey)(struct crypto_ahash *tfm, const u8 *key, unsigned int keylen); int (*init_tfm)(struct crypto_ahash *tfm); @@ -200,6 +206,8 @@ struct shash_desc { * @digest: see struct ahash_alg * @export: see struct ahash_alg * @import: see struct ahash_alg + * @export_core: see struct ahash_alg + * @import_core: see struct ahash_alg * @setkey: see struct ahash_alg * @init_tfm: Initialize the cryptographic transformation object. * This function is called only once at the instantiation @@ -230,6 +238,8 @@ struct shash_alg { unsigned int len, u8 *out); int (*export)(struct shash_desc *desc, void *out); int (*import)(struct shash_desc *desc, const void *in); + int (*export_core)(struct shash_desc *desc, void *out); + int (*import_core)(struct shash_desc *desc, const void *in); int (*setkey)(struct crypto_shash *tfm, const u8 *key, unsigned int keylen); int (*init_tfm)(struct crypto_shash *tfm); diff --git a/include/crypto/internal/hash.h b/include/crypto/internal/hash.h index ef5ea75ac5c8..e9de2bc34a10 100644 --- a/include/crypto/internal/hash.h +++ b/include/crypto/internal/hash.h @@ -20,6 +20,9 @@ /* Set this bit if finup can deal with multiple blocks. */ #define CRYPTO_AHASH_ALG_FINUP_MAX 0x04000000 +/* This bit is set by the Crypto API if export_core is not supported. */ +#define CRYPTO_AHASH_ALG_NO_EXPORT_CORE 0x08000000 + #define HASH_FBREQ_ON_STACK(name, req) \ char __##name##_req[sizeof(struct ahash_request) + \ MAX_SYNC_HASH_REQSIZE] CRYPTO_MINALIGN_ATTR; \ -- 2.51.0 From 9d7a0ab1c75365158b28a5f7fa516061ea40b1b8 Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Thu, 15 May 2025 13:54:37 +0800 Subject: [PATCH 06/16] crypto: ahash - Handle partial blocks in API Provide an option to handle the partial blocks in the ahash API. Almost every hash algorithm has a block size and are only able to hash partial blocks on finalisation. As a first step disable virtual address support for algorithms with state sizes larger than HASH_MAX_STATESIZE. This is OK as virtual addresses are currently only used on synchronous fallbacks. This means ahash_do_req_chain only needs to handle synchronous fallbacks, removing the complexities of saving the request state. Also move the saved request state into the ahash_request object as nesting is no longer possible. Add a scatterlist to ahash_request to store the partial block. 
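For illustration only, the buffering scheme can be modelled in plain C. This is a stand-alone sketch, not the kernel code, and every name in it is invented: the block-only step consumes whole blocks, while a carry buffer at the end of the request context holds the remainder until the next update or finup call supplies more data.

	#include <stdio.h>
	#include <string.h>

	#define BS 64	/* block size of the underlying hash, e.g. SHA-256 */

	struct toy_req_ctx {
		unsigned long long blocks;	/* stands in for the real digest state */
		unsigned char buf[BS];		/* carry buffer for the partial block */
		unsigned int buflen;		/* bytes currently held in buf */
	};

	/* The block-only step: a real driver would update its state here. */
	static void toy_compress(struct toy_req_ctx *ctx, const unsigned char *block)
	{
		ctx->blocks++;
	}

	static void toy_update(struct toy_req_ctx *ctx, const unsigned char *data,
			       size_t len)
	{
		/* Top up the carry buffer first. */
		if (ctx->buflen) {
			size_t n = BS - ctx->buflen;

			if (n > len)
				n = len;
			memcpy(ctx->buf + ctx->buflen, data, n);
			ctx->buflen += n;
			data += n;
			len -= n;
			if (ctx->buflen == BS) {
				toy_compress(ctx, ctx->buf);
				ctx->buflen = 0;
			}
		}

		/* Feed whole blocks straight through. */
		while (len >= BS) {
			toy_compress(ctx, data);
			data += BS;
			len -= BS;
		}

		/* Stash the remainder until the next update/finup. */
		memcpy(ctx->buf, data, len);
		ctx->buflen += len;
	}

	int main(void)
	{
		unsigned char msg[150] = { 0 };
		struct toy_req_ctx ctx = { 0 };

		toy_update(&ctx, msg, 100);	/* 1 block compressed, 36 bytes carried */
		toy_update(&ctx, msg, 50);	/* 1 more block, 22 bytes carried */
		printf("blocks: %llu, carried: %u\n", ctx.blocks, ctx.buflen);
		return 0;
	}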
Signed-off-by: Herbert Xu --- crypto/ahash.c | 527 ++++++++++++++++++++---------------------- include/crypto/hash.h | 12 +- 2 files changed, 258 insertions(+), 281 deletions(-) diff --git a/crypto/ahash.c b/crypto/ahash.c index 7d96c76731ef..cf8bbe7e54c0 100644 --- a/crypto/ahash.c +++ b/crypto/ahash.c @@ -12,11 +12,13 @@ * Copyright (c) 2008 Loc Ho */ +#include #include #include #include #include #include +#include #include #include #include @@ -40,23 +42,46 @@ struct crypto_hash_walk { struct scatterlist *sg; }; -struct ahash_save_req_state { - struct ahash_request *req0; +static int ahash_def_finup(struct ahash_request *req); + +static inline bool crypto_ahash_block_only(struct crypto_ahash *tfm) +{ + return crypto_ahash_alg(tfm)->halg.base.cra_flags & + CRYPTO_AHASH_ALG_BLOCK_ONLY; +} + +static inline bool crypto_ahash_final_nonzero(struct crypto_ahash *tfm) +{ + return crypto_ahash_alg(tfm)->halg.base.cra_flags & + CRYPTO_AHASH_ALG_FINAL_NONZERO; +} + +static inline bool crypto_ahash_need_fallback(struct crypto_ahash *tfm) +{ + return crypto_ahash_alg(tfm)->halg.base.cra_flags & + CRYPTO_ALG_NEED_FALLBACK; +} + +static inline void ahash_op_done(void *data, int err, + int (*finish)(struct ahash_request *, int)) +{ + struct ahash_request *areq = data; crypto_completion_t compl; - void *data; - struct scatterlist sg; - const u8 *src; - u8 *page; - unsigned int offset; - unsigned int nbytes; - bool update; -}; -static int ahash_save_req(struct ahash_request *req, crypto_completion_t cplt); -static void ahash_restore_req(struct ahash_request *req); -static void ahash_def_finup_done1(void *data, int err); -static int ahash_def_finup_finish1(struct ahash_request *req, int err); -static int ahash_def_finup(struct ahash_request *req); + compl = areq->saved_complete; + data = areq->saved_data; + if (err == -EINPROGRESS) + goto out; + + areq->base.flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP; + + err = finish(areq, err); + if (err == -EINPROGRESS || err == -EBUSY) + return; + +out: + compl(data, err); +} static int hash_walk_next(struct crypto_hash_walk *walk) { @@ -298,7 +323,7 @@ int crypto_ahash_setkey(struct crypto_ahash *tfm, const u8 *key, int err; err = alg->setkey(tfm, key, keylen); - if (!err && ahash_is_async(tfm)) + if (!err && crypto_ahash_need_fallback(tfm)) err = crypto_ahash_setkey(crypto_ahash_fb(tfm), key, keylen); if (unlikely(err)) { @@ -311,159 +336,47 @@ int crypto_ahash_setkey(struct crypto_ahash *tfm, const u8 *key, } EXPORT_SYMBOL_GPL(crypto_ahash_setkey); -static int ahash_reqchain_virt(struct ahash_save_req_state *state, - int err, u32 mask) -{ - struct ahash_request *req = state->req0; - struct crypto_ahash *tfm; - - tfm = crypto_ahash_reqtfm(req); - - for (;;) { - unsigned len = state->nbytes; - - if (!state->offset) - break; - - if (state->offset == len || err) { - u8 *result = req->result; - - ahash_request_set_virt(req, state->src, result, len); - state->offset = 0; - break; - } - - len -= state->offset; - - len = min(PAGE_SIZE, len); - memcpy(state->page, state->src + state->offset, len); - state->offset += len; - req->nbytes = len; - - err = crypto_ahash_alg(tfm)->update(req); - if (err == -EINPROGRESS) { - if (state->offset < state->nbytes) - err = -EBUSY; - break; - } - - if (err == -EBUSY) - break; - } - - return err; -} - -static int ahash_reqchain_finish(struct ahash_request *req0, - struct ahash_save_req_state *state, - int err, u32 mask) -{ - u8 *page; - - err = ahash_reqchain_virt(state, err, mask); - if (err == -EINPROGRESS || err == -EBUSY) - goto out; - - page = 
state->page; - if (page) { - memset(page, 0, PAGE_SIZE); - free_page((unsigned long)page); - } - ahash_restore_req(req0); - -out: - return err; -} - -static void ahash_reqchain_done(void *data, int err) -{ - struct ahash_save_req_state *state = data; - crypto_completion_t compl = state->compl; - - data = state->data; - - if (err == -EINPROGRESS) { - if (state->offset < state->nbytes) - return; - goto notify; - } - - err = ahash_reqchain_finish(state->req0, state, err, - CRYPTO_TFM_REQ_MAY_BACKLOG); - if (err == -EBUSY) - return; - -notify: - compl(data, err); -} - static int ahash_do_req_chain(struct ahash_request *req, - int (*op)(struct ahash_request *req)) + int (*const *op)(struct ahash_request *req)) { struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); - bool update = op == crypto_ahash_alg(tfm)->update; - struct ahash_save_req_state *state; - struct ahash_save_req_state state0; - u8 *page = NULL; int err; - if (crypto_ahash_req_virt(tfm) || - !update || !ahash_request_isvirt(req)) - return op(req); - - if (update && ahash_request_isvirt(req)) { - page = (void *)__get_free_page(GFP_ATOMIC); - err = -ENOMEM; - if (!page) - goto out; - } - - state = &state0; - if (ahash_is_async(tfm)) { - err = ahash_save_req(req, ahash_reqchain_done); - if (err) - goto out_free_page; + if (crypto_ahash_req_virt(tfm) || !ahash_request_isvirt(req)) + return (*op)(req); - state = req->base.data; - } - - state->update = update; - state->page = page; - state->offset = 0; - state->nbytes = 0; + if (crypto_ahash_statesize(tfm) > HASH_MAX_STATESIZE) + return -ENOSYS; - if (page) - sg_init_one(&state->sg, page, PAGE_SIZE); + { + u8 state[HASH_MAX_STATESIZE]; - if (update && ahash_request_isvirt(req) && req->nbytes) { - unsigned len = req->nbytes; - u8 *result = req->result; + if (op == &crypto_ahash_alg(tfm)->digest) { + ahash_request_set_tfm(req, crypto_ahash_fb(tfm)); + err = crypto_ahash_digest(req); + goto out_no_state; + } - state->src = req->svirt; - state->nbytes = len; + err = crypto_ahash_export(req, state); + ahash_request_set_tfm(req, crypto_ahash_fb(tfm)); + err = err ?: crypto_ahash_import(req, state); - len = min(PAGE_SIZE, len); + if (op == &crypto_ahash_alg(tfm)->finup) { + err = err ?: crypto_ahash_finup(req); + goto out_no_state; + } - memcpy(page, req->svirt, len); - state->offset = len; + err = err ?: + crypto_ahash_update(req) ?: + crypto_ahash_export(req, state); - ahash_request_set_crypt(req, &state->sg, result, len); - } + ahash_request_set_tfm(req, tfm); + return err ?: crypto_ahash_import(req, state); - err = op(req); - if (err == -EINPROGRESS || err == -EBUSY) { - if (state->offset < state->nbytes) - err = -EBUSY; +out_no_state: + ahash_request_set_tfm(req, tfm); return err; } - - return ahash_reqchain_finish(req, state, err, ~0); - -out_free_page: - free_page((unsigned long)page); - -out: - return err; } int crypto_ahash_init(struct ahash_request *req) @@ -476,143 +389,190 @@ int crypto_ahash_init(struct ahash_request *req) return -ENOKEY; if (ahash_req_on_stack(req) && ahash_is_async(tfm)) return -EAGAIN; - return ahash_do_req_chain(req, crypto_ahash_alg(tfm)->init); + if (crypto_ahash_block_only(tfm)) { + u8 *buf = ahash_request_ctx(req); + + buf += crypto_ahash_reqsize(tfm) - 1; + *buf = 0; + } + return crypto_ahash_alg(tfm)->init(req); } EXPORT_SYMBOL_GPL(crypto_ahash_init); -static int ahash_save_req(struct ahash_request *req, crypto_completion_t cplt) +static void ahash_save_req(struct ahash_request *req, crypto_completion_t cplt) { - struct crypto_ahash *tfm = 
crypto_ahash_reqtfm(req); - struct ahash_save_req_state *state; - - if (!ahash_is_async(tfm)) - return 0; - - state = kmalloc(sizeof(*state), GFP_ATOMIC); - if (!state) - return -ENOMEM; - - state->compl = req->base.complete; - state->data = req->base.data; + req->saved_complete = req->base.complete; + req->saved_data = req->base.data; req->base.complete = cplt; - req->base.data = state; - state->req0 = req; - - return 0; + req->base.data = req; } static void ahash_restore_req(struct ahash_request *req) { - struct ahash_save_req_state *state; - struct crypto_ahash *tfm; - - tfm = crypto_ahash_reqtfm(req); - if (!ahash_is_async(tfm)) - return; - - state = req->base.data; - - req->base.complete = state->compl; - req->base.data = state->data; - kfree(state); + req->base.complete = req->saved_complete; + req->base.data = req->saved_data; } -int crypto_ahash_update(struct ahash_request *req) +static int ahash_update_finish(struct ahash_request *req, int err) { struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); + bool nonzero = crypto_ahash_final_nonzero(tfm); + int bs = crypto_ahash_blocksize(tfm); + u8 *blenp = ahash_request_ctx(req); + int blen; + u8 *buf; + + blenp += crypto_ahash_reqsize(tfm) - 1; + blen = *blenp; + buf = blenp - bs; + + if (blen) { + req->src = req->sg_head + 1; + if (sg_is_chain(req->src)) + req->src = sg_chain_ptr(req->src); + } - if (likely(tfm->using_shash)) - return shash_ahash_update(req, ahash_request_ctx(req)); - if (ahash_req_on_stack(req) && ahash_is_async(tfm)) - return -EAGAIN; - return ahash_do_req_chain(req, crypto_ahash_alg(tfm)->update); + req->nbytes += nonzero - blen; + + blen = err < 0 ? 0 : err + nonzero; + if (ahash_request_isvirt(req)) + memcpy(buf, req->svirt + req->nbytes - blen, blen); + else + memcpy_from_sglist(buf, req->src, req->nbytes - blen, blen); + *blenp = blen; + + ahash_restore_req(req); + + return err; } -EXPORT_SYMBOL_GPL(crypto_ahash_update); -int crypto_ahash_final(struct ahash_request *req) +static void ahash_update_done(void *data, int err) { - struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); - - if (likely(tfm->using_shash)) - return crypto_shash_final(ahash_request_ctx(req), req->result); - if (ahash_req_on_stack(req) && ahash_is_async(tfm)) - return -EAGAIN; - return ahash_do_req_chain(req, crypto_ahash_alg(tfm)->final); + ahash_op_done(data, err, ahash_update_finish); } -EXPORT_SYMBOL_GPL(crypto_ahash_final); -int crypto_ahash_finup(struct ahash_request *req) +int crypto_ahash_update(struct ahash_request *req) { struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); + bool nonzero = crypto_ahash_final_nonzero(tfm); + int bs = crypto_ahash_blocksize(tfm); + u8 *blenp = ahash_request_ctx(req); + int blen, err; + u8 *buf; if (likely(tfm->using_shash)) - return shash_ahash_finup(req, ahash_request_ctx(req)); + return shash_ahash_update(req, ahash_request_ctx(req)); if (ahash_req_on_stack(req) && ahash_is_async(tfm)) return -EAGAIN; - if (!crypto_ahash_alg(tfm)->finup || - (!crypto_ahash_req_virt(tfm) && ahash_request_isvirt(req))) - return ahash_def_finup(req); - return ahash_do_req_chain(req, crypto_ahash_alg(tfm)->finup); -} -EXPORT_SYMBOL_GPL(crypto_ahash_finup); + if (!crypto_ahash_block_only(tfm)) + return ahash_do_req_chain(req, &crypto_ahash_alg(tfm)->update); -static int ahash_def_digest_finish(struct ahash_request *req, int err) -{ - struct crypto_ahash *tfm; + blenp += crypto_ahash_reqsize(tfm) - 1; + blen = *blenp; + buf = blenp - bs; - if (err) - goto out; + if (blen + req->nbytes < bs + nonzero) { + if 
(ahash_request_isvirt(req)) + memcpy(buf + blen, req->svirt, req->nbytes); + else + memcpy_from_sglist(buf + blen, req->src, 0, + req->nbytes); - tfm = crypto_ahash_reqtfm(req); - if (ahash_is_async(tfm)) - req->base.complete = ahash_def_finup_done1; + *blenp += req->nbytes; + return 0; + } - err = crypto_ahash_update(req); + if (blen) { + memset(req->sg_head, 0, sizeof(req->sg_head[0])); + sg_set_buf(req->sg_head, buf, blen); + if (req->src != req->sg_head + 1) + sg_chain(req->sg_head, 2, req->src); + req->src = req->sg_head; + req->nbytes += blen; + } + req->nbytes -= nonzero; + + ahash_save_req(req, ahash_update_done); + + err = ahash_do_req_chain(req, &crypto_ahash_alg(tfm)->update); if (err == -EINPROGRESS || err == -EBUSY) return err; - return ahash_def_finup_finish1(req, err); - -out: - ahash_restore_req(req); - return err; + return ahash_update_finish(req, err); } +EXPORT_SYMBOL_GPL(crypto_ahash_update); -static void ahash_def_digest_done(void *data, int err) +static int ahash_finup_finish(struct ahash_request *req, int err) { - struct ahash_save_req_state *state0 = data; - struct ahash_save_req_state state; - struct ahash_request *areq; - - state = *state0; - areq = state.req0; - if (err == -EINPROGRESS) - goto out; + struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); + u8 *blenp = ahash_request_ctx(req); + int blen; + + blenp += crypto_ahash_reqsize(tfm) - 1; + blen = *blenp; + + if (blen) { + if (sg_is_last(req->src)) + req->src = NULL; + else { + req->src = req->sg_head + 1; + if (sg_is_chain(req->src)) + req->src = sg_chain_ptr(req->src); + } + req->nbytes -= blen; + } - areq->base.flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP; + ahash_restore_req(req); - err = ahash_def_digest_finish(areq, err); - if (err == -EINPROGRESS || err == -EBUSY) - return; + return err; +} -out: - state.compl(state.data, err); +static void ahash_finup_done(void *data, int err) +{ + ahash_op_done(data, err, ahash_finup_finish); } -static int ahash_def_digest(struct ahash_request *req) +int crypto_ahash_finup(struct ahash_request *req) { - int err; + struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); + int bs = crypto_ahash_blocksize(tfm); + u8 *blenp = ahash_request_ctx(req); + int blen, err; + u8 *buf; - err = ahash_save_req(req, ahash_def_digest_done); - if (err) - return err; + if (likely(tfm->using_shash)) + return shash_ahash_finup(req, ahash_request_ctx(req)); + if (ahash_req_on_stack(req) && ahash_is_async(tfm)) + return -EAGAIN; + if (!crypto_ahash_alg(tfm)->finup) + return ahash_def_finup(req); + if (!crypto_ahash_block_only(tfm)) + return ahash_do_req_chain(req, &crypto_ahash_alg(tfm)->finup); + + blenp += crypto_ahash_reqsize(tfm) - 1; + blen = *blenp; + buf = blenp - bs; + + if (blen) { + memset(req->sg_head, 0, sizeof(req->sg_head[0])); + sg_set_buf(req->sg_head, buf, blen); + if (!req->src) + sg_mark_end(req->sg_head); + else if (req->src != req->sg_head + 1) + sg_chain(req->sg_head, 2, req->src); + req->src = req->sg_head; + req->nbytes += blen; + } - err = crypto_ahash_init(req); + ahash_save_req(req, ahash_finup_done); + + err = ahash_do_req_chain(req, &crypto_ahash_alg(tfm)->finup); if (err == -EINPROGRESS || err == -EBUSY) return err; - return ahash_def_digest_finish(req, err); + return ahash_finup_finish(req, err); } +EXPORT_SYMBOL_GPL(crypto_ahash_finup); int crypto_ahash_digest(struct ahash_request *req) { @@ -622,18 +582,15 @@ int crypto_ahash_digest(struct ahash_request *req) return shash_ahash_digest(req, prepare_shash_desc(req, tfm)); if (ahash_req_on_stack(req) && 
ahash_is_async(tfm)) return -EAGAIN; - if (!crypto_ahash_req_virt(tfm) && ahash_request_isvirt(req)) - return ahash_def_digest(req); if (crypto_ahash_get_flags(tfm) & CRYPTO_TFM_NEED_KEY) return -ENOKEY; - return ahash_do_req_chain(req, crypto_ahash_alg(tfm)->digest); + return ahash_do_req_chain(req, &crypto_ahash_alg(tfm)->digest); } EXPORT_SYMBOL_GPL(crypto_ahash_digest); static void ahash_def_finup_done2(void *data, int err) { - struct ahash_save_req_state *state = data; - struct ahash_request *areq = state->req0; + struct ahash_request *areq = data; if (err == -EINPROGRESS) return; @@ -644,14 +601,10 @@ static void ahash_def_finup_done2(void *data, int err) static int ahash_def_finup_finish1(struct ahash_request *req, int err) { - struct crypto_ahash *tfm; - if (err) goto out; - tfm = crypto_ahash_reqtfm(req); - if (ahash_is_async(tfm)) - req->base.complete = ahash_def_finup_done2; + req->base.complete = ahash_def_finup_done2; err = crypto_ahash_final(req); if (err == -EINPROGRESS || err == -EBUSY) @@ -664,32 +617,14 @@ out: static void ahash_def_finup_done1(void *data, int err) { - struct ahash_save_req_state *state0 = data; - struct ahash_save_req_state state; - struct ahash_request *areq; - - state = *state0; - areq = state.req0; - if (err == -EINPROGRESS) - goto out; - - areq->base.flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP; - - err = ahash_def_finup_finish1(areq, err); - if (err == -EINPROGRESS || err == -EBUSY) - return; - -out: - state.compl(state.data, err); + ahash_op_done(data, err, ahash_def_finup_finish1); } static int ahash_def_finup(struct ahash_request *req) { int err; - err = ahash_save_req(req, ahash_def_finup_done1); - if (err) - return err; + ahash_save_req(req, ahash_def_finup_done1); err = crypto_ahash_update(req); if (err == -EINPROGRESS || err == -EBUSY) @@ -714,6 +649,14 @@ int crypto_ahash_export(struct ahash_request *req, void *out) if (likely(tfm->using_shash)) return crypto_shash_export(ahash_request_ctx(req), out); + if (crypto_ahash_block_only(tfm)) { + unsigned int plen = crypto_ahash_blocksize(tfm) + 1; + unsigned int reqsize = crypto_ahash_reqsize(tfm); + unsigned int ss = crypto_ahash_statesize(tfm); + u8 *buf = ahash_request_ctx(req); + + memcpy(out + ss - plen, buf + reqsize - plen, plen); + } return crypto_ahash_alg(tfm)->export(req, out); } EXPORT_SYMBOL_GPL(crypto_ahash_export); @@ -739,6 +682,12 @@ int crypto_ahash_import(struct ahash_request *req, const void *in) return crypto_shash_import(prepare_shash_desc(req, tfm), in); if (crypto_ahash_get_flags(tfm) & CRYPTO_TFM_NEED_KEY) return -ENOKEY; + if (crypto_ahash_block_only(tfm)) { + unsigned int reqsize = crypto_ahash_reqsize(tfm); + u8 *buf = ahash_request_ctx(req); + + buf[reqsize - 1] = 0; + } return crypto_ahash_alg(tfm)->import(req, in); } EXPORT_SYMBOL_GPL(crypto_ahash_import); @@ -753,7 +702,7 @@ static void crypto_ahash_exit_tfm(struct crypto_tfm *tfm) else if (tfm->__crt_alg->cra_exit) tfm->__crt_alg->cra_exit(tfm); - if (ahash_is_async(hash)) + if (crypto_ahash_need_fallback(hash)) crypto_free_ahash(crypto_ahash_fb(hash)); } @@ -770,9 +719,12 @@ static int crypto_ahash_init_tfm(struct crypto_tfm *tfm) if (tfm->__crt_alg->cra_type == &crypto_shash_type) return crypto_init_ahash_using_shash(tfm); - if (ahash_is_async(hash)) { + if (crypto_ahash_need_fallback(hash)) { fb = crypto_alloc_ahash(crypto_ahash_alg_name(hash), - 0, CRYPTO_ALG_ASYNC); + CRYPTO_ALG_REQ_VIRT, + CRYPTO_ALG_ASYNC | + CRYPTO_ALG_REQ_VIRT | + CRYPTO_AHASH_ALG_NO_EXPORT_CORE); if (IS_ERR(fb)) return PTR_ERR(fb); @@ -797,6 
+749,10 @@ static int crypto_ahash_init_tfm(struct crypto_tfm *tfm) MAX_SYNC_HASH_REQSIZE) goto out_exit_tfm; + BUILD_BUG_ON(HASH_MAX_DESCSIZE > MAX_SYNC_HASH_REQSIZE); + if (crypto_ahash_reqsize(hash) < HASH_MAX_DESCSIZE) + crypto_ahash_set_reqsize(hash, HASH_MAX_DESCSIZE); + return 0; out_exit_tfm: @@ -941,7 +897,7 @@ struct crypto_ahash *crypto_clone_ahash(struct crypto_ahash *hash) return nhash; } - if (ahash_is_async(hash)) { + if (crypto_ahash_need_fallback(hash)) { fb = crypto_clone_ahash(crypto_ahash_fb(hash)); err = PTR_ERR(fb); if (IS_ERR(fb)) @@ -1003,10 +959,23 @@ static int ahash_prepare_alg(struct ahash_alg *alg) base->cra_type = &crypto_ahash_type; base->cra_flags |= CRYPTO_ALG_TYPE_AHASH; + if ((base->cra_flags ^ CRYPTO_ALG_REQ_VIRT) & + (CRYPTO_ALG_ASYNC | CRYPTO_ALG_REQ_VIRT)) + base->cra_flags |= CRYPTO_ALG_NEED_FALLBACK; + if (!alg->setkey) alg->setkey = ahash_nosetkey; - if (!alg->export_core || !alg->import_core) { + if (base->cra_flags & CRYPTO_AHASH_ALG_BLOCK_ONLY) { + BUILD_BUG_ON(MAX_ALGAPI_BLOCKSIZE >= 256); + if (!alg->finup) + return -EINVAL; + + base->cra_reqsize += base->cra_blocksize + 1; + alg->halg.statesize += base->cra_blocksize + 1; + alg->export_core = alg->export; + alg->import_core = alg->import; + } else if (!alg->export_core || !alg->import_core) { alg->export_core = ahash_default_export_core; alg->import_core = ahash_default_import_core; base->cra_flags |= CRYPTO_AHASH_ALG_NO_EXPORT_CORE; diff --git a/include/crypto/hash.h b/include/crypto/hash.h index bf177cf9be10..05ee817a3180 100644 --- a/include/crypto/hash.h +++ b/include/crypto/hash.h @@ -8,8 +8,8 @@ #ifndef _CRYPTO_HASH_H #define _CRYPTO_HASH_H -#include #include +#include #include #include @@ -65,6 +65,10 @@ struct ahash_request { }; u8 *result; + struct scatterlist sg_head[2]; + crypto_completion_t saved_complete; + void *saved_data; + void *__ctx[] CRYPTO_MINALIGN_ATTR; }; @@ -488,7 +492,11 @@ int crypto_ahash_finup(struct ahash_request *req); * -EBUSY if queue is full and request should be resubmitted later; * other < 0 if an error occurred */ -int crypto_ahash_final(struct ahash_request *req); +static inline int crypto_ahash_final(struct ahash_request *req) +{ + req->nbytes = 0; + return crypto_ahash_finup(req); +} /** * crypto_ahash_digest() - calculate message digest for a buffer -- 2.51.0 From a05a8bc5705165fe3ce9ea79048a5ff807e4651f Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Thu, 15 May 2025 13:54:40 +0800 Subject: [PATCH 07/16] crypto: hmac - Zero shash desc in setkey The shash desc needs to be zeroed after use in setkey as it is not finalised (finalisation automatically zeroes it). Also remove the final function as it's been superseded by finup. 
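For background, the state being wiped is seeded directly from the key. The stand-alone plain-C sketch below (illustrative only, not the kernel code) shows the same concern in miniature: any scratch copy of key-derived data that is not consumed by a finalisation step has to be erased by hand, which is what shash_desc_zero() does for the on-stack descriptor.

	#include <string.h>

	#define BLOCKSIZE 64
	#define HMAC_IPAD_VALUE 0x36
	#define HMAC_OPAD_VALUE 0x5c

	/* Derive the ipad/opad inputs from the key.  The scratch buffer holds
	 * key material the whole time, so it must be wiped before the stack
	 * frame is reused; a plain memset on a dead buffer may be elided by
	 * the compiler, hence the volatile store loop (the kernel uses
	 * memzero_explicit() for this). */
	static void toy_derive_pads(const unsigned char *key, size_t keylen,
				    unsigned char *ipad, unsigned char *opad)
	{
		unsigned char scratch[BLOCKSIZE];
		volatile unsigned char *p = scratch;
		size_t i;

		/* Assume keylen <= BLOCKSIZE; the real code hashes longer keys. */
		memcpy(scratch, key, keylen);
		memset(scratch + keylen, 0, BLOCKSIZE - keylen);

		for (i = 0; i < BLOCKSIZE; i++) {
			ipad[i] = scratch[i] ^ HMAC_IPAD_VALUE;
			opad[i] = scratch[i] ^ HMAC_OPAD_VALUE;
		}

		/* The scratch copy of the key is dead now: erase it. */
		for (i = 0; i < BLOCKSIZE; i++)
			p[i] = 0;
	}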
Signed-off-by: Herbert Xu --- crypto/hmac.c | 35 ++++++++++------------------------- 1 file changed, 10 insertions(+), 25 deletions(-) diff --git a/crypto/hmac.c b/crypto/hmac.c index ba36ddf50037..4517e04bfbaa 100644 --- a/crypto/hmac.c +++ b/crypto/hmac.c @@ -13,13 +13,11 @@ #include #include -#include #include #include -#include #include #include -#include +#include #include struct hmac_ctx { @@ -39,7 +37,7 @@ static int hmac_setkey(struct crypto_shash *parent, u8 *ipad = &tctx->pads[0]; u8 *opad = &tctx->pads[ss]; SHASH_DESC_ON_STACK(shash, hash); - unsigned int i; + int err, i; if (fips_enabled && (keylen < 112 / 8)) return -EINVAL; @@ -65,12 +63,14 @@ static int hmac_setkey(struct crypto_shash *parent, opad[i] ^= HMAC_OPAD_VALUE; } - return crypto_shash_init(shash) ?: - crypto_shash_update(shash, ipad, bs) ?: - crypto_shash_export(shash, ipad) ?: - crypto_shash_init(shash) ?: - crypto_shash_update(shash, opad, bs) ?: - crypto_shash_export(shash, opad); + err = crypto_shash_init(shash) ?: + crypto_shash_update(shash, ipad, bs) ?: + crypto_shash_export(shash, ipad) ?: + crypto_shash_init(shash) ?: + crypto_shash_update(shash, opad, bs) ?: + crypto_shash_export(shash, opad); + shash_desc_zero(shash); + return err; } static int hmac_export(struct shash_desc *pdesc, void *out) @@ -105,20 +105,6 @@ static int hmac_update(struct shash_desc *pdesc, return crypto_shash_update(desc, data, nbytes); } -static int hmac_final(struct shash_desc *pdesc, u8 *out) -{ - struct crypto_shash *parent = pdesc->tfm; - int ds = crypto_shash_digestsize(parent); - int ss = crypto_shash_statesize(parent); - const struct hmac_ctx *tctx = crypto_shash_ctx(parent); - const u8 *opad = &tctx->pads[ss]; - struct shash_desc *desc = shash_desc_ctx(pdesc); - - return crypto_shash_final(desc, out) ?: - crypto_shash_import(desc, opad) ?: - crypto_shash_finup(desc, out, ds, out); -} - static int hmac_finup(struct shash_desc *pdesc, const u8 *data, unsigned int nbytes, u8 *out) { @@ -222,7 +208,6 @@ static int hmac_create(struct crypto_template *tmpl, struct rtattr **tb) inst->alg.descsize = sizeof(struct shash_desc) + salg->descsize; inst->alg.init = hmac_init; inst->alg.update = hmac_update; - inst->alg.final = hmac_final; inst->alg.finup = hmac_finup; inst->alg.export = hmac_export; inst->alg.import = hmac_import; -- 2.51.0 From 8cf4c341f1931c20c564ab2ee0f9eb990a606cac Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Thu, 15 May 2025 13:54:42 +0800 Subject: [PATCH 08/16] crypto: hmac - Add export_core and import_core Add export_import and import_core so that hmac can be used as a fallback by block-only drivers. 
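As a rough usage sketch (hypothetical driver, invented names; only crypto_shash_export_core() and crypto_shash_import_core() are real interfaces from this series), a block-only driver that keeps the partial block in its own buffer would save and restore just the core state of its hmac fallback:

	#include <crypto/hash.h>
	#include <crypto/internal/hash.h>

	/* Hypothetical request context wrapping an hmac fallback descriptor. */
	struct toy_drv_req {
		struct shash_desc *fb_desc;		/* descriptor for the hmac fallback */
		u8 core_state[HASH_MAX_STATESIZE];	/* no partial block included */
	};

	/* Hand processing over to hardware: the driver already tracks the
	 * partial block itself, so only the core state needs saving. */
	static int toy_drv_suspend_fallback(struct toy_drv_req *req)
	{
		return crypto_shash_export_core(req->fb_desc, req->core_state);
	}

	/* Resume in software from the saved core state. */
	static int toy_drv_resume_fallback(struct toy_drv_req *req)
	{
		return crypto_shash_import_core(req->fb_desc, req->core_state);
	}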
Signed-off-by: Herbert Xu --- crypto/hmac.c | 19 +++++++++++++++++++ 1 file changed, 19 insertions(+) diff --git a/crypto/hmac.c b/crypto/hmac.c index 4517e04bfbaa..e4749a1f93dd 100644 --- a/crypto/hmac.c +++ b/crypto/hmac.c @@ -90,6 +90,22 @@ static int hmac_import(struct shash_desc *pdesc, const void *in) return crypto_shash_import(desc, in); } +static int hmac_export_core(struct shash_desc *pdesc, void *out) +{ + struct shash_desc *desc = shash_desc_ctx(pdesc); + + return crypto_shash_export_core(desc, out); +} + +static int hmac_import_core(struct shash_desc *pdesc, const void *in) +{ + const struct hmac_ctx *tctx = crypto_shash_ctx(pdesc->tfm); + struct shash_desc *desc = shash_desc_ctx(pdesc); + + desc->tfm = tctx->hash; + return crypto_shash_import_core(desc, in); +} + static int hmac_init(struct shash_desc *pdesc) { const struct hmac_ctx *tctx = crypto_shash_ctx(pdesc->tfm); @@ -177,6 +193,7 @@ static int hmac_create(struct crypto_template *tmpl, struct rtattr **tb) return -ENOMEM; spawn = shash_instance_ctx(inst); + mask |= CRYPTO_AHASH_ALG_NO_EXPORT_CORE; err = crypto_grab_shash(spawn, shash_crypto_instance(inst), crypto_attr_alg_name(tb[1]), 0, mask); if (err) @@ -211,6 +228,8 @@ static int hmac_create(struct crypto_template *tmpl, struct rtattr **tb) inst->alg.finup = hmac_finup; inst->alg.export = hmac_export; inst->alg.import = hmac_import; + inst->alg.export_core = hmac_export_core; + inst->alg.import_core = hmac_import_core; inst->alg.setkey = hmac_setkey; inst->alg.init_tfm = hmac_init_tfm; inst->alg.clone_tfm = hmac_clone_tfm; -- 2.51.0 From 32a9fd8f498bffd99c5d5441015136206e351ae4 Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Thu, 15 May 2025 13:54:44 +0800 Subject: [PATCH 09/16] crypto: shash - Set reqsize in shash_alg Make reqsize static for shash algorithms. Signed-off-by: Herbert Xu --- crypto/ahash.c | 1 - crypto/shash.c | 2 ++ 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/crypto/ahash.c b/crypto/ahash.c index cf8bbe7e54c0..bf8375bb32c9 100644 --- a/crypto/ahash.c +++ b/crypto/ahash.c @@ -286,7 +286,6 @@ static int crypto_init_ahash_using_shash(struct crypto_tfm *tfm) crypto_ahash_set_flags(crt, crypto_shash_get_flags(shash) & CRYPTO_TFM_NEED_KEY); - crt->reqsize = sizeof(struct shash_desc) + crypto_shash_descsize(shash); return 0; } diff --git a/crypto/shash.c b/crypto/shash.c index 5bc74a72d5ad..37537d7995c7 100644 --- a/crypto/shash.c +++ b/crypto/shash.c @@ -511,6 +511,8 @@ static int shash_prepare_alg(struct shash_alg *alg) if (alg->statesize > HASH_MAX_STATESIZE) return -EINVAL; + base->cra_reqsize = sizeof(struct shash_desc) + alg->descsize; + return 0; } -- 2.51.0 From 91b6ff579dda7660a9d11778f9dc4dd0a879de22 Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Thu, 15 May 2025 13:54:47 +0800 Subject: [PATCH 10/16] crypto: algapi - Add driver template support to crypto_inst_setname Add support to crypto_inst_setname for having a driver template name that differs from the algorithm template name. 
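For illustration of the resulting names, here is a stand-alone sketch of the snprintf logic above (not the kernel function itself): with name "hmac", driver "hmac-shash" and sha256-generic as the underlying algorithm, the instance keeps the user-visible cra_name "hmac(sha256)" while its cra_driver_name becomes "hmac-shash(sha256-generic)".

	#include <stdio.h>

	#define CRYPTO_MAX_ALG_NAME 128

	int main(void)
	{
		char cra_name[CRYPTO_MAX_ALG_NAME];
		char cra_driver_name[CRYPTO_MAX_ALG_NAME];

		/* name/driver as hmac passes them, sha256-generic as the target */
		snprintf(cra_name, sizeof(cra_name), "%s(%s)", "hmac", "sha256");
		snprintf(cra_driver_name, sizeof(cra_driver_name), "%s(%s)",
			 "hmac-shash", "sha256-generic");

		printf("cra_name:        %s\n", cra_name);	  /* hmac(sha256) */
		printf("cra_driver_name: %s\n", cra_driver_name); /* hmac-shash(sha256-generic) */
		return 0;
	}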
Signed-off-by: Herbert Xu --- crypto/algapi.c | 8 ++++---- include/crypto/algapi.h | 12 ++++++++++-- 2 files changed, 14 insertions(+), 6 deletions(-) diff --git a/crypto/algapi.c b/crypto/algapi.c index 25b5519e3b71..e604d0d8b7b4 100644 --- a/crypto/algapi.c +++ b/crypto/algapi.c @@ -923,20 +923,20 @@ const char *crypto_attr_alg_name(struct rtattr *rta) } EXPORT_SYMBOL_GPL(crypto_attr_alg_name); -int crypto_inst_setname(struct crypto_instance *inst, const char *name, - struct crypto_alg *alg) +int __crypto_inst_setname(struct crypto_instance *inst, const char *name, + const char *driver, struct crypto_alg *alg) { if (snprintf(inst->alg.cra_name, CRYPTO_MAX_ALG_NAME, "%s(%s)", name, alg->cra_name) >= CRYPTO_MAX_ALG_NAME) return -ENAMETOOLONG; if (snprintf(inst->alg.cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s(%s)", - name, alg->cra_driver_name) >= CRYPTO_MAX_ALG_NAME) + driver, alg->cra_driver_name) >= CRYPTO_MAX_ALG_NAME) return -ENAMETOOLONG; return 0; } -EXPORT_SYMBOL_GPL(crypto_inst_setname); +EXPORT_SYMBOL_GPL(__crypto_inst_setname); void crypto_init_queue(struct crypto_queue *queue, unsigned int max_qlen) { diff --git a/include/crypto/algapi.h b/include/crypto/algapi.h index 423e57eca351..188eface0a11 100644 --- a/include/crypto/algapi.h +++ b/include/crypto/algapi.h @@ -146,8 +146,16 @@ void *crypto_spawn_tfm2(struct crypto_spawn *spawn); struct crypto_attr_type *crypto_get_attr_type(struct rtattr **tb); int crypto_check_attr_type(struct rtattr **tb, u32 type, u32 *mask_ret); const char *crypto_attr_alg_name(struct rtattr *rta); -int crypto_inst_setname(struct crypto_instance *inst, const char *name, - struct crypto_alg *alg); +int __crypto_inst_setname(struct crypto_instance *inst, const char *name, + const char *driver, struct crypto_alg *alg); + +#define crypto_inst_setname(inst, name, ...) \ + CONCATENATE(crypto_inst_setname_, COUNT_ARGS(__VA_ARGS__))( \ + inst, name, ##__VA_ARGS__) +#define crypto_inst_setname_1(inst, name, alg) \ + __crypto_inst_setname(inst, name, name, alg) +#define crypto_inst_setname_2(inst, name, driver, alg) \ + __crypto_inst_setname(inst, name, driver, alg) void crypto_init_queue(struct crypto_queue *queue, unsigned int max_qlen); int crypto_enqueue_request(struct crypto_queue *queue, -- 2.51.0 From 8e69871836669177d2d2a5acec5c8dca3319d2f1 Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Thu, 15 May 2025 13:54:49 +0800 Subject: [PATCH 11/16] crypto: testmgr - Ignore EEXIST on shash allocation Soon hmac will support ahash. For compatibility hmac still supports shash so it is possible for two hmac algorithms to be registered at the same time. The shash algorithm will have the driver name "hmac-shash(XXX-driver)". Due to a quirk in the API, there is no way to locate the shash algorithm using the name "hmac(XXX-driver)". It has to be addressed as either "hmac(XXX)" or "hmac-shash(XXX-driver)". Looking it up with "hmac(XXX-driver)" will simply trigger the creation of another instance, and on the second instantiation this will fail with EEXIST. Catch the error EEXIST along with ENOENT since it is expected. If a real shash algorithm came this way, it would be addressed using the proper name "hmac-shash(XXX-driver)". 
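A hedged sketch of the allocation pattern that results (the helper and the driver names are examples, not part of the patch):

#include <linux/err.h>
#include <crypto/hash.h>

/* Illustration only: treat both errors as "no shash variant to test". */
static struct crypto_shash *try_alloc_shash(const char *driver)
{
        struct crypto_shash *tfm = crypto_alloc_shash(driver, 0, 0);

        /*
         * -ENOENT: the algorithm only exists behind the ahash API.
         * -EEXIST: a name such as "hmac(sha256-generic)" collided with
         * the already-registered instance instead of resolving to it.
         */
        if (IS_ERR(tfm) &&
            (PTR_ERR(tfm) == -ENOENT || PTR_ERR(tfm) == -EEXIST))
                return NULL;

        return tfm;
}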
Signed-off-by: Herbert Xu --- crypto/testmgr.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/crypto/testmgr.c b/crypto/testmgr.c index fc28000c27f5..ee682ad50e34 100644 --- a/crypto/testmgr.c +++ b/crypto/testmgr.c @@ -1869,7 +1869,7 @@ static int alloc_shash(const char *driver, u32 type, u32 mask, tfm = crypto_alloc_shash(driver, type, mask); if (IS_ERR(tfm)) { - if (PTR_ERR(tfm) == -ENOENT) { + if (PTR_ERR(tfm) == -ENOENT || PTR_ERR(tfm) == -EEXIST) { /* * This algorithm is only available through the ahash * API, not the shash API, so skip the shash tests. -- 2.51.0 From c3103416d5217655d707d9417aaf66f184e3d72f Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Thu, 15 May 2025 13:54:51 +0800 Subject: [PATCH 12/16] crypto: hmac - Add ahash support Add ahash support to hmac so that drivers that can't do hmac in hardware do not have to implement duplicate copies of hmac. Signed-off-by: Herbert Xu --- crypto/ahash.c | 10 +- crypto/hmac.c | 338 +++++++++++++++++++++++++++++++-- include/crypto/hash.h | 3 +- include/crypto/internal/hash.h | 9 + 4 files changed, 345 insertions(+), 15 deletions(-) diff --git a/crypto/ahash.c b/crypto/ahash.c index bf8375bb32c9..e10bc2659ae4 100644 --- a/crypto/ahash.c +++ b/crypto/ahash.c @@ -846,7 +846,7 @@ int crypto_has_ahash(const char *alg_name, u32 type, u32 mask) } EXPORT_SYMBOL_GPL(crypto_has_ahash); -static bool crypto_hash_alg_has_setkey(struct hash_alg_common *halg) +bool crypto_hash_alg_has_setkey(struct hash_alg_common *halg) { struct crypto_alg *alg = &halg->base; @@ -855,6 +855,7 @@ static bool crypto_hash_alg_has_setkey(struct hash_alg_common *halg) return __crypto_ahash_alg(alg)->setkey != ahash_nosetkey; } +EXPORT_SYMBOL_GPL(crypto_hash_alg_has_setkey); struct crypto_ahash *crypto_clone_ahash(struct crypto_ahash *hash) { @@ -1077,5 +1078,12 @@ int crypto_hash_digest(struct crypto_ahash *tfm, const u8 *data, } EXPORT_SYMBOL_GPL(crypto_hash_digest); +void ahash_free_singlespawn_instance(struct ahash_instance *inst) +{ + crypto_drop_spawn(ahash_instance_ctx(inst)); + kfree(inst); +} +EXPORT_SYMBOL_GPL(ahash_free_singlespawn_instance); + MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("Asynchronous cryptographic hash type"); diff --git a/crypto/hmac.c b/crypto/hmac.c index e4749a1f93dd..148af460ae97 100644 --- a/crypto/hmac.c +++ b/crypto/hmac.c @@ -26,6 +26,12 @@ struct hmac_ctx { u8 pads[]; }; +struct ahash_hmac_ctx { + struct crypto_ahash *hash; + /* Contains 'u8 ipad[statesize];', then 'u8 opad[statesize];' */ + u8 pads[]; +}; + static int hmac_setkey(struct crypto_shash *parent, const u8 *inkey, unsigned int keylen) { @@ -173,21 +179,17 @@ static void hmac_exit_tfm(struct crypto_shash *parent) crypto_free_shash(tctx->hash); } -static int hmac_create(struct crypto_template *tmpl, struct rtattr **tb) +static int __hmac_create_shash(struct crypto_template *tmpl, + struct rtattr **tb, u32 mask) { struct shash_instance *inst; struct crypto_shash_spawn *spawn; struct crypto_alg *alg; struct shash_alg *salg; - u32 mask; int err; int ds; int ss; - err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_SHASH, &mask); - if (err) - return err; - inst = kzalloc(sizeof(*inst) + sizeof(*spawn), GFP_KERNEL); if (!inst) return -ENOMEM; @@ -212,7 +214,8 @@ static int hmac_create(struct crypto_template *tmpl, struct rtattr **tb) ss < alg->cra_blocksize) goto err_free_inst; - err = crypto_inst_setname(shash_crypto_instance(inst), tmpl->name, alg); + err = crypto_inst_setname(shash_crypto_instance(inst), "hmac", + "hmac-shash", alg); if (err) goto 
err_free_inst; @@ -245,20 +248,329 @@ err_free_inst: return err; } -static struct crypto_template hmac_tmpl = { - .name = "hmac", - .create = hmac_create, - .module = THIS_MODULE, +static int hmac_setkey_ahash(struct crypto_ahash *parent, + const u8 *inkey, unsigned int keylen) +{ + struct ahash_hmac_ctx *tctx = crypto_ahash_ctx(parent); + struct crypto_ahash *fb = crypto_ahash_fb(tctx->hash); + int ds = crypto_ahash_digestsize(parent); + int bs = crypto_ahash_blocksize(parent); + int ss = crypto_ahash_statesize(parent); + HASH_REQUEST_ON_STACK(req, fb); + u8 *opad = &tctx->pads[ss]; + u8 *ipad = &tctx->pads[0]; + int err, i; + + if (fips_enabled && (keylen < 112 / 8)) + return -EINVAL; + + ahash_request_set_callback(req, 0, NULL, NULL); + + if (keylen > bs) { + ahash_request_set_virt(req, inkey, ipad, keylen); + err = crypto_ahash_digest(req); + if (err) + goto out_zero_req; + + keylen = ds; + } else + memcpy(ipad, inkey, keylen); + + memset(ipad + keylen, 0, bs - keylen); + memcpy(opad, ipad, bs); + + for (i = 0; i < bs; i++) { + ipad[i] ^= HMAC_IPAD_VALUE; + opad[i] ^= HMAC_OPAD_VALUE; + } + + ahash_request_set_virt(req, ipad, NULL, bs); + err = crypto_ahash_init(req) ?: + crypto_ahash_update(req) ?: + crypto_ahash_export(req, ipad); + + ahash_request_set_virt(req, opad, NULL, bs); + err = err ?: + crypto_ahash_init(req) ?: + crypto_ahash_update(req) ?: + crypto_ahash_export(req, opad); + +out_zero_req: + HASH_REQUEST_ZERO(req); + return err; +} + +static int hmac_export_ahash(struct ahash_request *preq, void *out) +{ + return crypto_ahash_export(ahash_request_ctx(preq), out); +} + +static int hmac_import_ahash(struct ahash_request *preq, const void *in) +{ + struct crypto_ahash *tfm = crypto_ahash_reqtfm(preq); + struct ahash_hmac_ctx *tctx = crypto_ahash_ctx(tfm); + struct ahash_request *req = ahash_request_ctx(preq); + + ahash_request_set_tfm(req, tctx->hash); + return crypto_ahash_import(req, in); +} + +static int hmac_export_core_ahash(struct ahash_request *preq, void *out) +{ + return crypto_ahash_export_core(ahash_request_ctx(preq), out); +} + +static int hmac_import_core_ahash(struct ahash_request *preq, const void *in) +{ + struct crypto_ahash *tfm = crypto_ahash_reqtfm(preq); + struct ahash_hmac_ctx *tctx = crypto_ahash_ctx(tfm); + struct ahash_request *req = ahash_request_ctx(preq); + + ahash_request_set_tfm(req, tctx->hash); + return crypto_ahash_import_core(req, in); +} + +static int hmac_init_ahash(struct ahash_request *preq) +{ + struct crypto_ahash *tfm = crypto_ahash_reqtfm(preq); + struct ahash_hmac_ctx *tctx = crypto_ahash_ctx(tfm); + + return hmac_import_ahash(preq, &tctx->pads[0]); +} + +static int hmac_update_ahash(struct ahash_request *preq) +{ + struct ahash_request *req = ahash_request_ctx(preq); + + ahash_request_set_callback(req, ahash_request_flags(preq), + preq->base.complete, preq->base.data); + if (ahash_request_isvirt(preq)) + ahash_request_set_virt(req, preq->svirt, NULL, preq->nbytes); + else + ahash_request_set_crypt(req, preq->src, NULL, preq->nbytes); + return crypto_ahash_update(req); +} + +static int hmac_finup_finish(struct ahash_request *preq, unsigned int mask) +{ + struct crypto_ahash *tfm = crypto_ahash_reqtfm(preq); + struct ahash_request *req = ahash_request_ctx(preq); + struct ahash_hmac_ctx *tctx = crypto_ahash_ctx(tfm); + int ds = crypto_ahash_digestsize(tfm); + int ss = crypto_ahash_statesize(tfm); + const u8 *opad = &tctx->pads[ss]; + + ahash_request_set_callback(req, ahash_request_flags(preq) & ~mask, + preq->base.complete, 
preq->base.data); + ahash_request_set_virt(req, preq->result, preq->result, ds); + return crypto_ahash_import(req, opad) ?: + crypto_ahash_finup(req); + +} + +static void hmac_finup_done(void *data, int err) +{ + struct ahash_request *preq = data; + + if (err) + goto out; + + err = hmac_finup_finish(preq, CRYPTO_TFM_REQ_MAY_SLEEP); + if (err == -EINPROGRESS || err == -EBUSY) + return; + +out: + ahash_request_complete(preq, err); +} + +static int hmac_finup_ahash(struct ahash_request *preq) +{ + struct ahash_request *req = ahash_request_ctx(preq); + + ahash_request_set_callback(req, ahash_request_flags(preq), + hmac_finup_done, preq); + if (ahash_request_isvirt(preq)) + ahash_request_set_virt(req, preq->svirt, preq->result, + preq->nbytes); + else + ahash_request_set_crypt(req, preq->src, preq->result, + preq->nbytes); + return crypto_ahash_finup(req) ?: + hmac_finup_finish(preq, 0); +} + +static int hmac_digest_ahash(struct ahash_request *preq) +{ + return hmac_init_ahash(preq) ?: + hmac_finup_ahash(preq); +} + +static int hmac_init_ahash_tfm(struct crypto_ahash *parent) +{ + struct ahash_instance *inst = ahash_alg_instance(parent); + struct ahash_hmac_ctx *tctx = crypto_ahash_ctx(parent); + struct crypto_ahash *hash; + + hash = crypto_spawn_ahash(ahash_instance_ctx(inst)); + if (IS_ERR(hash)) + return PTR_ERR(hash); + + if (crypto_ahash_reqsize(parent) < sizeof(struct ahash_request) + + crypto_ahash_reqsize(hash)) + return -EINVAL; + + tctx->hash = hash; + return 0; +} + +static int hmac_clone_ahash_tfm(struct crypto_ahash *dst, + struct crypto_ahash *src) +{ + struct ahash_hmac_ctx *sctx = crypto_ahash_ctx(src); + struct ahash_hmac_ctx *dctx = crypto_ahash_ctx(dst); + struct crypto_ahash *hash; + + hash = crypto_clone_ahash(sctx->hash); + if (IS_ERR(hash)) + return PTR_ERR(hash); + + dctx->hash = hash; + return 0; +} + +static void hmac_exit_ahash_tfm(struct crypto_ahash *parent) +{ + struct ahash_hmac_ctx *tctx = crypto_ahash_ctx(parent); + + crypto_free_ahash(tctx->hash); +} + +static int hmac_create_ahash(struct crypto_template *tmpl, struct rtattr **tb, + u32 mask) +{ + struct crypto_ahash_spawn *spawn; + struct ahash_instance *inst; + struct crypto_alg *alg; + struct hash_alg_common *halg; + int ds, ss, err; + + inst = kzalloc(sizeof(*inst) + sizeof(*spawn), GFP_KERNEL); + if (!inst) + return -ENOMEM; + spawn = ahash_instance_ctx(inst); + + mask |= CRYPTO_AHASH_ALG_NO_EXPORT_CORE; + err = crypto_grab_ahash(spawn, ahash_crypto_instance(inst), + crypto_attr_alg_name(tb[1]), 0, mask); + if (err) + goto err_free_inst; + halg = crypto_spawn_ahash_alg(spawn); + alg = &halg->base; + + /* The underlying hash algorithm must not require a key */ + err = -EINVAL; + if (crypto_hash_alg_needs_key(halg)) + goto err_free_inst; + + ds = halg->digestsize; + ss = halg->statesize; + if (ds > alg->cra_blocksize || ss < alg->cra_blocksize) + goto err_free_inst; + + err = crypto_inst_setname(ahash_crypto_instance(inst), tmpl->name, alg); + if (err) + goto err_free_inst; + + inst->alg.halg.base.cra_flags = alg->cra_flags & + CRYPTO_ALG_INHERITED_FLAGS; + inst->alg.halg.base.cra_flags |= CRYPTO_ALG_REQ_VIRT; + inst->alg.halg.base.cra_priority = alg->cra_priority + 100; + inst->alg.halg.base.cra_blocksize = alg->cra_blocksize; + inst->alg.halg.base.cra_ctxsize = sizeof(struct ahash_hmac_ctx) + + (ss * 2); + inst->alg.halg.base.cra_reqsize = sizeof(struct ahash_request) + + alg->cra_reqsize; + + inst->alg.halg.digestsize = ds; + inst->alg.halg.statesize = ss; + inst->alg.init = hmac_init_ahash; + 
inst->alg.update = hmac_update_ahash; + inst->alg.finup = hmac_finup_ahash; + inst->alg.digest = hmac_digest_ahash; + inst->alg.export = hmac_export_ahash; + inst->alg.import = hmac_import_ahash; + inst->alg.export_core = hmac_export_core_ahash; + inst->alg.import_core = hmac_import_core_ahash; + inst->alg.setkey = hmac_setkey_ahash; + inst->alg.init_tfm = hmac_init_ahash_tfm; + inst->alg.clone_tfm = hmac_clone_ahash_tfm; + inst->alg.exit_tfm = hmac_exit_ahash_tfm; + + inst->free = ahash_free_singlespawn_instance; + + err = ahash_register_instance(tmpl, inst); + if (err) { +err_free_inst: + ahash_free_singlespawn_instance(inst); + } + return err; +} + +static int hmac_create(struct crypto_template *tmpl, struct rtattr **tb) +{ + struct crypto_attr_type *algt; + u32 mask; + + algt = crypto_get_attr_type(tb); + if (IS_ERR(algt)) + return PTR_ERR(algt); + + mask = crypto_algt_inherited_mask(algt); + + if (!((algt->type ^ CRYPTO_ALG_TYPE_AHASH) & + algt->mask & CRYPTO_ALG_TYPE_MASK)) + return hmac_create_ahash(tmpl, tb, mask); + + if ((algt->type ^ CRYPTO_ALG_TYPE_SHASH) & + algt->mask & CRYPTO_ALG_TYPE_MASK) + return -EINVAL; + + return __hmac_create_shash(tmpl, tb, mask); +} + +static int hmac_create_shash(struct crypto_template *tmpl, struct rtattr **tb) +{ + u32 mask; + int err; + + err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_SHASH, &mask); + if (err) + return err == -EINVAL ? -ENOENT : err; + + return __hmac_create_shash(tmpl, tb, mask); +} + +static struct crypto_template hmac_tmpls[] = { + { + .name = "hmac", + .create = hmac_create, + .module = THIS_MODULE, + }, + { + .name = "hmac-shash", + .create = hmac_create_shash, + .module = THIS_MODULE, + }, }; static int __init hmac_module_init(void) { - return crypto_register_template(&hmac_tmpl); + return crypto_register_templates(hmac_tmpls, ARRAY_SIZE(hmac_tmpls)); } static void __exit hmac_module_exit(void) { - crypto_unregister_template(&hmac_tmpl); + crypto_unregister_templates(hmac_tmpls, ARRAY_SIZE(hmac_tmpls)); } module_init(hmac_module_init); diff --git a/include/crypto/hash.h b/include/crypto/hash.h index 05ee817a3180..6f6b9de12cd3 100644 --- a/include/crypto/hash.h +++ b/include/crypto/hash.h @@ -185,7 +185,8 @@ struct shash_desc { * containing a 'struct s390_sha_ctx'. 
*/ #define HASH_MAX_DESCSIZE (sizeof(struct shash_desc) + 360) -#define MAX_SYNC_HASH_REQSIZE HASH_MAX_DESCSIZE +#define MAX_SYNC_HASH_REQSIZE (sizeof(struct ahash_request) + \ + HASH_MAX_DESCSIZE) #define SHASH_DESC_ON_STACK(shash, ctx) \ char __##shash##_desc[sizeof(struct shash_desc) + HASH_MAX_DESCSIZE] \ diff --git a/include/crypto/internal/hash.h b/include/crypto/internal/hash.h index e9de2bc34a10..0f85c543f80b 100644 --- a/include/crypto/internal/hash.h +++ b/include/crypto/internal/hash.h @@ -67,6 +67,7 @@ int crypto_register_ahashes(struct ahash_alg *algs, int count); void crypto_unregister_ahashes(struct ahash_alg *algs, int count); int ahash_register_instance(struct crypto_template *tmpl, struct ahash_instance *inst); +void ahash_free_singlespawn_instance(struct ahash_instance *inst); int shash_no_setkey(struct crypto_shash *tfm, const u8 *key, unsigned int keylen); @@ -76,12 +77,20 @@ static inline bool crypto_shash_alg_has_setkey(struct shash_alg *alg) return alg->setkey != shash_no_setkey; } +bool crypto_hash_alg_has_setkey(struct hash_alg_common *halg); + static inline bool crypto_shash_alg_needs_key(struct shash_alg *alg) { return crypto_shash_alg_has_setkey(alg) && !(alg->base.cra_flags & CRYPTO_ALG_OPTIONAL_KEY); } +static inline bool crypto_hash_alg_needs_key(struct hash_alg_common *alg) +{ + return crypto_hash_alg_has_setkey(alg) && + !(alg->base.cra_flags & CRYPTO_ALG_OPTIONAL_KEY); +} + int crypto_grab_ahash(struct crypto_ahash_spawn *spawn, struct crypto_instance *inst, const char *name, u32 type, u32 mask); -- 2.51.0 From c2a813545ed4810540dcc8ebdcb21092fda80be5 Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Thu, 15 May 2025 13:54:54 +0800 Subject: [PATCH 13/16] crypto: testmgr - Use ahash for generic tfm As shash is being phased out, use ahash for the generic tfm. Signed-off-by: Herbert Xu --- crypto/testmgr.c | 37 ++++++++++++++++++------------------- 1 file changed, 18 insertions(+), 19 deletions(-) diff --git a/crypto/testmgr.c b/crypto/testmgr.c index ee682ad50e34..72005074a5c2 100644 --- a/crypto/testmgr.c +++ b/crypto/testmgr.c @@ -1699,7 +1699,7 @@ static int test_hash_vec(const struct hash_testvec *vec, unsigned int vec_num, * Assumes the buffers in 'vec' were already allocated. */ static void generate_random_hash_testvec(struct rnd_state *rng, - struct shash_desc *desc, + struct ahash_request *req, struct hash_testvec *vec, unsigned int maxkeysize, unsigned int maxdatasize, @@ -1721,16 +1721,17 @@ static void generate_random_hash_testvec(struct rnd_state *rng, vec->ksize = prandom_u32_inclusive(rng, 1, maxkeysize); generate_random_bytes(rng, (u8 *)vec->key, vec->ksize); - vec->setkey_error = crypto_shash_setkey(desc->tfm, vec->key, - vec->ksize); + vec->setkey_error = crypto_ahash_setkey( + crypto_ahash_reqtfm(req), vec->key, vec->ksize); /* If the key couldn't be set, no need to continue to digest. 
*/ if (vec->setkey_error) goto done; } /* Digest */ - vec->digest_error = crypto_shash_digest(desc, vec->plaintext, - vec->psize, (u8 *)vec->digest); + vec->digest_error = crypto_hash_digest( + crypto_ahash_reqtfm(req), vec->plaintext, + vec->psize, (u8 *)vec->digest); done: snprintf(name, max_namelen, "\"random: psize=%u ksize=%u\"", vec->psize, vec->ksize); @@ -1755,8 +1756,8 @@ static int test_hash_vs_generic_impl(const char *generic_driver, const char *driver = crypto_ahash_driver_name(tfm); struct rnd_state rng; char _generic_driver[CRYPTO_MAX_ALG_NAME]; - struct crypto_shash *generic_tfm = NULL; - struct shash_desc *generic_desc = NULL; + struct ahash_request *generic_req = NULL; + struct crypto_ahash *generic_tfm = NULL; unsigned int i; struct hash_testvec vec = { 0 }; char vec_name[64]; @@ -1779,7 +1780,7 @@ static int test_hash_vs_generic_impl(const char *generic_driver, if (strcmp(generic_driver, driver) == 0) /* Already the generic impl? */ return 0; - generic_tfm = crypto_alloc_shash(generic_driver, 0, 0); + generic_tfm = crypto_alloc_ahash(generic_driver, 0, 0); if (IS_ERR(generic_tfm)) { err = PTR_ERR(generic_tfm); if (err == -ENOENT) { @@ -1798,27 +1799,25 @@ static int test_hash_vs_generic_impl(const char *generic_driver, goto out; } - generic_desc = kzalloc(sizeof(*desc) + - crypto_shash_descsize(generic_tfm), GFP_KERNEL); - if (!generic_desc) { + generic_req = ahash_request_alloc(generic_tfm, GFP_KERNEL); + if (!generic_req) { err = -ENOMEM; goto out; } - generic_desc->tfm = generic_tfm; /* Check the algorithm properties for consistency. */ - if (digestsize != crypto_shash_digestsize(generic_tfm)) { + if (digestsize != crypto_ahash_digestsize(generic_tfm)) { pr_err("alg: hash: digestsize for %s (%u) doesn't match generic impl (%u)\n", driver, digestsize, - crypto_shash_digestsize(generic_tfm)); + crypto_ahash_digestsize(generic_tfm)); err = -EINVAL; goto out; } - if (blocksize != crypto_shash_blocksize(generic_tfm)) { + if (blocksize != crypto_ahash_blocksize(generic_tfm)) { pr_err("alg: hash: blocksize for %s (%u) doesn't match generic impl (%u)\n", - driver, blocksize, crypto_shash_blocksize(generic_tfm)); + driver, blocksize, crypto_ahash_blocksize(generic_tfm)); err = -EINVAL; goto out; } @@ -1837,7 +1836,7 @@ static int test_hash_vs_generic_impl(const char *generic_driver, } for (i = 0; i < fuzz_iterations * 8; i++) { - generate_random_hash_testvec(&rng, generic_desc, &vec, + generate_random_hash_testvec(&rng, generic_req, &vec, maxkeysize, maxdatasize, vec_name, sizeof(vec_name)); generate_random_testvec_config(&rng, cfg, cfgname, @@ -1855,8 +1854,8 @@ out: kfree(vec.key); kfree(vec.plaintext); kfree(vec.digest); - crypto_free_shash(generic_tfm); - kfree_sensitive(generic_desc); + ahash_request_free(generic_req); + crypto_free_ahash(generic_tfm); return err; } -- 2.51.0 From 18c438b228558e05ede7dccf947a6547516fc0c7 Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Thu, 15 May 2025 13:54:56 +0800 Subject: [PATCH 14/16] crypto: testmgr - Add hash export format testing Ensure that the hash state can be exported to and imported from the generic algorithm. 
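The check boils down to the following shape; this is a sketch only, with fbreq standing for a request on the generic fallback, while the real check_ahash_export() below adds poisoning and error reporting:

#include <crypto/hash.h>

/*
 * Sketch: state exported by the implementation under test must be
 * accepted by the fallback, and finishing on the fallback must produce
 * the expected digest.
 */
static int export_format_crosscheck(struct ahash_request *req,
                                    struct ahash_request *fbreq,
                                    void *state, u8 *digest)
{
        ahash_request_set_crypt(fbreq, NULL, digest, 0);
        return crypto_ahash_export(req, state) ?:
               crypto_ahash_import(fbreq, state) ?:
               crypto_ahash_final(fbreq);
}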
Signed-off-by: Herbert Xu --- crypto/testmgr.c | 95 ++++++++++++++++++++++++++++++---- crypto/testmgr.h | 2 + include/crypto/internal/hash.h | 6 +++ 3 files changed, 94 insertions(+), 9 deletions(-) diff --git a/crypto/testmgr.c b/crypto/testmgr.c index 72005074a5c2..737064b31480 100644 --- a/crypto/testmgr.c +++ b/crypto/testmgr.c @@ -17,10 +17,19 @@ */ #include -#include +#include +#include +#include +#include +#include +#include +#include +#include +#include #include #include #include +#include #include #include #include @@ -28,14 +37,6 @@ #include #include #include -#include -#include -#include -#include -#include -#include -#include -#include #include "internal.h" @@ -1464,6 +1465,49 @@ static int check_nonfinal_ahash_op(const char *op, int err, return 0; } +static int check_ahash_export(struct ahash_request *req, + const struct hash_testvec *vec, + const char *vec_name, + const struct testvec_config *cfg, + const char *driver, u8 *hashstate) +{ + struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); + const unsigned int digestsize = crypto_ahash_digestsize(tfm); + HASH_FBREQ_ON_STACK(fbreq, req); + int err; + + if (!vec->state) + return 0; + + err = crypto_ahash_export(req, hashstate); + if (err) { + pr_err("alg: ahash: %s mixed export() failed with err %d on test vector %s, cfg=\"%s\"\n", + driver, err, vec_name, cfg->name); + return err; + } + err = crypto_ahash_import(req, vec->state); + if (err) { + pr_err("alg: ahash: %s mixed import() failed with err %d on test vector %s, cfg=\"%s\"\n", + driver, err, vec_name, cfg->name); + return err; + } + err = crypto_ahash_import(fbreq, hashstate); + if (err) { + pr_err("alg: ahash: %s fallback import() failed with err %d on test vector %s, cfg=\"%s\"\n", + crypto_ahash_driver_name(crypto_ahash_reqtfm(fbreq)), err, vec_name, cfg->name); + return err; + } + ahash_request_set_crypt(fbreq, NULL, hashstate, 0); + testmgr_poison(hashstate, digestsize + TESTMGR_POISON_LEN); + err = crypto_ahash_final(fbreq); + if (err) { + pr_err("alg: ahash: %s fallback final() failed with err %d on test vector %s, cfg=\"%s\"\n", + crypto_ahash_driver_name(crypto_ahash_reqtfm(fbreq)), err, vec_name, cfg->name); + return err; + } + return check_hash_result("ahash export", hashstate, digestsize, vec, vec_name, driver, cfg); +} + /* Test one hash test vector in one configuration, using the ahash API */ static int test_ahash_vec_cfg(const struct hash_testvec *vec, const char *vec_name, @@ -1609,6 +1653,10 @@ static int test_ahash_vec_cfg(const struct hash_testvec *vec, driver, vec_name, cfg); if (err) return err; + err = check_ahash_export(req, vec, vec_name, cfg, + driver, hashstate); + if (err) + return err; err = do_ahash_op(crypto_ahash_final, req, &wait, cfg->nosimd); if (err) { pr_err("alg: ahash: %s final() failed with err %d on test vector %s, cfg=\"%s\"\n", @@ -1732,6 +1780,17 @@ static void generate_random_hash_testvec(struct rnd_state *rng, vec->digest_error = crypto_hash_digest( crypto_ahash_reqtfm(req), vec->plaintext, vec->psize, (u8 *)vec->digest); + + if (vec->digest_error || !vec->state) + goto done; + + ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP, NULL, NULL); + ahash_request_set_virt(req, vec->plaintext, (u8 *)vec->digest, + vec->psize); + crypto_ahash_init(req); + crypto_ahash_update(req); + crypto_ahash_export(req, (u8 *)vec->state); + done: snprintf(name, max_namelen, "\"random: psize=%u ksize=%u\"", vec->psize, vec->ksize); @@ -1750,6 +1809,7 @@ static int test_hash_vs_generic_impl(const char *generic_driver, { struct crypto_ahash 
*tfm = crypto_ahash_reqtfm(req); const unsigned int digestsize = crypto_ahash_digestsize(tfm); + const unsigned int statesize = crypto_ahash_statesize(tfm); const unsigned int blocksize = crypto_ahash_blocksize(tfm); const unsigned int maxdatasize = (2 * PAGE_SIZE) - TESTMGR_POISON_LEN; const char *algname = crypto_hash_alg_common(tfm)->base.cra_name; @@ -1822,6 +1882,22 @@ static int test_hash_vs_generic_impl(const char *generic_driver, goto out; } + if (crypto_hash_no_export_core(tfm) || + crypto_hash_no_export_core(generic_tfm)) + ; + else if (statesize != crypto_ahash_statesize(generic_tfm)) { + pr_err("alg: hash: statesize for %s (%u) doesn't match generic impl (%u)\n", + driver, statesize, + crypto_ahash_statesize(generic_tfm)); + err = -EINVAL; + goto out; + } else { + vec.state = kmalloc(statesize, GFP_KERNEL); + err = -ENOMEM; + if (!vec.state) + goto out; + } + /* * Now generate test vectors using the generic implementation, and test * the other implementation against them. @@ -1854,6 +1930,7 @@ out: kfree(vec.key); kfree(vec.plaintext); kfree(vec.digest); + kfree(vec.state); ahash_request_free(generic_req); crypto_free_ahash(generic_tfm); return err; diff --git a/crypto/testmgr.h b/crypto/testmgr.h index 32d099ac9e73..5cf455a708b8 100644 --- a/crypto/testmgr.h +++ b/crypto/testmgr.h @@ -29,6 +29,7 @@ * hash_testvec: structure to describe a hash (message digest) test * @key: Pointer to key (NULL if none) * @plaintext: Pointer to source data + * @state: Pointer to expected state * @digest: Pointer to expected digest * @psize: Length of source data in bytes * @ksize: Length of @key in bytes (0 if no key) @@ -39,6 +40,7 @@ struct hash_testvec { const char *key; const char *plaintext; + const char *state; const char *digest; unsigned int psize; unsigned short ksize; diff --git a/include/crypto/internal/hash.h b/include/crypto/internal/hash.h index 0f85c543f80b..f052afa6e7b0 100644 --- a/include/crypto/internal/hash.h +++ b/include/crypto/internal/hash.h @@ -91,6 +91,12 @@ static inline bool crypto_hash_alg_needs_key(struct hash_alg_common *alg) !(alg->base.cra_flags & CRYPTO_ALG_OPTIONAL_KEY); } +static inline bool crypto_hash_no_export_core(struct crypto_ahash *tfm) +{ + return crypto_hash_alg_common(tfm)->base.cra_flags & + CRYPTO_AHASH_ALG_NO_EXPORT_CORE; +} + int crypto_grab_ahash(struct crypto_ahash_spawn *spawn, struct crypto_instance *inst, const char *name, u32 type, u32 mask); -- 2.51.0 From 3d73909bddc2ebb3224a8bc2e5ce00e9df70c15d Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Thu, 15 May 2025 16:28:08 +0800 Subject: [PATCH 15/16] crypto: lrw - Only add ecb if it is not already there Only add ecb to the cipher name if it isn't already ecb. Also use memcmp instead of strncmp since these strings are all stored in an array of length CRYPTO_MAX_ALG_NAME. 
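In other words, the test is a fixed-length prefix comparison, which is safe because cipher_name always lives in a CRYPTO_MAX_ALG_NAME-sized array; a hedged sketch (the helper name is invented):

#include <linux/string.h>
#include <linux/types.h>

/*
 * Sketch only: four bytes are always readable from the name buffer, so
 * memcmp() needs none of the NUL-termination handling strncmp() was
 * (needlessly) providing.
 */
static bool name_has_ecb_prefix(const char *cipher_name)
{
        return !memcmp(cipher_name, "ecb(", 4);
}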
Fixes: 700cb3f5fe75 ("crypto: lrw - Convert to skcipher") Reported-by: kernel test robot Closes: https://lore.kernel.org/oe-lkp/202505151503.d8a6cf10-lkp@intel.com Signed-off-by: Herbert Xu --- crypto/lrw.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/crypto/lrw.c b/crypto/lrw.c index e7f0368f8c97..dd403b800513 100644 --- a/crypto/lrw.c +++ b/crypto/lrw.c @@ -322,7 +322,7 @@ static int lrw_create(struct crypto_template *tmpl, struct rtattr **tb) err = crypto_grab_skcipher(spawn, skcipher_crypto_instance(inst), cipher_name, 0, mask); - if (err == -ENOENT) { + if (err == -ENOENT && memcmp(cipher_name, "ecb(", 4)) { err = -ENAMETOOLONG; if (snprintf(ecb_name, CRYPTO_MAX_ALG_NAME, "ecb(%s)", cipher_name) >= CRYPTO_MAX_ALG_NAME) @@ -356,7 +356,7 @@ static int lrw_create(struct crypto_template *tmpl, struct rtattr **tb) /* Alas we screwed up the naming so we have to mangle the * cipher name. */ - if (!strncmp(cipher_name, "ecb(", 4)) { + if (!memcmp(cipher_name, "ecb(", 4)) { int len; len = strscpy(ecb_name, cipher_name + 4, sizeof(ecb_name)); -- 2.51.0 From 270b6f13454cb7f2f7058c50df64df409c5dcf55 Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Thu, 15 May 2025 16:34:04 +0800 Subject: [PATCH 16/16] crypto: xts - Only add ecb if it is not already there Only add ecb to the cipher name if it isn't already ecb. Also use memcmp instead of strncmp since these strings are all stored in an array of length CRYPTO_MAX_ALG_NAME. Fixes: f1c131b45410 ("crypto: xts - Convert to skcipher") Signed-off-by: Herbert Xu --- crypto/xts.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/crypto/xts.c b/crypto/xts.c index 1a9edd55a3a2..3da8f5e053d6 100644 --- a/crypto/xts.c +++ b/crypto/xts.c @@ -363,7 +363,7 @@ static int xts_create(struct crypto_template *tmpl, struct rtattr **tb) err = crypto_grab_skcipher(&ctx->spawn, skcipher_crypto_instance(inst), cipher_name, 0, mask); - if (err == -ENOENT) { + if (err == -ENOENT && memcmp(cipher_name, "ecb(", 4)) { err = -ENAMETOOLONG; if (snprintf(name, CRYPTO_MAX_ALG_NAME, "ecb(%s)", cipher_name) >= CRYPTO_MAX_ALG_NAME) @@ -397,7 +397,7 @@ static int xts_create(struct crypto_template *tmpl, struct rtattr **tb) /* Alas we screwed up the naming so we have to mangle the * cipher name. */ - if (!strncmp(cipher_name, "ecb(", 4)) { + if (!memcmp(cipher_name, "ecb(", 4)) { int len; len = strscpy(name, cipher_name + 4, sizeof(name)); -- 2.51.0