www.infradead.org Git - users/dwmw2/linux.git/commitdiff
crypto: ccree - fix FDE descriptor sequence
authorOfir Drang <ofir.drang@arm.com>
Thu, 16 Jan 2020 10:14:42 +0000 (12:14 +0200)
committerGreg Kroah-Hartman <gregkh@linuxfoundation.org>
Tue, 11 Feb 2020 12:36:53 +0000 (04:36 -0800)
commit 5c83e8ec4d51ac4cc58482ed04297e6882b32a09 upstream.

In FDE mode (xts, essiv and bitlocker) the cryptocell hardware requires
that the XEX key will be loaded after Key1.

Signed-off-by: Ofir Drang <ofir.drang@arm.com>
Cc: stable@vger.kernel.org
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
drivers/crypto/ccree/cc_cipher.c

index 3112b58d0bb168b6f2ed86ca2d38c2702fd86f0e..56c9a8f264359bb6907ae122c2697aa475a76223 100644 (file)
@@ -523,6 +523,7 @@ static void cc_setup_readiv_desc(struct crypto_tfm *tfm,
        }
 }
 
+
 static void cc_setup_state_desc(struct crypto_tfm *tfm,
                                 struct cipher_req_ctx *req_ctx,
                                 unsigned int ivsize, unsigned int nbytes,
@@ -534,8 +535,6 @@ static void cc_setup_state_desc(struct crypto_tfm *tfm,
        int cipher_mode = ctx_p->cipher_mode;
        int flow_mode = ctx_p->flow_mode;
        int direction = req_ctx->gen_ctx.op_type;
-       dma_addr_t key_dma_addr = ctx_p->user.key_dma_addr;
-       unsigned int key_len = ctx_p->keylen;
        dma_addr_t iv_dma_addr = req_ctx->gen_ctx.iv_dma_addr;
        unsigned int du_size = nbytes;
 
@@ -570,6 +569,47 @@ static void cc_setup_state_desc(struct crypto_tfm *tfm,
                break;
        case DRV_CIPHER_XTS:
        case DRV_CIPHER_ESSIV:
+       case DRV_CIPHER_BITLOCKER:
+               break;
+       default:
+               dev_err(dev, "Unsupported cipher mode (%d)\n", cipher_mode);
+       }
+}
+
+
+static void cc_setup_xex_state_desc(struct crypto_tfm *tfm,
+                                struct cipher_req_ctx *req_ctx,
+                                unsigned int ivsize, unsigned int nbytes,
+                                struct cc_hw_desc desc[],
+                                unsigned int *seq_size)
+{
+       struct cc_cipher_ctx *ctx_p = crypto_tfm_ctx(tfm);
+       struct device *dev = drvdata_to_dev(ctx_p->drvdata);
+       int cipher_mode = ctx_p->cipher_mode;
+       int flow_mode = ctx_p->flow_mode;
+       int direction = req_ctx->gen_ctx.op_type;
+       dma_addr_t key_dma_addr = ctx_p->user.key_dma_addr;
+       unsigned int key_len = ctx_p->keylen;
+       dma_addr_t iv_dma_addr = req_ctx->gen_ctx.iv_dma_addr;
+       unsigned int du_size = nbytes;
+
+       struct cc_crypto_alg *cc_alg =
+               container_of(tfm->__crt_alg, struct cc_crypto_alg,
+                            skcipher_alg.base);
+
+       if (cc_alg->data_unit)
+               du_size = cc_alg->data_unit;
+
+       switch (cipher_mode) {
+       case DRV_CIPHER_ECB:
+               break;
+       case DRV_CIPHER_CBC:
+       case DRV_CIPHER_CBC_CTS:
+       case DRV_CIPHER_CTR:
+       case DRV_CIPHER_OFB:
+               break;
+       case DRV_CIPHER_XTS:
+       case DRV_CIPHER_ESSIV:
        case DRV_CIPHER_BITLOCKER:
                /* load XEX key */
                hw_desc_init(&desc[*seq_size]);
@@ -881,12 +921,14 @@ static int cc_cipher_process(struct skcipher_request *req,
 
        /* STAT_PHASE_2: Create sequence */
 
-       /* Setup IV and XEX key used */
+       /* Setup state (IV)  */
        cc_setup_state_desc(tfm, req_ctx, ivsize, nbytes, desc, &seq_len);
        /* Setup MLLI line, if needed */
        cc_setup_mlli_desc(tfm, req_ctx, dst, src, nbytes, req, desc, &seq_len);
        /* Setup key */
        cc_setup_key_desc(tfm, req_ctx, nbytes, desc, &seq_len);
+       /* Setup state (IV and XEX key)  */
+       cc_setup_xex_state_desc(tfm, req_ctx, ivsize, nbytes, desc, &seq_len);
        /* Data processing */
        cc_setup_flow_desc(tfm, req_ctx, dst, src, nbytes, desc, &seq_len);
        /* Read next IV */