This patch enables access to usage statistics for each algorithm through a new debugfs file.
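With CRYPTO_DEV_ROCKCHIP_DEBUG enabled, the counters can be read from the
stats file. Illustrative example only (algorithm names and counter values
shown here are hypothetical):

  # cat /sys/kernel/debug/rk3288_crypto/stats
  ecb-aes-rk ecb(aes) reqs=42 fallback=3
  	fallback due to length: 1
  	fallback due to alignment: 2
  	fallback due to SGs: 0
  rk-md5 md5 reqs=10 fallback=0
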
Reviewed-by: John Keeping <john@metanate.com>
Signed-off-by: Corentin Labbe <clabbe@baylibre.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
          This driver interfaces with the hardware crypto accelerator.
          Supporting cbc/ecb chainmode, and aes/des/des3_ede cipher mode.
 
+config CRYPTO_DEV_ROCKCHIP_DEBUG
+       bool "Enable Rockchip crypto stats"
+       depends on CRYPTO_DEV_ROCKCHIP
+       depends on DEBUG_FS
+       help
+         Say y to enable Rockchip crypto debug stats.
+         This will create /sys/kernel/debug/rk3288_crypto/stats for displaying
+         the number of requests per algorithm and other internal stats.
+
 config CRYPTO_DEV_ZYNQMP_AES
        tristate "Support for Xilinx ZynqMP AES hw accelerator"
        depends on ZYNQMP_FIRMWARE || COMPILE_TEST
 
        &rk_ahash_md5,
 };
 
+#ifdef CONFIG_CRYPTO_DEV_ROCKCHIP_DEBUG
+static int rk_crypto_debugfs_show(struct seq_file *seq, void *v)
+{
+       unsigned int i;
+
+       for (i = 0; i < ARRAY_SIZE(rk_cipher_algs); i++) {
+               if (!rk_cipher_algs[i]->dev)
+                       continue;
+               switch (rk_cipher_algs[i]->type) {
+               case CRYPTO_ALG_TYPE_SKCIPHER:
+                       seq_printf(seq, "%s %s reqs=%lu fallback=%lu\n",
+                                  rk_cipher_algs[i]->alg.skcipher.base.cra_driver_name,
+                                  rk_cipher_algs[i]->alg.skcipher.base.cra_name,
+                                  rk_cipher_algs[i]->stat_req, rk_cipher_algs[i]->stat_fb);
+                       seq_printf(seq, "\tfallback due to length: %lu\n",
+                                  rk_cipher_algs[i]->stat_fb_len);
+                       seq_printf(seq, "\tfallback due to alignment: %lu\n",
+                                  rk_cipher_algs[i]->stat_fb_align);
+                       seq_printf(seq, "\tfallback due to SGs: %lu\n",
+                                  rk_cipher_algs[i]->stat_fb_sgdiff);
+                       break;
+               case CRYPTO_ALG_TYPE_AHASH:
+                       seq_printf(seq, "%s %s reqs=%lu fallback=%lu\n",
+                                  rk_cipher_algs[i]->alg.hash.halg.base.cra_driver_name,
+                                  rk_cipher_algs[i]->alg.hash.halg.base.cra_name,
+                                  rk_cipher_algs[i]->stat_req, rk_cipher_algs[i]->stat_fb);
+                       break;
+               }
+       }
+       return 0;
+}
+
+DEFINE_SHOW_ATTRIBUTE(rk_crypto_debugfs);
+#endif
+
 static int rk_crypto_register(struct rk_crypto_info *crypto_info)
 {
        unsigned int i, k;
                goto err_register_alg;
        }
 
+#ifdef CONFIG_CRYPTO_DEV_ROCKCHIP_DEBUG
+       /* Ignore debugfs errors, the stats file is optional */
+       crypto_info->dbgfs_dir = debugfs_create_dir("rk3288_crypto", NULL);
+       crypto_info->dbgfs_stats = debugfs_create_file("stats", 0444,
+                                                      crypto_info->dbgfs_dir,
+                                                      crypto_info,
+                                                      &rk_crypto_debugfs_fops);
+#endif
+
        dev_info(dev, "Crypto Accelerator successfully registered\n");
        return 0;
 
 {
        struct rk_crypto_info *crypto_tmp = platform_get_drvdata(pdev);
 
+#ifdef CONFIG_CRYPTO_DEV_ROCKCHIP_DEBUG
+       debugfs_remove_recursive(crypto_tmp->dbgfs_dir);
+#endif
        rk_crypto_unregister();
        rk_crypto_disable_clk(crypto_tmp);
        crypto_engine_exit(crypto_tmp->engine);
 
 #include <crypto/algapi.h>
 #include <linux/dma-mapping.h>
 #include <linux/interrupt.h>
+#include <linux/debugfs.h>
 #include <linux/delay.h>
 #include <linux/scatterlist.h>
 #include <crypto/engine.h>
        struct crypto_engine *engine;
        struct completion complete;
        int status;
+#ifdef CONFIG_CRYPTO_DEV_ROCKCHIP_DEBUG
+       struct dentry *dbgfs_dir;
+       struct dentry *dbgfs_stats;
+#endif
 };
 
 /* the private variable of hash */
                struct skcipher_alg     skcipher;
                struct ahash_alg        hash;
        } alg;
+       unsigned long stat_req;
+       unsigned long stat_fb;
+       unsigned long stat_fb_len;
+       unsigned long stat_fb_sglen;
+       unsigned long stat_fb_align;
+       unsigned long stat_fb_sgdiff;
 };
 
 extern struct rk_crypto_tmp rk_ecb_aes_alg;
 
        struct rk_ahash_rctx *rctx = ahash_request_ctx(areq);
        struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
        struct rk_ahash_ctx *tfmctx = crypto_ahash_ctx(tfm);
+       struct ahash_alg *alg = __crypto_ahash_alg(tfm->base.__crt_alg);
+       struct rk_crypto_tmp *algt = container_of(alg, struct rk_crypto_tmp, alg.hash);
+
+       algt->stat_fb++;
 
        ahash_request_set_tfm(&rctx->fallback_req, tfmctx->fallback_tfm);
        rctx->fallback_req.base.flags = areq->base.flags &
        struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
        struct rk_ahash_rctx *rctx = ahash_request_ctx(areq);
        struct rk_ahash_ctx *tctx = crypto_ahash_ctx(tfm);
+       struct ahash_alg *alg = __crypto_ahash_alg(tfm->base.__crt_alg);
+       struct rk_crypto_tmp *algt = container_of(alg, struct rk_crypto_tmp, alg.hash);
        struct scatterlist *sg = areq->src;
        int err = 0;
        int i;
 
        rctx->mode = 0;
 
+       algt->stat_req++;
+
        switch (crypto_ahash_digestsize(tfm)) {
        case SHA1_DIGEST_SIZE:
                rctx->mode = RK_CRYPTO_HASH_SHA1;
 
 {
        struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
        unsigned int bs = crypto_skcipher_blocksize(tfm);
+       struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
+       struct rk_crypto_tmp *algt = container_of(alg, struct rk_crypto_tmp, alg.skcipher);
        struct scatterlist *sgs, *sgd;
        unsigned int stodo, dtodo, len;
 
        sgd = req->dst;
        while (sgs && sgd) {
                if (!IS_ALIGNED(sgs->offset, sizeof(u32))) {
+                       algt->stat_fb_align++;
                        return true;
                }
                if (!IS_ALIGNED(sgd->offset, sizeof(u32))) {
+                       algt->stat_fb_align++;
                        return true;
                }
                stodo = min(len, sgs->length);
                if (stodo % bs) {
+                       algt->stat_fb_len++;
                        return true;
                }
                dtodo = min(len, sgd->length);
                if (dtodo % bs) {
+                       algt->stat_fb_len++;
                        return true;
                }
                if (stodo != dtodo) {
+                       algt->stat_fb_sgdiff++;
                        return true;
                }
                len -= stodo;
        struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
        struct rk_cipher_ctx *op = crypto_skcipher_ctx(tfm);
        struct rk_cipher_rctx *rctx = skcipher_request_ctx(areq);
+       struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
+       struct rk_crypto_tmp *algt = container_of(alg, struct rk_crypto_tmp, alg.skcipher);
        int err;
 
+       algt->stat_fb++;
+
        skcipher_request_set_tfm(&rctx->fallback_req, op->fallback_tfm);
        skcipher_request_set_callback(&rctx->fallback_req, areq->base.flags,
                                      areq->base.complete, areq->base.data);
        u8 *ivtouse = areq->iv;
        unsigned int len = areq->cryptlen;
        unsigned int todo;
+       struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
+       struct rk_crypto_tmp *algt = container_of(alg, struct rk_crypto_tmp, alg.skcipher);
+
+       algt->stat_req++;
 
        ivsize = crypto_skcipher_ivsize(tfm);
        if (areq->iv && crypto_skcipher_ivsize(tfm) > 0) {