#include <linux/crypto.h>
 #include <linux/reset.h>
 
+static struct rockchip_ip rocklist = {
+       .dev_list = LIST_HEAD_INIT(rocklist.dev_list),
+       .lock = __SPIN_LOCK_UNLOCKED(rocklist.lock),
+};
+
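+/*
+ * get_rk_crypto - pick a crypto instance for the next request.
+ *
+ * The first device on the list is returned and the list is then rotated,
+ * so successive calls spread requests over all probed instances in a
+ * round-robin fashion.
+ */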
+struct rk_crypto_info *get_rk_crypto(void)
+{
+       struct rk_crypto_info *first;
+
+       spin_lock(&rocklist.lock);
+       first = list_first_entry_or_null(&rocklist.dev_list,
+                                        struct rk_crypto_info, list);
+       list_rotate_left(&rocklist.dev_list);
+       spin_unlock(&rocklist.lock);
+       return first;
+}
+
 static const struct rk_variant rk3288_variant = {
        .num_clks = 4,
        .rkclks = {
        .num_clks = 3,
 };
 
+static const struct rk_variant rk3399_variant = {
+       .num_clks = 3,
+};
+
 static int rk_crypto_get_clks(struct rk_crypto_info *dev)
 {
        int i, j, err;
 }
 
 /*
- * Power management strategy: The device is suspended unless a TFM exists for
- * one of the algorithms proposed by this driver.
+ * Power management strategy: the device is kept runtime-suspended and is
+ * only resumed while handling a request. To avoid a suspend/resume yo-yo,
+ * autosuspend is set to 2 seconds.
  */
 static int rk_crypto_pm_suspend(struct device *dev)
 {
 #ifdef CONFIG_CRYPTO_DEV_ROCKCHIP_DEBUG
 static int rk_crypto_debugfs_show(struct seq_file *seq, void *v)
 {
+       struct rk_crypto_info *dd;
        unsigned int i;
 
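+       /* Show the number of requests handled by each crypto instance */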
+       spin_lock(&rocklist.lock);
+       list_for_each_entry(dd, &rocklist.dev_list, list) {
+               seq_printf(seq, "%s %s requests: %lu\n",
+                          dev_driver_string(dd->dev), dev_name(dd->dev),
+                          dd->nreq);
+       }
+       spin_unlock(&rocklist.lock);
+
        for (i = 0; i < ARRAY_SIZE(rk_cipher_algs); i++) {
                if (!rk_cipher_algs[i]->dev)
                        continue;
 DEFINE_SHOW_ATTRIBUTE(rk_crypto_debugfs);
 #endif
 
+static void register_debugfs(struct rk_crypto_info *crypto_info)
+{
+#ifdef CONFIG_CRYPTO_DEV_ROCKCHIP_DEBUG
+       /* Ignore errors from debugfs */
+       rocklist.dbgfs_dir = debugfs_create_dir("rk3288_crypto", NULL);
+       rocklist.dbgfs_stats = debugfs_create_file("stats", 0444,
+                                                  rocklist.dbgfs_dir,
+                                                  &rocklist,
+                                                  &rk_crypto_debugfs_fops);
+#endif
+}
+
 static int rk_crypto_register(struct rk_crypto_info *crypto_info)
 {
        unsigned int i, k;
        { .compatible = "rockchip,rk3328-crypto",
          .data = &rk3328_variant,
        },
+       { .compatible = "rockchip,rk3399-crypto",
+         .data = &rk3399_variant,
+       },
        {}
 };
 MODULE_DEVICE_TABLE(of, crypto_of_id_table);
 static int rk_crypto_probe(struct platform_device *pdev)
 {
        struct device *dev = &pdev->dev;
-       struct rk_crypto_info *crypto_info;
+       struct rk_crypto_info *crypto_info, *first;
        int err = 0;
 
        crypto_info = devm_kzalloc(&pdev->dev,
        if (err)
                goto err_pm;
 
-       err = rk_crypto_register(crypto_info);
-       if (err) {
-               dev_err(dev, "err in register alg");
-               goto err_register_alg;
-       }
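+       /* Add the new instance to the global list used by get_rk_crypto() */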
+       spin_lock(&rocklist.lock);
+       first = list_first_entry_or_null(&rocklist.dev_list,
+                                        struct rk_crypto_info, list);
+       list_add_tail(&crypto_info->list, &rocklist.dev_list);
+       spin_unlock(&rocklist.lock);
+
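+       /* Algorithms and debugfs are registered only once, by the first probed instance */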
+       if (!first) {
+               err = rk_crypto_register(crypto_info);
+               if (err) {
+                       dev_err(dev, "Failed to register crypto algorithms\n");
+                       goto err_register_alg;
+               }
 
-#ifdef CONFIG_CRYPTO_DEV_ROCKCHIP_DEBUG
-       /* Ignore error of debugfs */
-       crypto_info->dbgfs_dir = debugfs_create_dir("rk3288_crypto", NULL);
-       crypto_info->dbgfs_stats = debugfs_create_file("stats", 0444,
-                                                      crypto_info->dbgfs_dir,
-                                                      crypto_info,
-                                                      &rk_crypto_debugfs_fops);
-#endif
+               register_debugfs(crypto_info);
+       }
 
-       dev_info(dev, "Crypto Accelerator successfully registered\n");
        return 0;
 
 err_register_alg:
 static int rk_crypto_remove(struct platform_device *pdev)
 {
        struct rk_crypto_info *crypto_tmp = platform_get_drvdata(pdev);
+       struct rk_crypto_info *first;
+
+       spin_lock_bh(&rocklist.lock);
+       list_del(&crypto_tmp->list);
+       first = list_first_entry_or_null(&rocklist.dev_list,
+                                        struct rk_crypto_info, list);
+       spin_unlock_bh(&rocklist.lock);
 
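+       /* When the last instance goes away, remove debugfs and unregister the algorithms */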
+       if (!first) {
 #ifdef CONFIG_CRYPTO_DEV_ROCKCHIP_DEBUG
-       debugfs_remove_recursive(crypto_tmp->dbgfs_dir);
+               debugfs_remove_recursive(rocklist.dbgfs_dir);
 #endif
-       rk_crypto_unregister();
+               rk_crypto_unregister();
+       }
        rk_crypto_pm_exit(crypto_tmp);
        crypto_engine_exit(crypto_tmp->engine);
        return 0;
 
 
 #define RK_MAX_CLKS 4
 
+/*
+ * struct rockchip_ip - structure for managing a list of RK crypto instances
+ * @dev_list:          List of probed rk_crypto_info devices
+ * @lock:              Controls access to dev_list
+ * @dbgfs_dir:         Debugfs dentry of the statistics directory
+ * @dbgfs_stats:       Debugfs dentry of the statistics file
+ */
+struct rockchip_ip {
+       struct list_head        dev_list;
+       spinlock_t              lock; /* Control access to dev_list */
+       struct dentry           *dbgfs_dir;
+       struct dentry           *dbgfs_stats;
+};
+
 struct rk_clks {
        const char *name;
        unsigned long max;
 };
 
 struct rk_crypto_info {
+       struct list_head                list;
        struct device                   *dev;
        struct clk_bulk_data            *clks;
        int                             num_clks;
        void __iomem                    *reg;
        int                             irq;
        const struct rk_variant *variant;
+       unsigned long nreq;
        struct crypto_engine *engine;
        struct completion complete;
        int status;
-#ifdef CONFIG_CRYPTO_DEV_ROCKCHIP_DEBUG
-       struct dentry *dbgfs_dir;
-       struct dentry *dbgfs_stats;
-#endif
 };
 
 /* the private variable of hash */
 struct rk_ahash_ctx {
        struct crypto_engine_ctx enginectx;
-       struct rk_crypto_info           *dev;
        /* for fallback */
        struct crypto_ahash             *fallback_tfm;
 };
 /* the private variable of cipher */
 struct rk_cipher_ctx {
        struct crypto_engine_ctx enginectx;
-       struct rk_crypto_info           *dev;
        unsigned int                    keylen;
        u8                              key[AES_MAX_KEY_SIZE];
        u8                              iv[AES_BLOCK_SIZE];
 
 struct rk_crypto_tmp {
        u32 type;
-       struct rk_crypto_info           *dev;
+       struct rk_crypto_info           *dev;
        union {
                struct skcipher_alg     skcipher;
                struct ahash_alg        hash;
 extern struct rk_crypto_tmp rk_ahash_sha256;
 extern struct rk_crypto_tmp rk_ahash_md5;
 
+struct rk_crypto_info *get_rk_crypto(void);
 #endif
 
 static int rk_ahash_digest(struct ahash_request *req)
 {
        struct rk_ahash_rctx *rctx = ahash_request_ctx(req);
-       struct rk_ahash_ctx *tctx = crypto_tfm_ctx(req->base.tfm);
-       struct rk_crypto_info *dev = tctx->dev;
+       struct rk_crypto_info *dev;
+       struct crypto_engine *engine;
 
        if (rk_ahash_need_fallback(req))
                return rk_ahash_digest_fb(req);
        if (!req->nbytes)
                return zero_message_process(req);
 
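+       /* Pick a crypto instance in round-robin fashion for this request */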
+       dev = get_rk_crypto();
+
        rctx->dev = dev;
+       engine = dev->engine;
 
-       return crypto_transfer_hash_request_to_engine(dev->engine, req);
+       return crypto_transfer_hash_request_to_engine(engine, req);
 }
 
 static void crypto_ahash_dma_start(struct rk_crypto_info *dev, struct scatterlist *sg)
        int i;
        u32 v;
 
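+       /* The hardware is runtime-suspended between requests; resume it first */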
+       err = pm_runtime_resume_and_get(rkc->dev);
+       if (err)
+               return err;
+
        rctx->mode = 0;
 
        algt->stat_req++;
+       rkc->nreq++;
 
        switch (crypto_ahash_digestsize(tfm)) {
        case SHA1_DIGEST_SIZE:
        }
 
 theend:
+       pm_runtime_put_autosuspend(rkc->dev);
+
        local_bh_disable();
        crypto_finalize_hash_request(engine, breq, err);
        local_bh_enable();
 static int rk_cra_hash_init(struct crypto_tfm *tfm)
 {
        struct rk_ahash_ctx *tctx = crypto_tfm_ctx(tfm);
-       struct rk_crypto_tmp *algt;
-       struct ahash_alg *alg = __crypto_ahash_alg(tfm->__crt_alg);
-
        const char *alg_name = crypto_tfm_alg_name(tfm);
-       int err;
-
-       algt = container_of(alg, struct rk_crypto_tmp, alg.hash);
-
-       tctx->dev = algt->dev;
+       struct ahash_alg *alg = __crypto_ahash_alg(tfm->__crt_alg);
+       struct rk_crypto_tmp *algt = container_of(alg, struct rk_crypto_tmp, alg.hash);
 
        /* for fallback */
        tctx->fallback_tfm = crypto_alloc_ahash(alg_name, 0,
                                                CRYPTO_ALG_NEED_FALLBACK);
        if (IS_ERR(tctx->fallback_tfm)) {
-               dev_err(tctx->dev->dev, "Could not load fallback driver.\n");
+               dev_err(algt->dev->dev, "Could not load fallback driver.\n");
                return PTR_ERR(tctx->fallback_tfm);
        }
 
        tctx->enginectx.op.prepare_request = rk_hash_prepare;
        tctx->enginectx.op.unprepare_request = rk_hash_unprepare;
 
-       err = pm_runtime_resume_and_get(tctx->dev->dev);
-       if (err < 0)
-               goto error_pm;
-
        return 0;
-error_pm:
-       crypto_free_ahash(tctx->fallback_tfm);
-
-       return err;
 }
 
 static void rk_cra_hash_exit(struct crypto_tfm *tfm)
        struct rk_ahash_ctx *tctx = crypto_tfm_ctx(tfm);
 
        crypto_free_ahash(tctx->fallback_tfm);
-       pm_runtime_put_autosuspend(tctx->dev->dev);
 }
 
 struct rk_crypto_tmp rk_ahash_sha1 = {
 
 static int rk_cipher_need_fallback(struct skcipher_request *req)
 {
        struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
-       unsigned int bs = crypto_skcipher_blocksize(tfm);
        struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
        struct rk_crypto_tmp *algt = container_of(alg, struct rk_crypto_tmp, alg.skcipher);
        struct scatterlist *sgs, *sgd;
        unsigned int stodo, dtodo, len;
+       unsigned int bs = crypto_skcipher_blocksize(tfm);
 
        if (!req->cryptlen)
                return true;
 
 static int rk_cipher_handle_req(struct skcipher_request *req)
 {
-       struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
-       struct rk_cipher_ctx *tctx = crypto_skcipher_ctx(tfm);
        struct rk_cipher_rctx *rctx = skcipher_request_ctx(req);
-       struct rk_crypto_info *rkc = tctx->dev;
-       struct crypto_engine *engine = rkc->engine;
+       struct rk_crypto_info *rkc;
+       struct crypto_engine *engine;
 
        if (rk_cipher_need_fallback(req))
                return rk_cipher_fallback(req);
 
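+       /* Spread cipher requests over the available instances (round-robin) */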
+       rkc = get_rk_crypto();
+
+       engine = rkc->engine;
        rctx->dev = rkc;
 
        return crypto_transfer_skcipher_request_to_engine(engine, req);
        struct rk_crypto_tmp *algt = container_of(alg, struct rk_crypto_tmp, alg.skcipher);
        struct rk_crypto_info *rkc = rctx->dev;
 
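+       /* Resume the runtime-suspended instance before use */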
+       err = pm_runtime_resume_and_get(rkc->dev);
+       if (err)
+               return err;
+
        algt->stat_req++;
+       rkc->nreq++;
 
        ivsize = crypto_skcipher_ivsize(tfm);
        if (areq->iv && crypto_skcipher_ivsize(tfm) > 0) {
        }
 
 theend:
+       pm_runtime_put_autosuspend(rkc->dev);
+
        local_bh_disable();
        crypto_finalize_skcipher_request(engine, areq, err);
        local_bh_enable();
 static int rk_cipher_tfm_init(struct crypto_skcipher *tfm)
 {
        struct rk_cipher_ctx *ctx = crypto_skcipher_ctx(tfm);
-       struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
        const char *name = crypto_tfm_alg_name(&tfm->base);
-       struct rk_crypto_tmp *algt;
-       int err;
-
-       algt = container_of(alg, struct rk_crypto_tmp, alg.skcipher);
-
-       ctx->dev = algt->dev;
+       struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
+       struct rk_crypto_tmp *algt = container_of(alg, struct rk_crypto_tmp, alg.skcipher);
 
        ctx->fallback_tfm = crypto_alloc_skcipher(name, 0, CRYPTO_ALG_NEED_FALLBACK);
        if (IS_ERR(ctx->fallback_tfm)) {
-               dev_err(ctx->dev->dev, "ERROR: Cannot allocate fallback for %s %ld\n",
+               dev_err(algt->dev->dev, "ERROR: Cannot allocate fallback for %s %ld\n",
                        name, PTR_ERR(ctx->fallback_tfm));
                return PTR_ERR(ctx->fallback_tfm);
        }
 
        ctx->enginectx.op.do_one_request = rk_cipher_run;
 
-       err = pm_runtime_resume_and_get(ctx->dev->dev);
-       if (err < 0)
-               goto error_pm;
-
        return 0;
-error_pm:
-       crypto_free_skcipher(ctx->fallback_tfm);
-       return err;
 }
 
 static void rk_cipher_tfm_exit(struct crypto_skcipher *tfm)
 
        memzero_explicit(ctx->key, ctx->keylen);
        crypto_free_skcipher(ctx->fallback_tfm);
-       pm_runtime_put_autosuspend(ctx->dev->dev);
 }
 
 struct rk_crypto_tmp rk_ecb_aes_alg = {