  * so we output the fixed hash when given a zero-length message.
  */
 
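+/*
+ * Return true when any source scatterlist entry is not 32-bit aligned
+ * or its length is not a multiple of 4 bytes, in which case the request
+ * must be handled by the software fallback.
+ */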
+static bool rk_ahash_need_fallback(struct ahash_request *req)
+{
+       struct scatterlist *sg;
+
+       sg = req->src;
+       while (sg) {
+               if (!IS_ALIGNED(sg->offset, sizeof(u32)))
+                       return true;
+               if (sg->length % 4)
+                       return true;
+               sg = sg_next(sg);
+       }
+       return false;
+}
+
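+/*
+ * Process the whole request with the software fallback tfm. Used when
+ * rk_ahash_need_fallback() flags source data the engine cannot consume.
+ */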
+static int rk_ahash_digest_fb(struct ahash_request *areq)
+{
+       struct rk_ahash_rctx *rctx = ahash_request_ctx(areq);
+       struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
+       struct rk_ahash_ctx *tfmctx = crypto_ahash_ctx(tfm);
+
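+       /* Point the request at the fallback tfm; keep only MAY_SLEEP. */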
+       ahash_request_set_tfm(&rctx->fallback_req, tfmctx->fallback_tfm);
+       rctx->fallback_req.base.flags = areq->base.flags &
+                                       CRYPTO_TFM_REQ_MAY_SLEEP;
+
+       rctx->fallback_req.nbytes = areq->nbytes;
+       rctx->fallback_req.src = areq->src;
+       rctx->fallback_req.result = areq->result;
+
+       return crypto_ahash_digest(&rctx->fallback_req);
+}
+
 static int zero_message_process(struct ahash_request *req)
 {
        struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
@@ ... @@ static int rk_ahash_digest(struct ahash_request *req)
        struct rk_ahash_ctx *tctx = crypto_tfm_ctx(req->base.tfm);
        struct rk_crypto_info *dev = tctx->dev;
 
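+       /* Let the software fallback handle data the engine cannot process. */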
+       if (rk_ahash_need_fallback(req))
+               return rk_ahash_digest_fb(req);
+
        if (!req->nbytes)
                return zero_message_process(req);
        else
@@ ... @@ static void rk_cra_hash_exit(struct crypto_tfm *tfm)
        struct rk_ahash_ctx *tctx = crypto_tfm_ctx(tfm);
 
        free_page((unsigned long)tctx->dev->addr_vir);
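+       /* Release the software fallback tfm. */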
+       crypto_free_ahash(tctx->fallback_tfm);
 }
 
 struct rk_crypto_tmp rk_ahash_sha1 = {