So far, the cache of an ahash request was updated from the 'complete'
operation. This operation is called from mv_cesa_tdma_process before the
cleanup operation, which means the content of req->src could be read and
copied while it was still DMA-mapped. Fix the issue by moving this cache
update from mv_cesa_ahash_complete to mv_cesa_ahash_req_cleanup, so that
the copy is done once the sglist has been unmapped.
Fixes: 1bf6682cb31d ("crypto: marvell - Add a complete operation for..")
Signed-off-by: Romain Perier <romain.perier@free-electrons.com>
Acked-by: Boris Brezillon <boris.brezillon@free-electrons.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
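
For context, a minimal sketch of the ordering described above, assuming the
call sequence outlined in the commit message (the function below and its
body are illustrative, not the actual driver code; only the .complete and
.cleanup ops names come from the mv_cesa_req_ops structure in the diff):

	/*
	 * Illustrative sketch only: shows why the cache copy must live in
	 * the cleanup op rather than the complete op. The ops names match
	 * mv_cesa_req_ops; the surrounding function is a hypothetical
	 * stand-in for the engine's completion path.
	 */
	static void engine_complete_path(struct crypto_async_request *req,
					 const struct mv_cesa_req_ops *ops,
					 int res)
	{
		/*
		 * 1. complete() runs first (called from mv_cesa_tdma_process):
		 *    req->src is still DMA-mapped at this point, so the CPU
		 *    must not read it to refill the software cache here.
		 */
		ops->complete(req);

		/*
		 * 2. cleanup() runs afterwards and unmaps the sglist; only
		 *    then is it safe to sg_pcopy_to_buffer() from req->src,
		 *    which is why the copy is moved into this op.
		 */
		ops->cleanup(req);

		/* 3. Finally, the crypto API completion callback is invoked. */
		req->complete(req, res);
	}
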
        for (i = 0; i < digsize / 4; i++)
                creq->state[i] = readl_relaxed(engine->regs + CESA_IVDIG(i));
 
-       if (creq->cache_ptr)
-               sg_pcopy_to_buffer(ahashreq->src, creq->src_nents,
-                                  creq->cache,
-                                  creq->cache_ptr,
-                                  ahashreq->nbytes - creq->cache_ptr);
-
        if (creq->last_req) {
                /*
                 * Hardware's MD5 digest is in little endian format, but
                mv_cesa_ahash_last_cleanup(ahashreq);
 
        mv_cesa_ahash_cleanup(ahashreq);
+
+       if (creq->cache_ptr)
+               sg_pcopy_to_buffer(ahashreq->src, creq->src_nents,
+                                  creq->cache,
+                                  creq->cache_ptr,
+                                  ahashreq->nbytes - creq->cache_ptr);
 }
 
 static const struct mv_cesa_req_ops mv_cesa_ahash_req_ops = {