}
 
 static int omap_aes_crypt_dma(struct crypto_tfm *tfm,
-               struct scatterlist *in_sg, struct scatterlist *out_sg)
+               struct scatterlist *in_sg, struct scatterlist *out_sg,
+               int in_sg_len, int out_sg_len)
 {
        struct omap_aes_ctx *ctx = crypto_tfm_ctx(tfm);
        struct omap_aes_dev *dd = ctx->dd;
        struct dma_async_tx_descriptor *tx_in, *tx_out;
        struct dma_slave_config cfg;
-       dma_addr_t dma_addr_in = sg_dma_address(in_sg);
-       int ret, length = sg_dma_len(in_sg);
-
-       pr_debug("len: %d\n", length);
-
-       dd->dma_size = length;
-
-       if (!(dd->flags & FLAGS_FAST))
-               dma_sync_single_for_device(dd->dev, dma_addr_in, length,
-                                          DMA_TO_DEVICE);
+       int ret;
 
        memset(&cfg, 0, sizeof(cfg));
 
@@ ... @@
                return ret;
        }
 
-       tx_in = dmaengine_prep_slave_sg(dd->dma_lch_in, in_sg, 1,
+       tx_in = dmaengine_prep_slave_sg(dd->dma_lch_in, in_sg, in_sg_len,
                                        DMA_MEM_TO_DEV,
                                        DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
        if (!tx_in) {
@@ ... @@
                return ret;
        }
 
-       tx_out = dmaengine_prep_slave_sg(dd->dma_lch_out, out_sg, 1,
+       tx_out = dmaengine_prep_slave_sg(dd->dma_lch_out, out_sg, out_sg_len,
                                        DMA_DEV_TO_MEM,
                                        DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
        if (!tx_out) {
@@ ... @@
        dma_async_issue_pending(dd->dma_lch_out);
 
        /* start DMA */
-       dd->pdata->trigger(dd, length);
+       dd->pdata->trigger(dd, dd->total);
 
        return 0;
 }
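
For reference, the per-direction dmaengine sequence that omap_aes_crypt_dma() now relies on, reduced to a minimal standalone sketch. The helper name, parameters, and callback wiring below are illustrative placeholders, not the driver's own code.

#include <linux/dmaengine.h>
#include <linux/scatterlist.h>

/* Placeholder helper: configure, build and kick one slave-SG transfer. */
static int queue_slave_sg(struct dma_chan *chan, struct scatterlist *sg,
                          unsigned int sg_len,
                          enum dma_transfer_direction dir,
                          struct dma_slave_config *cfg,
                          dma_async_tx_callback cb, void *cb_arg)
{
        struct dma_async_tx_descriptor *tx;
        int ret;

        /* Program FIFO address, bus width and burst size for the channel. */
        ret = dmaengine_slave_config(chan, cfg);
        if (ret)
                return ret;

        /* One descriptor covers every entry of the mapped scatterlist. */
        tx = dmaengine_prep_slave_sg(chan, sg, sg_len, dir,
                                     DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
        if (!tx)
                return -EINVAL;

        /* Optional completion callback, fired when the whole list is done. */
        tx->callback = cb;
        tx->callback_param = cb_arg;

        /* Queue the descriptor and start the channel. */
        dmaengine_submit(tx);
        dma_async_issue_pending(chan);

        return 0;
}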
@@ ... @@ static int omap_aes_crypt_dma_start(struct omap_aes_dev *dd)
 {
        struct crypto_tfm *tfm = crypto_ablkcipher_tfm(
                                        crypto_ablkcipher_reqtfm(dd->req));
-       int err, fast = 0, in, out;
-       size_t count;
-       dma_addr_t addr_in, addr_out;
-       struct scatterlist *in_sg, *out_sg;
-       int len32;
+       int err;
 
        pr_debug("total: %d\n", dd->total);
 
-       if (sg_is_last(dd->in_sg) && sg_is_last(dd->out_sg)) {
-               /* check for alignment */
-               in = IS_ALIGNED((u32)dd->in_sg->offset, sizeof(u32));
-               out = IS_ALIGNED((u32)dd->out_sg->offset, sizeof(u32));
-
-               fast = in && out;
+       err = dma_map_sg(dd->dev, dd->in_sg, dd->in_sg_len, DMA_TO_DEVICE);
+       if (!err) {
+               dev_err(dd->dev, "dma_map_sg() error\n");
+               return -EINVAL;
        }
 
-       if (fast)  {
-               count = min(dd->total, sg_dma_len(dd->in_sg));
-               count = min(count, sg_dma_len(dd->out_sg));
-
-               if (count != dd->total) {
-                       pr_err("request length != buffer length\n");
-                       return -EINVAL;
-               }
-
-               pr_debug("fast\n");
-
-               err = dma_map_sg(dd->dev, dd->in_sg, 1, DMA_TO_DEVICE);
-               if (!err) {
-                       dev_err(dd->dev, "dma_map_sg() error\n");
-                       return -EINVAL;
-               }
-
-               err = dma_map_sg(dd->dev, dd->out_sg, 1, DMA_FROM_DEVICE);
-               if (!err) {
-                       dev_err(dd->dev, "dma_map_sg() error\n");
-                       dma_unmap_sg(dd->dev, dd->in_sg, 1, DMA_TO_DEVICE);
-                       return -EINVAL;
-               }
-
-               addr_in = sg_dma_address(dd->in_sg);
-               addr_out = sg_dma_address(dd->out_sg);
-
-               in_sg = dd->in_sg;
-               out_sg = dd->out_sg;
-
-               dd->flags |= FLAGS_FAST;
-
-       } else {
-               /* use cache buffers */
-               count = sg_copy(&dd->in_sg, &dd->in_offset, dd->buf_in,
-                                dd->buflen, dd->total, 0);
-
-               len32 = DIV_ROUND_UP(count, DMA_MIN) * DMA_MIN;
-
-               /*
-                * The data going into the AES module has been copied
-                * to a local buffer and the data coming out will go
-                * into a local buffer so set up local SG entries for
-                * both.
-                */
-               sg_init_table(&dd->in_sgl, 1);
-               dd->in_sgl.offset = dd->in_offset;
-               sg_dma_len(&dd->in_sgl) = len32;
-               sg_dma_address(&dd->in_sgl) = dd->dma_addr_in;
-
-               sg_init_table(&dd->out_sgl, 1);
-               dd->out_sgl.offset = dd->out_offset;
-               sg_dma_len(&dd->out_sgl) = len32;
-               sg_dma_address(&dd->out_sgl) = dd->dma_addr_out;
-
-               in_sg = &dd->in_sgl;
-               out_sg = &dd->out_sgl;
-
-               addr_in = dd->dma_addr_in;
-               addr_out = dd->dma_addr_out;
-
-               dd->flags &= ~FLAGS_FAST;
-
+       err = dma_map_sg(dd->dev, dd->out_sg, dd->out_sg_len, DMA_FROM_DEVICE);
+       if (!err) {
+               dev_err(dd->dev, "dma_map_sg() error\n");
+               dma_unmap_sg(dd->dev, dd->in_sg, dd->in_sg_len,
+                            DMA_TO_DEVICE);
+               return -EINVAL;
        }
 
-       dd->total -= count;
-
-       err = omap_aes_crypt_dma(tfm, in_sg, out_sg);
+       err = omap_aes_crypt_dma(tfm, dd->in_sg, dd->out_sg, dd->in_sg_len,
+                                dd->out_sg_len);
        if (err) {
-               dma_unmap_sg(dd->dev, dd->in_sg, 1, DMA_TO_DEVICE);
-               dma_unmap_sg(dd->dev, dd->out_sg, 1, DMA_TO_DEVICE);
+               dma_unmap_sg(dd->dev, dd->in_sg, dd->in_sg_len, DMA_TO_DEVICE);
+               dma_unmap_sg(dd->dev, dd->out_sg, dd->out_sg_len,
+                            DMA_FROM_DEVICE);
        }
 
        return err;
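
dma_map_sg() returns the number of scatterlist entries it mapped, or 0 on failure, which is why the code above treats a zero return as an error. A minimal sketch of the map-both-directions pattern, using placeholder names:

#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>

/* Placeholder helper: map one input and one output scatterlist for DMA. */
static int map_in_out(struct device *dev,
                      struct scatterlist *in_sg, int in_nents,
                      struct scatterlist *out_sg, int out_nents)
{
        int in_mapped, out_mapped;

        /* dma_map_sg() returns the number of mapped entries, 0 on error. */
        in_mapped = dma_map_sg(dev, in_sg, in_nents, DMA_TO_DEVICE);
        if (!in_mapped)
                return -EINVAL;

        out_mapped = dma_map_sg(dev, out_sg, out_nents, DMA_FROM_DEVICE);
        if (!out_mapped) {
                /* Undo the first mapping; unmap with the original nents. */
                dma_unmap_sg(dev, in_sg, in_nents, DMA_TO_DEVICE);
                return -EINVAL;
        }

        /*
         * The returned counts (which may be smaller than the nents passed
         * in) are what a later dmaengine_prep_slave_sg() call should use.
         */
        return 0;
}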
@@ ... @@
 static int omap_aes_crypt_dma_stop(struct omap_aes_dev *dd)
 {
        int err = 0;
-       size_t count;
 
        pr_debug("total: %d\n", dd->total);
 
        dmaengine_terminate_all(dd->dma_lch_in);
        dmaengine_terminate_all(dd->dma_lch_out);
 
-       if (dd->flags & FLAGS_FAST) {
-               dma_unmap_sg(dd->dev, dd->out_sg, 1, DMA_FROM_DEVICE);
-               dma_unmap_sg(dd->dev, dd->in_sg, 1, DMA_TO_DEVICE);
-       } else {
-               dma_sync_single_for_device(dd->dev, dd->dma_addr_out,
-                                          dd->dma_size, DMA_FROM_DEVICE);
-
-               /* copy data */
-               count = sg_copy(&dd->out_sg, &dd->out_offset, dd->buf_out,
-                                dd->buflen, dd->dma_size, 1);
-               if (count != dd->dma_size) {
-                       err = -EINVAL;
-                       pr_err("not all data converted: %u\n", count);
-               }
-       }
+       dma_unmap_sg(dd->dev, dd->in_sg, dd->in_sg_len, DMA_TO_DEVICE);
+       dma_unmap_sg(dd->dev, dd->out_sg, dd->out_sg_len, DMA_FROM_DEVICE);
 
        return err;
 }
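
The stop path terminates both channels before dropping the mappings, since the hardware may still be writing into the buffers until the terminate completes. A minimal sketch of that ordering, again with placeholder names:

#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>

/* Placeholder helper: abort in-flight DMA and release the SG mappings. */
static void stop_and_unmap(struct device *dev,
                           struct dma_chan *in_ch, struct dma_chan *out_ch,
                           struct scatterlist *in_sg, int in_nents,
                           struct scatterlist *out_sg, int out_nents)
{
        /* Stop both channels first; descriptors may still be running. */
        dmaengine_terminate_all(in_ch);
        dmaengine_terminate_all(out_ch);

        /* Only then hand buffer ownership back to the CPU. */
        dma_unmap_sg(dev, in_sg, in_nents, DMA_TO_DEVICE);
        dma_unmap_sg(dev, out_sg, out_nents, DMA_FROM_DEVICE);
}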
@@ ... @@
 static void omap_aes_done_task(unsigned long data)
 {
        struct omap_aes_dev *dd = (struct omap_aes_dev *)data;
-       int err;
-
-       pr_debug("enter\n");
 
-       err = omap_aes_crypt_dma_stop(dd);
-
-       err = dd->err ? : err;
-
-       if (dd->total && !err) {
-               err = omap_aes_crypt_dma_start(dd);
-               if (!err)
-                       return; /* DMA started. Not fininishing. */
-       }
+       pr_debug("enter done_task\n");
 
-       omap_aes_finish_req(dd, err);
+       omap_aes_crypt_dma_stop(dd);
+       omap_aes_finish_req(dd, 0);
        omap_aes_handle_queue(dd, NULL);
 
        pr_debug("exit\n");