#include <linux/init.h>
 #include <linux/kernel.h>
 #include <linux/debugfs.h>
+#include <linux/dmaengine.h>
 #include <linux/seq_file.h>
 #include <linux/interrupt.h>
 #include <linux/delay.h>
        u32                     bytesleft;
        int                     suspended;
        int                     irq;
-       int                     use_dma, dma_ch;
+       int                     use_dma, dma_ch, dma2;
+       struct dma_chan         *tx_chan;
+       struct dma_chan         *rx_chan;
        int                     dma_line_tx, dma_line_rx;
        int                     slot_id;
        int                     response_busy;
                return DMA_FROM_DEVICE;
 }
 
+/* Pick the dmaengine channel matching the transfer direction. */
+static struct dma_chan *omap_hsmmc_get_dma_chan(struct omap_hsmmc_host *host,
+       struct mmc_data *data)
+{
+       if (data->flags & MMC_DATA_WRITE)
+               return host->tx_chan;
+
+       return host->rx_chan;
+}
+
 static void omap_hsmmc_request_done(struct omap_hsmmc_host *host, struct mmc_request *mrq)
 {
-       int dma_ch;
+       int dma_ch, dma2;
        unsigned long flags;
 
        spin_lock_irqsave(&host->irq_lock, flags);
        host->req_in_progress = 0;
        dma_ch = host->dma_ch;
+       dma2 = host->dma2;
        spin_unlock_irqrestore(&host->irq_lock, flags);
 
        omap_hsmmc_disable_irq(host);
        /* Do not complete the request if DMA is still in progress */
-       if (mrq->data && host->use_dma && dma_ch != -1)
+       if (mrq->data && host->use_dma && (dma_ch != -1 || dma2 != -1))
                return;
        host->mrq = NULL;
        mmc_request_done(host->mmc, mrq);
  */
 static void omap_hsmmc_dma_cleanup(struct omap_hsmmc_host *host, int errno)
 {
-       int dma_ch;
+       int dma_ch, dma2;
        unsigned long flags;
 
        host->data->error = errno;
        spin_lock_irqsave(&host->irq_lock, flags);
        dma_ch = host->dma_ch;
        host->dma_ch = -1;
+       dma2 = host->dma2;
+       host->dma2 = -1;
        spin_unlock_irqrestore(&host->irq_lock, flags);
 
+       if (host->use_dma && dma2 != -1) {
+               struct dma_chan *chan = omap_hsmmc_get_dma_chan(host, host->data);
+
+               dmaengine_terminate_all(chan);
+               dma_unmap_sg(chan->device->dev,
+                       host->data->sg, host->data->sg_len,
+                       omap_hsmmc_get_dma_dir(host, host->data));
+
+               host->data->host_cookie = 0;
+       }
        if (host->use_dma && dma_ch != -1) {
                dma_unmap_sg(mmc_dev(host->mmc), host->data->sg,
                        host->data->sg_len,
        }
 }
 
+/*
+ * dmaengine completion callback: runs when the slave-sg descriptor
+ * submitted in the DMA setup path finishes.  Unmaps the sg list if it
+ * was mapped for this request only, and completes the mmc_request if
+ * the controller-side transfer (TC interrupt) has already finished.
+ */
+static void omap_hsmmc_dma_callback(void *param)
+{
+       struct omap_hsmmc_host *host = param;
+       struct dma_chan *chan;
+       struct mmc_data *data;
+       int req_in_progress;
+
+       spin_lock_irq(&host->irq_lock);
+       if (host->dma2 < 0) {
+               /* dma2 already cleared (e.g. by omap_hsmmc_dma_cleanup
+                * on error) - nothing left for us to do. */
+               spin_unlock_irq(&host->irq_lock);
+               return;
+       }
+
+       data = host->mrq->data;
+       chan = omap_hsmmc_get_dma_chan(host, data);
+       /* A set host_cookie means the sg list was pre-mapped for an
+        * async request and will be unmapped in the post_req hook;
+        * otherwise we own the mapping and must unmap it here. */
+       if (!data->host_cookie)
+               dma_unmap_sg(chan->device->dev,
+                            data->sg, data->sg_len,
+                            omap_hsmmc_get_dma_dir(host, data));
+
+       /* Snapshot req_in_progress and clear dma2 under the lock so
+        * exactly one of this callback and the TC interrupt path sees
+        * the other as finished and completes the request. */
+       req_in_progress = host->req_in_progress;
+       host->dma2 = -1;
+       spin_unlock_irq(&host->irq_lock);
+
+       /* If DMA has finished after TC, complete the request */
+       if (!req_in_progress) {
+               struct mmc_request *mrq = host->mrq;
+
+               host->mrq = NULL;
+               mmc_request_done(host->mmc, mrq);
+       }
+}
+
 static int omap_hsmmc_pre_dma_transfer(struct omap_hsmmc_host *host,
                                       struct mmc_data *data,
-                                      struct omap_hsmmc_next *next)
+                                      struct omap_hsmmc_next *next,
+                                      struct device *dev)
 {
        int dma_len;
 
        /* Check if next job is already prepared */
        if (next ||
            (!next && data->host_cookie != host->next_data.cookie)) {
-               dma_len = dma_map_sg(mmc_dev(host->mmc), data->sg,
-                                    data->sg_len,
+               dma_len = dma_map_sg(dev, data->sg, data->sg_len,
                                     omap_hsmmc_get_dma_dir(host, data));
 
        } else {
 {
        int dma_ch = 0, ret = 0, i;
        struct mmc_data *data = req->data;
+       struct dma_chan *chan;
 
        /* Sanity check: all the SG entries must be aligned by block size. */
        for (i = 0; i < data->sg_len; i++) {
                 */
                return -EINVAL;
 
-       BUG_ON(host->dma_ch != -1);
+       BUG_ON(host->dma_ch != -1 || host->dma2 != -1);
 
-       ret = omap_request_dma(omap_hsmmc_get_dma_sync_dev(host, data),
-                              "MMC/SD", omap_hsmmc_dma_cb, host, &dma_ch);
-       if (ret != 0) {
-               dev_err(mmc_dev(host->mmc),
-                       "%s: omap_request_dma() failed with %d\n",
-                       mmc_hostname(host->mmc), ret);
-               return ret;
-       }
-       ret = omap_hsmmc_pre_dma_transfer(host, data, NULL);
-       if (ret)
-               return ret;
+       chan = omap_hsmmc_get_dma_chan(host, data);
+       if (!chan) {
+               ret = omap_request_dma(omap_hsmmc_get_dma_sync_dev(host, data),
+                                      "MMC/SD", omap_hsmmc_dma_cb, host, &dma_ch);
+               if (ret != 0) {
+                       dev_err(mmc_dev(host->mmc),
+                               "%s: omap_request_dma() failed with %d\n",
+                               mmc_hostname(host->mmc), ret);
+                       return ret;
+               }
+               ret = omap_hsmmc_pre_dma_transfer(host, data, NULL,
+                                                 mmc_dev(host->mmc));
+               if (ret)
+                       return ret;
+
+               host->dma_ch = dma_ch;
+               host->dma_sg_idx = 0;
+
+               omap_hsmmc_config_dma_params(host, data, data->sg);
+       } else {
+               struct dma_slave_config cfg;
+               struct dma_async_tx_descriptor *tx;
+
+               cfg.src_addr = host->mapbase + OMAP_HSMMC_DATA;
+               cfg.dst_addr = host->mapbase + OMAP_HSMMC_DATA;
+               cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
+               cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
+               cfg.src_maxburst = data->blksz / 4;
+               cfg.dst_maxburst = data->blksz / 4;
+
+               ret = dmaengine_slave_config(chan, &cfg);
+               if (ret)
+                       return ret;
+
+               ret = omap_hsmmc_pre_dma_transfer(host, data, NULL,
+                                                 chan->device->dev);
+               if (ret)
+                       return ret;
+
+               tx = dmaengine_prep_slave_sg(chan, data->sg, data->sg_len,
+                       data->flags & MMC_DATA_WRITE ? DMA_MEM_TO_DEV : DMA_DEV_TO_MEM,
+                       DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+               if (!tx) {
+                       dev_err(mmc_dev(host->mmc), "prep_slave_sg() failed\n");
+                       /* FIXME: cleanup */
+                       return -1;
+               }
 
-       host->dma_ch = dma_ch;
-       host->dma_sg_idx = 0;
+               tx->callback = omap_hsmmc_dma_callback;
+               tx->callback_param = host;
 
-       omap_hsmmc_config_dma_params(host, data, data->sg);
+               /* Does not fail */
+               dmaengine_submit(tx);
+
+               host->dma2 = 1;
+
+               dma_async_issue_pending(chan);
+       }
 
        return 0;
 }
        struct mmc_data *data = mrq->data;
 
        if (host->use_dma) {
+               struct dma_chan *c = omap_hsmmc_get_dma_chan(host, data);
+               struct device *dev = c ? c->device->dev : mmc_dev(mmc);
+
                if (data->host_cookie)
-                       dma_unmap_sg(mmc_dev(host->mmc), data->sg,
-                                    data->sg_len,
+                       dma_unmap_sg(dev,
+                                    data->sg, data->sg_len,
                                     omap_hsmmc_get_dma_dir(host, data));
                data->host_cookie = 0;
        }
                return ;
        }
 
-       if (host->use_dma)
+       if (host->use_dma) {
+               struct dma_chan *c = omap_hsmmc_get_dma_chan(host, mrq->data);
+               struct device *dev = c ? c->device->dev : mmc_dev(mmc);
+
                if (omap_hsmmc_pre_dma_transfer(host, mrq->data,
-                                               &host->next_data))
+                                               &host->next_data, dev))
                        mrq->data->host_cookie = 0;
+       }
 }
 
 /*
        int err;
 
        BUG_ON(host->req_in_progress);
-       BUG_ON(host->dma_ch != -1);
+       BUG_ON(host->dma_ch != -1 || host->dma2 != -1);
        if (host->protect_card) {
                if (host->reqs_blocked < 3) {
                        /*
        host->use_dma   = 1;
        host->dev->dma_mask = &pdata->dma_mask;
        host->dma_ch    = -1;
+       host->dma2      = -1;
        host->irq       = irq;
        host->slot_id   = 0;
        host->mapbase   = res->start + pdata->reg_offset;
        }
        host->dma_line_rx = res->start;
 
+       {
+               dma_cap_mask_t mask;
+               unsigned sig;
+               extern bool omap_dma_filter_fn(struct dma_chan *chan, void *param);
+
+               dma_cap_zero(mask);
+               dma_cap_set(DMA_SLAVE, mask);
+#if 1
+               sig = host->dma_line_rx;
+               host->rx_chan = dma_request_channel(mask, omap_dma_filter_fn, &sig);
+               if (!host->rx_chan) {
+                       dev_warn(mmc_dev(host->mmc), "unable to obtain RX DMA engine channel %u\n", sig);
+               }
+#endif
+#if 1
+               sig = host->dma_line_tx;
+               host->tx_chan = dma_request_channel(mask, omap_dma_filter_fn, &sig);
+               if (!host->tx_chan) {
+                       dev_warn(mmc_dev(host->mmc), "unable to obtain TX DMA engine channel %u\n", sig);
+               }
+#endif
+       }
+
        /* Request IRQ for MMC operations */
        ret = request_irq(host->irq, omap_hsmmc_irq, 0,
                        mmc_hostname(mmc), host);
 err_irq_cd_init:
        free_irq(host->irq, host);
 err_irq:
+       if (host->tx_chan)
+               dma_release_channel(host->tx_chan);
+       if (host->rx_chan)
+               dma_release_channel(host->rx_chan);
        pm_runtime_put_sync(host->dev);
        pm_runtime_disable(host->dev);
        clk_put(host->fclk);
        if (mmc_slot(host).card_detect_irq)
                free_irq(mmc_slot(host).card_detect_irq, host);
 
+       if (host->tx_chan)
+               dma_release_channel(host->tx_chan);
+       if (host->rx_chan)
+               dma_release_channel(host->rx_chan);
+
        pm_runtime_put_sync(host->dev);
        pm_runtime_disable(host->dev);
        clk_put(host->fclk);