#include <linux/mfd/syscon/imx6q-iomuxc-gpr.h>
 
 #include "dmaengine.h"
+#include "virt-dma.h"
 
 /* SDMA registers */
 #define SDMA_H_C0PTR           0x000
  * @bd                 pointer of alloced bd
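+ * @vd                 virt-dma descriptor of this transfer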
  */
 struct sdma_desc {
+       struct virt_dma_desc    vd;
        unsigned int            num_bd;
        dma_addr_t              bd_phys;
        unsigned int            buf_tail;
  * @word_size          peripheral access size
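+ * @vc                 virt-dma channel for this sdma channel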
  */
 struct sdma_channel {
+       struct virt_dma_chan            vc;
        struct sdma_desc                *desc;
-       struct sdma_desc                _desc;
        struct sdma_engine              *sdma;
        unsigned int                    channel;
        enum dma_transfer_direction             direction;
        unsigned long                   event_mask[2];
        unsigned long                   watermark_level;
        u32                             shp_addr, per_addr;
-       struct dma_chan                 chan;
        spinlock_t                      lock;
-       struct dma_async_tx_descriptor  txdesc;
        enum dma_status                 status;
-       struct tasklet_struct           tasklet;
        struct imx_dma_data             data;
        bool                            enabled;
 };
        writel_relaxed(val, sdma->regs + chnenbl);
 }
 
+static struct sdma_desc *to_sdma_desc(struct dma_async_tx_descriptor *t)
+{
+       return container_of(t, struct sdma_desc, vd.tx);
+}
+
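+/*
+ * Take the next descriptor issued on this virtual channel, point the
+ * channel control block's BD pointers at its buffer descriptor ring and
+ * enable the channel.
+ */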
+static void sdma_start_desc(struct sdma_channel *sdmac)
+{
+       struct virt_dma_desc *vd = vchan_next_desc(&sdmac->vc);
+       struct sdma_desc *desc;
+       struct sdma_engine *sdma = sdmac->sdma;
+       int channel = sdmac->channel;
+
+       if (!vd) {
+               sdmac->desc = NULL;
+               return;
+       }
+       sdmac->desc = desc = to_sdma_desc(&vd->tx);
+       /*
+        * Do not delete the node from the desc_issued list in cyclic mode;
+        * otherwise the allocated descriptor will never be freed by
+        * vchan_dma_desc_free_list().
+        */
+       if (!(sdmac->flags & IMX_DMA_SG_LOOP))
+               list_del(&vd->node);
+
+       sdma->channel_control[channel].base_bd_ptr = desc->bd_phys;
+       sdma->channel_control[channel].current_bd_ptr = desc->bd_phys;
+       sdma_enable_channel(sdma, sdmac->channel);
+}
+
 static void sdma_update_channel_loop(struct sdma_channel *sdmac)
 {
        struct sdma_buffer_descriptor *bd;
         * loop mode. Iterate over descriptors, re-setup them and
         * call callback function.
         */
-       while (1) {
+       while (sdmac->desc) {
                struct sdma_desc *desc = sdmac->desc;
 
                bd = &desc->bd[desc->buf_tail];
                 * SDMA transaction status by the time the client tasklet is
                 * executed.
                 */
-
-               dmaengine_desc_get_callback_invoke(&sdmac->txdesc, NULL);
+               spin_unlock(&sdmac->vc.lock);
+               dmaengine_desc_get_callback_invoke(&desc->vd.tx, NULL);
+               spin_lock(&sdmac->vc.lock);
 
                if (error)
                        sdmac->status = old_status;
        }
 }
 
-static void mxc_sdma_handle_channel_normal(unsigned long data)
+static void mxc_sdma_handle_channel_normal(struct sdma_channel *data)
 {
        struct sdma_channel *sdmac = (struct sdma_channel *) data;
        struct sdma_buffer_descriptor *bd;
                sdmac->status = DMA_ERROR;
        else
                sdmac->status = DMA_COMPLETE;
-
-       dma_cookie_complete(&sdmac->txdesc);
-
-       dmaengine_desc_get_callback_invoke(&sdmac->txdesc, NULL);
 }
 
 static irqreturn_t sdma_int_handler(int irq, void *dev_id)
        while (stat) {
                int channel = fls(stat) - 1;
                struct sdma_channel *sdmac = &sdma->channel[channel];
+               struct sdma_desc *desc;
+
+               spin_lock(&sdmac->vc.lock);
+               desc = sdmac->desc;
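+               /*
+                * Cyclic transfers reuse their BD ring in place; normal
+                * transfers are completed and the next queued descriptor
+                * is started.
+                */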
+               if (desc) {
+                       if (sdmac->flags & IMX_DMA_SG_LOOP) {
+                               sdma_update_channel_loop(sdmac);
+                       } else {
+                               mxc_sdma_handle_channel_normal(sdmac);
+                               vchan_cookie_complete(&desc->vd);
+                               sdma_start_desc(sdmac);
+                       }
+               }
 
-               if (sdmac->flags & IMX_DMA_SG_LOOP)
-                       sdma_update_channel_loop(sdmac);
-               else
-                       tasklet_schedule(&sdmac->tasklet);
-
+               spin_unlock(&sdmac->vc.lock);
                __clear_bit(channel, &stat);
        }
 
 
 static struct sdma_channel *to_sdma_chan(struct dma_chan *chan)
 {
-       return container_of(chan, struct sdma_channel, chan);
+       return container_of(chan, struct sdma_channel, vc.chan);
 }
 
 static int sdma_disable_channel(struct dma_chan *chan)
 
 static int sdma_disable_channel_with_delay(struct dma_chan *chan)
 {
+       struct sdma_channel *sdmac = to_sdma_chan(chan);
+       unsigned long flags;
+       LIST_HEAD(head);
+
        sdma_disable_channel(chan);
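+
+       /*
+        * Reclaim all descriptors still queued on the virtual channel so
+        * that vchan_dma_desc_free_list() can release them.
+        */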
+       spin_lock_irqsave(&sdmac->vc.lock, flags);
+       vchan_get_all_descriptors(&sdmac->vc, &head);
+       sdmac->desc = NULL;
+       spin_unlock_irqrestore(&sdmac->vc.lock, flags);
+       vchan_dma_desc_free_list(&sdmac->vc, &head);
 
        /*
         * According to NXP R&D team a delay of one BD SDMA cost time
        return 0;
 }
 
-static int sdma_request_channel(struct sdma_channel *sdmac)
+static int sdma_request_channel0(struct sdma_engine *sdma)
 {
-       struct sdma_engine *sdma = sdmac->sdma;
-       struct sdma_desc *desc;
-       int channel = sdmac->channel;
        int ret = -EBUSY;
 
-       sdmac->desc = &sdmac->_desc;
-       desc = sdmac->desc;
-
-       desc->bd = dma_zalloc_coherent(NULL, PAGE_SIZE, &desc->bd_phys,
-                                       GFP_KERNEL);
-       if (!desc->bd) {
+       sdma->bd0 = dma_zalloc_coherent(NULL, PAGE_SIZE, &sdma->bd0_phys,
+                                       GFP_NOWAIT);
+       if (!sdma->bd0) {
                ret = -ENOMEM;
                goto out;
        }
 
-       sdma->channel_control[channel].base_bd_ptr = desc->bd_phys;
-       sdma->channel_control[channel].current_bd_ptr = desc->bd_phys;
+       sdma->channel_control[0].base_bd_ptr = sdma->bd0_phys;
+       sdma->channel_control[0].current_bd_ptr = sdma->bd0_phys;
 
-       sdma_set_channel_priority(sdmac, MXC_SDMA_DEFAULT_PRIORITY);
+       sdma_set_channel_priority(&sdma->channel[0], MXC_SDMA_DEFAULT_PRIORITY);
        return 0;
 out:
 
        return ret;
 }
 
-static dma_cookie_t sdma_tx_submit(struct dma_async_tx_descriptor *tx)
+
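+/*
+ * Allocate the coherent buffer descriptor ring for one transfer: one
+ * sdma_buffer_descriptor per scatterlist entry or cyclic period.
+ */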
+static int sdma_alloc_bd(struct sdma_desc *desc)
 {
-       unsigned long flags;
-       struct sdma_channel *sdmac = to_sdma_chan(tx->chan);
-       dma_cookie_t cookie;
+       u32 bd_size = desc->num_bd * sizeof(struct sdma_buffer_descriptor);
+       int ret = 0;
 
-       spin_lock_irqsave(&sdmac->lock, flags);
+       desc->bd = dma_zalloc_coherent(NULL, bd_size, &desc->bd_phys,
+                                       GFP_ATOMIC);
+       if (!desc->bd) {
+               ret = -ENOMEM;
+               goto out;
+       }
+out:
+       return ret;
+}
 
-       cookie = dma_cookie_assign(tx);
+static void sdma_free_bd(struct sdma_desc *desc)
+{
+       u32 bd_size = desc->num_bd * sizeof(struct sdma_buffer_descriptor);
 
-       spin_unlock_irqrestore(&sdmac->lock, flags);
+       dma_free_coherent(NULL, bd_size, desc->bd, desc->bd_phys);
+}
 
-       return cookie;
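+/*
+ * desc_free callback for virt-dma: release the BD ring and the descriptor
+ * itself once the core has finished with it.
+ */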
+static void sdma_desc_free(struct virt_dma_desc *vd)
+{
+       struct sdma_desc *desc = container_of(vd, struct sdma_desc, vd);
+
+       sdma_free_bd(desc);
+       kfree(desc);
 }
 
 static int sdma_alloc_chan_resources(struct dma_chan *chan)
        if (ret)
                goto disable_clk_ipg;
 
-       ret = sdma_request_channel(sdmac);
-       if (ret)
-               goto disable_clk_ahb;
-
        ret = sdma_set_channel_priority(sdmac, prio);
        if (ret)
                goto disable_clk_ahb;
 
-       dma_async_tx_descriptor_init(&sdmac->txdesc, chan);
-       sdmac->txdesc.tx_submit = sdma_tx_submit;
-       /* txd.flags will be overwritten in prep funcs */
-       sdmac->txdesc.flags = DMA_CTRL_ACK;
-
        return 0;
 
 disable_clk_ahb:
 {
        struct sdma_channel *sdmac = to_sdma_chan(chan);
        struct sdma_engine *sdma = sdmac->sdma;
-       struct sdma_desc *desc = sdmac->desc;
 
-       sdma_disable_channel(chan);
+       sdma_disable_channel_with_delay(chan);
 
        if (sdmac->event_id0)
                sdma_event_disable(sdmac, sdmac->event_id0);
 
        sdma_set_channel_priority(sdmac, 0);
 
-       dma_free_coherent(NULL, PAGE_SIZE, desc->bd, desc->bd_phys);
-
        clk_disable(sdma->clk_ipg);
        clk_disable(sdma->clk_ahb);
 }
        int ret, i, count;
        int channel = sdmac->channel;
        struct scatterlist *sg;
-       struct sdma_desc *desc = sdmac->desc;
+       struct sdma_desc *desc;
 
        if (sdmac->status == DMA_IN_PROGRESS)
                return NULL;
 
        sdmac->flags = 0;
 
+       desc = kzalloc(sizeof(*desc), GFP_NOWAIT);
+       if (!desc)
+               goto err_out;
+
        desc->buf_tail = 0;
        desc->buf_ptail = 0;
+       desc->sdmac = sdmac;
+       desc->num_bd = sg_len;
        desc->chn_real_count = 0;
 
+       if (sdma_alloc_bd(desc)) {
+               kfree(desc);
+               goto err_out;
+       }
+
        dev_dbg(sdma->dev, "setting up %d entries for channel %d.\n",
                        sg_len, channel);
 
        sdmac->direction = direction;
        ret = sdma_load_context(sdmac);
        if (ret)
-               goto err_out;
+               goto err_bd_out;
 
        if (sg_len > NUM_BD) {
                dev_err(sdma->dev, "SDMA channel %d: maximum number of sg exceeded: %d > %d\n",
                                channel, sg_len, NUM_BD);
                ret = -EINVAL;
-               goto err_out;
+               goto err_bd_out;
        }
 
        desc->chn_count = 0;
                        dev_err(sdma->dev, "SDMA channel %d: maximum bytes for sg entry exceeded: %d > %d\n",
                                        channel, count, 0xffff);
                        ret = -EINVAL;
-                       goto err_out;
+                       goto err_bd_out;
                }
 
                bd->mode.count = count;
 
                if (sdmac->word_size > DMA_SLAVE_BUSWIDTH_4_BYTES) {
                        ret =  -EINVAL;
-                       goto err_out;
+                       goto err_bd_out;
                }
 
                switch (sdmac->word_size) {
                case DMA_SLAVE_BUSWIDTH_4_BYTES:
                        bd->mode.command = 0;
                        if (count & 3 || sg->dma_address & 3)
-                               return NULL;
+                               goto err_bd_out;
                        break;
                case DMA_SLAVE_BUSWIDTH_2_BYTES:
                        bd->mode.command = 2;
                        if (count & 1 || sg->dma_address & 1)
-                               return NULL;
+                               goto err_bd_out;
                        break;
                case DMA_SLAVE_BUSWIDTH_1_BYTE:
                        bd->mode.command = 1;
                        break;
                default:
-                       return NULL;
+                       goto err_bd_out;
                }
 
                param = BD_DONE | BD_EXTD | BD_CONT;
                bd->mode.status = param;
        }
 
-       desc->num_bd = sg_len;
-       sdma->channel_control[channel].current_bd_ptr = desc->bd_phys;
-
-       return &sdmac->txdesc;
+       return vchan_tx_prep(&sdmac->vc, &desc->vd, flags);
+err_bd_out:
+       sdma_free_bd(desc);
+       kfree(desc);
 err_out:
        sdmac->status = DMA_ERROR;
        return NULL;
        int num_periods = buf_len / period_len;
        int channel = sdmac->channel;
        int ret, i = 0, buf = 0;
-       struct sdma_desc *desc = sdmac->desc;
+       struct sdma_desc *desc;
 
        dev_dbg(sdma->dev, "%s channel: %d\n", __func__, channel);
 
 
        sdmac->status = DMA_IN_PROGRESS;
 
+       desc = kzalloc(sizeof(*desc), GFP_NOWAIT);
+       if (!desc)
+               goto err_out;
+
        desc->buf_tail = 0;
        desc->buf_ptail = 0;
+       desc->sdmac = sdmac;
+       desc->num_bd = num_periods;
        desc->chn_real_count = 0;
        desc->period_len = period_len;
 
        sdmac->flags |= IMX_DMA_SG_LOOP;
        sdmac->direction = direction;
+
+       if (sdma_alloc_bd(desc)) {
+               kfree(desc);
+               goto err_out;
+       }
+
        ret = sdma_load_context(sdmac);
        if (ret)
-               goto err_out;
+               goto err_bd_out;
 
        if (num_periods > NUM_BD) {
                dev_err(sdma->dev, "SDMA channel %d: maximum number of sg exceeded: %d > %d\n",
                                channel, num_periods, NUM_BD);
-               goto err_out;
+               goto err_bd_out;
        }
 
        if (period_len > 0xffff) {
                dev_err(sdma->dev, "SDMA channel %d: maximum period size exceeded: %zu > %d\n",
                                channel, period_len, 0xffff);
-               goto err_out;
+               goto err_bd_out;
        }
 
        while (buf < buf_len) {
                bd->mode.count = period_len;
 
                if (sdmac->word_size > DMA_SLAVE_BUSWIDTH_4_BYTES)
-                       goto err_out;
+                       goto err_bd_out;
                if (sdmac->word_size == DMA_SLAVE_BUSWIDTH_4_BYTES)
                        bd->mode.command = 0;
                else
                i++;
        }
 
-       desc->num_bd = num_periods;
-       sdma->channel_control[channel].current_bd_ptr = desc->bd_phys;
-
-       return &sdmac->txdesc;
+       return vchan_tx_prep(&sdmac->vc, &desc->vd, flags);
+err_bd_out:
+       sdma_free_bd(desc);
+       kfree(desc);
 err_out:
        sdmac->status = DMA_ERROR;
        return NULL;
                                      struct dma_tx_state *txstate)
 {
        struct sdma_channel *sdmac = to_sdma_chan(chan);
-       struct sdma_desc *desc = sdmac->desc;
+       struct sdma_desc *desc;
        u32 residue;
+       struct virt_dma_desc *vd;
+       enum dma_status ret;
+       unsigned long flags;
 
-       if (sdmac->flags & IMX_DMA_SG_LOOP)
-               residue = (desc->num_bd - desc->buf_ptail) *
-                          desc->period_len - desc->chn_real_count;
-       else
-               residue = desc->chn_count - desc->chn_real_count;
+       ret = dma_cookie_status(chan, cookie, txstate);
+       if (ret == DMA_COMPLETE || !txstate)
+               return ret;
+
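+       /*
+        * Look the cookie up on the virtual channel; if it belongs to the
+        * descriptor currently executing, report its in-flight residue.
+        */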
+       spin_lock_irqsave(&sdmac->vc.lock, flags);
+       vd = vchan_find_desc(&sdmac->vc, cookie);
+       if (vd) {
+               desc = to_sdma_desc(&vd->tx);
+               if (sdmac->flags & IMX_DMA_SG_LOOP)
+                       residue = (desc->num_bd - desc->buf_ptail) *
+                               desc->period_len - desc->chn_real_count;
+               else
+                       residue = desc->chn_count - desc->chn_real_count;
+       } else if (sdmac->desc && sdmac->desc->vd.tx.cookie == cookie) {
+               residue = sdmac->desc->chn_count - sdmac->desc->chn_real_count;
+       } else {
+               residue = 0;
+       }
+       spin_unlock_irqrestore(&sdmac->vc.lock, flags);
 
        dma_set_tx_state(txstate, chan->completed_cookie, chan->cookie,
                         residue);
 static void sdma_issue_pending(struct dma_chan *chan)
 {
        struct sdma_channel *sdmac = to_sdma_chan(chan);
-       struct sdma_engine *sdma = sdmac->sdma;
+       unsigned long flags;
 
-       if (sdmac->status == DMA_IN_PROGRESS)
-               sdma_enable_channel(sdma, sdmac->channel);
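+       /* Start the next queued descriptor unless one is already running */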
+       spin_lock_irqsave(&sdmac->vc.lock, flags);
+       if (vchan_issue_pending(&sdmac->vc) && !sdmac->desc)
+               sdma_start_desc(sdmac);
+       spin_unlock_irqrestore(&sdmac->vc.lock, flags);
 }
 
 #define SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V1        34
        for (i = 0; i < MAX_DMA_CHANNELS; i++)
                writel_relaxed(0, sdma->regs + SDMA_CHNPRI_0 + i * 4);
 
-       ret = sdma_request_channel(&sdma->channel[0]);
+       ret = sdma_request_channel0(sdma);
        if (ret)
                goto err_dma_alloc;
 
-       sdma->bd0 = sdma->channel[0].desc->bd;
-
        sdma_config_ownership(&sdma->channel[0], false, true, false);
 
        /* Set Command Channel (Channel Zero) */
                sdmac->sdma = sdma;
                spin_lock_init(&sdmac->lock);
 
-               sdmac->chan.device = &sdma->dma_device;
-               dma_cookie_init(&sdmac->chan);
                sdmac->channel = i;
-
-               tasklet_init(&sdmac->tasklet, mxc_sdma_handle_channel_normal,
-                            (unsigned long) sdmac);
+               sdmac->vc.desc_free = sdma_desc_free;
                /*
                 * Add the channel to the DMAC list. Do not add channel 0 though
                 * because we need it internally in the SDMA driver. This also means
                 * that channel 0 in dmaengine counting matches sdma channel 1.
                 */
                if (i)
-                       list_add_tail(&sdmac->chan.device_node,
-                                       &sdma->dma_device.channels);
+                       vchan_init(&sdmac->vc, &sdma->dma_device);
        }
 
        ret = sdma_init(sdma);
        for (i = 0; i < MAX_DMA_CHANNELS; i++) {
                struct sdma_channel *sdmac = &sdma->channel[i];
 
-               tasklet_kill(&sdmac->tasklet);
+               tasklet_kill(&sdmac->vc.task);
+               sdma_free_chan_resources(&sdmac->vc.chan);
        }
 
        platform_set_drvdata(pdev, NULL);