        struct dw_edma_chunk *chunk;
        struct dw_edma_burst *burst;
        struct dw_edma_desc *desc;
-       u32 cnt;
+       u32 cnt = 0;
        int i;
 
        if (!chan->configured)
                return NULL;
        }
 
-       if (xfer->cyclic) {
+       if (xfer->type == EDMA_XFER_CYCLIC) {
                if (!xfer->xfer.cyclic.len || !xfer->xfer.cyclic.cnt)
                        return NULL;
-       } else {
+       } else if (xfer->type == EDMA_XFER_SCATTER_GATHER) {
                if (xfer->xfer.sg.len < 1)
                        return NULL;
+       } else if (xfer->type == EDMA_XFER_INTERLEAVED) {
+               if (!xfer->xfer.il->numf)
+                       return NULL;
+               if (xfer->xfer.il->numf > 0 && xfer->xfer.il->frame_size > 0)
+                       return NULL;
+       } else {
+               return NULL;
        }
 
        desc = dw_edma_alloc_desc(chan);
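
The xfer->type dispatch above replaces the old xfer->cyclic boolean. The companion header change is not part of this excerpt, so the following is only a reconstruction from the fields used here (xfer->type, xfer->xfer.il and the EDMA_XFER_* constants); the exact names, member types and ordering are assumptions.

        enum dw_edma_xfer_type {
                EDMA_XFER_SCATTER_GATHER = 0,
                EDMA_XFER_CYCLIC,
                EDMA_XFER_INTERLEAVED,
        };

        struct dw_edma_transfer {
                struct dma_chan *dchan;
                union {
                        struct dw_edma_sg sg;                   /* sgl + len, as used above */
                        struct dw_edma_cyclic cyclic;           /* paddr + len + cnt */
                        struct dma_interleaved_template *il;    /* new: interleaved template */
                } xfer;
                enum dma_transfer_direction direction;
                unsigned long flags;
                enum dw_edma_xfer_type type;                    /* was: bool cyclic */
        };
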
        if (unlikely(!chunk))
                goto err_alloc;
 
-       src_addr = chan->config.src_addr;
-       dst_addr = chan->config.dst_addr;
+       if (xfer->type == EDMA_XFER_INTERLEAVED) {
+               src_addr = xfer->xfer.il->src_start;
+               dst_addr = xfer->xfer.il->dst_start;
+       } else {
+               src_addr = chan->config.src_addr;
+               dst_addr = chan->config.dst_addr;
+       }
 
-       if (xfer->cyclic) {
+       if (xfer->type == EDMA_XFER_CYCLIC) {
                cnt = xfer->xfer.cyclic.cnt;
-       } else {
+       } else if (xfer->type == EDMA_XFER_SCATTER_GATHER) {
                cnt = xfer->xfer.sg.len;
                sg = xfer->xfer.sg.sgl;
+       } else if (xfer->type == EDMA_XFER_INTERLEAVED) {
+               if (xfer->xfer.il->numf > 0)
+                       cnt = xfer->xfer.il->numf;
+               else
+                       cnt = xfer->xfer.il->frame_size;
        }
 
        for (i = 0; i < cnt; i++) {
-               if (!xfer->cyclic && !sg)
+               if (xfer->type == EDMA_XFER_SCATTER_GATHER && !sg)
                        break;
 
                if (chunk->bursts_alloc == chan->ll_max) {
                if (unlikely(!burst))
                        goto err_alloc;
 
-               if (xfer->cyclic)
+               if (xfer->type == EDMA_XFER_CYCLIC)
                        burst->sz = xfer->xfer.cyclic.len;
-               else
+               else if (xfer->type == EDMA_XFER_SCATTER_GATHER)
                        burst->sz = sg_dma_len(sg);
+               else if (xfer->type == EDMA_XFER_INTERLEAVED)
+                       burst->sz = xfer->xfer.il->sgl[i].size;
 
                chunk->ll_region.sz += burst->sz;
                desc->alloc_sz += burst->sz;
 
                if (chan->dir == EDMA_DIR_WRITE) {
                        burst->sar = src_addr;
-                       if (xfer->cyclic) {
+                       if (xfer->type == EDMA_XFER_CYCLIC) {
                                burst->dar = xfer->xfer.cyclic.paddr;
-                       } else {
+                       } else if (xfer->type == EDMA_XFER_SCATTER_GATHER) {
                                burst->dar = dst_addr;
                                /* Unlike the typical assumption by other
                                 * drivers/IPs the peripheral memory isn't
                        }
                } else {
                        burst->dar = dst_addr;
-                       if (xfer->cyclic) {
+                       if (xfer->type == EDMA_XFER_CYCLIC) {
                                burst->sar = xfer->xfer.cyclic.paddr;
-                       } else {
+                       } else if (xfer->type == EDMA_XFER_SCATTER_GATHER) {
                                burst->sar = src_addr;
                                /* Unlike the typical assumption by other
                                 * drivers/IPs the peripheral memory isn't
                        }
                }
 
-               if (!xfer->cyclic) {
+               if (xfer->type == EDMA_XFER_SCATTER_GATHER) {
                        src_addr += sg_dma_len(sg);
                        dst_addr += sg_dma_len(sg);
                        sg = sg_next(sg);
+               } else if (xfer->type == EDMA_XFER_INTERLEAVED &&
+                          xfer->xfer.il->frame_size > 0) {
+                       struct dma_interleaved_template *il = xfer->xfer.il;
+                       struct data_chunk *dc = &il->sgl[i];
+
+                       if (il->src_sgl) {
+                               src_addr += burst->sz;
+                               src_addr += dmaengine_get_src_icg(il, dc);
+                       }
+
+                       if (il->dst_sgl) {
+                               dst_addr += burst->sz;
+                               dst_addr += dmaengine_get_dst_icg(il, dc);
+                       }
                }
        }
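
In the interleaved branch just above, each side whose src_sgl/dst_sgl flag is set advances past the chunk that was just transferred: the chunk size plus whatever inter-chunk gap dmaengine_get_src_icg()/dmaengine_get_dst_icg() report for that data_chunk. A minimal, userspace-compilable model of that stepping for one side, with made-up chunk sizes and gaps rather than driver data:

        #include <stdio.h>
        #include <stddef.h>

        struct chunk { size_t size, icg; };     /* stand-in for struct data_chunk */

        int main(void)
        {
                struct chunk sgl[] = { { 64, 16 }, { 64, 16 }, { 128, 0 } };
                unsigned long long addr = 0x10000000ULL;        /* stand-in for src_addr */
                size_t i;

                for (i = 0; i < sizeof(sgl) / sizeof(sgl[0]); i++) {
                        addr += sgl[i].size;    /* data just transferred */
                        addr += sgl[i].icg;     /* skip the gap to the next chunk */
                        printf("after chunk %zu: addr = 0x%llx\n", i, addr);
                }

                return 0;
        }
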
 
        xfer.xfer.sg.sgl = sgl;
        xfer.xfer.sg.len = len;
        xfer.flags = flags;
-       xfer.cyclic = false;
+       xfer.type = EDMA_XFER_SCATTER_GATHER;
 
        return dw_edma_device_transfer(&xfer);
 }
        xfer.xfer.cyclic.len = len;
        xfer.xfer.cyclic.cnt = count;
        xfer.flags = flags;
-       xfer.cyclic = true;
+       xfer.type = EDMA_XFER_CYCLIC;
+
+       return dw_edma_device_transfer(&xfer);
+}
+
+static struct dma_async_tx_descriptor *
+dw_edma_device_prep_interleaved_dma(struct dma_chan *dchan,
+                                   struct dma_interleaved_template *ilt,
+                                   unsigned long flags)
+{
+       struct dw_edma_transfer xfer;
+
+       xfer.dchan = dchan;
+       xfer.direction = ilt->dir;
+       xfer.xfer.il = ilt;
+       xfer.flags = flags;
+       xfer.type = EDMA_XFER_INTERLEAVED;
 
        return dw_edma_device_transfer(&xfer);
 }
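
For reference, a hedged sketch of how a client might feed the new callback, assuming a channel already obtained from this controller; every name here (example_submit_interleaved(), the addresses, the sizes) is illustrative, not taken from the patch. To satisfy the checks in dw_edma_device_transfer() above, numf is non-zero and frame_size is left at zero, and one data_chunk is supplied per frame since the burst loop reads sgl[i].size on each of the numf iterations.

        #include <linux/dmaengine.h>
        #include <linux/overflow.h>
        #include <linux/slab.h>

        static int example_submit_interleaved(struct dma_chan *chan,
                                              dma_addr_t src, dma_addr_t dst)
        {
                struct dma_async_tx_descriptor *txd;
                struct dma_interleaved_template *xt;
                int i;

                xt = kzalloc(struct_size(xt, sgl, 8), GFP_KERNEL);
                if (!xt)
                        return -ENOMEM;

                xt->dir = DMA_MEM_TO_DEV;
                xt->src_start = src;
                xt->dst_start = dst;
                xt->numf = 8;                   /* frame_size stays 0 (kzalloc) */
                for (i = 0; i < 8; i++)
                        xt->sgl[i].size = 256;  /* bytes moved per burst */

                txd = dmaengine_prep_interleaved_dma(chan, xt, DMA_PREP_INTERRUPT);

                /* The burst list is built during the prep call above, so the
                 * template is not referenced afterwards. */
                kfree(xt);
                if (!txd)
                        return -EINVAL;

                dmaengine_submit(txd);
                dma_async_issue_pending(chan);

                return 0;
        }
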
        dma_cap_set(DMA_SLAVE, dma->cap_mask);
        dma_cap_set(DMA_CYCLIC, dma->cap_mask);
        dma_cap_set(DMA_PRIVATE, dma->cap_mask);
+       dma_cap_set(DMA_INTERLEAVE, dma->cap_mask);
        dma->directions = BIT(write ? DMA_DEV_TO_MEM : DMA_MEM_TO_DEV);
        dma->src_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
        dma->dst_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
        dma->device_tx_status = dw_edma_device_tx_status;
        dma->device_prep_slave_sg = dw_edma_device_prep_slave_sg;
        dma->device_prep_dma_cyclic = dw_edma_device_prep_dma_cyclic;
+       dma->device_prep_interleaved_dma = dw_edma_device_prep_interleaved_dma;
 
        dma_set_max_seg_size(dma->dev, U32_MAX);
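
Since the channels are also marked DMA_PRIVATE, a consumer would typically grab one through the private allocation path and ask for the new capability explicitly; a minimal sketch with no filter function, using only core dmaengine helpers:

        #include <linux/dmaengine.h>
        #include <linux/err.h>

        static struct dma_chan *example_get_interleave_chan(void)
        {
                dma_cap_mask_t mask;
                struct dma_chan *chan;

                dma_cap_zero(mask);
                dma_cap_set(DMA_SLAVE, mask);
                dma_cap_set(DMA_INTERLEAVE, mask);

                /* No filter: accept any channel advertising both capabilities. */
                chan = dma_request_channel(mask, NULL, NULL);
                if (!chan)
                        return ERR_PTR(-ENODEV);

                return chan;
        }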