/* AXI DMA Specific Masks/Bit fields */
 #define XILINX_DMA_MAX_TRANS_LEN       GENMASK(22, 0)
 #define XILINX_DMA_CR_COALESCE_MAX     GENMASK(23, 16)
+#define XILINX_DMA_CR_CYCLIC_BD_EN_MASK        BIT(4)
 #define XILINX_DMA_CR_COALESCE_SHIFT   16
 #define XILINX_DMA_BD_SOP              BIT(27)
 #define XILINX_DMA_BD_EOP              BIT(26)
  * @async_tx: Async transaction descriptor
  * @segments: TX segments list
  * @node: Node in the channel descriptors list
+ * @cyclic: Whether this descriptor describes a cyclic transfer
  */
 struct xilinx_dma_tx_descriptor {
        struct dma_async_tx_descriptor async_tx;
        struct list_head segments;
        struct list_head node;
+       bool cyclic;
 };
 
 /**
  * @direction: Transfer direction
  * @num_frms: Number of frames
  * @has_sg: Support scatter transfers
+ * @cyclic: Whether a cyclic transfer is active on this channel
  * @genlock: Support genlock mode
  * @err: Channel has errors
  * @tasklet: Cleanup work after irq
  * @desc_submitcount: Descriptor h/w submitted count
  * @residue: Residue for AXI DMA
  * @seg_v: Statically allocated segments base
+ * @cyclic_seg_v: Statically allocated segment used as the cyclic tail descriptor
  * @start_transfer: Differentiate b/w DMA IP's transfer
  */
 struct xilinx_dma_chan {
        enum dma_transfer_direction direction;
        int num_frms;
        bool has_sg;
+       bool cyclic;
        bool genlock;
        bool err;
        struct tasklet_struct tasklet;
        u32 desc_submitcount;
        u32 residue;
        struct xilinx_axidma_tx_segment *seg_v;
+       struct xilinx_axidma_tx_segment *cyclic_seg_v;
        void (*start_transfer)(struct xilinx_dma_chan *chan);
 };
 
        dev_dbg(chan->dev, "Free all channel resources.\n");
 
        xilinx_dma_free_descriptors(chan);
-       if (chan->xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA)
+       if (chan->xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) {
+               xilinx_dma_free_tx_segment(chan, chan->cyclic_seg_v);
                xilinx_dma_free_tx_segment(chan, chan->seg_v);
+       }
        dma_pool_destroy(chan->desc_pool);
        chan->desc_pool = NULL;
 }
 
+/**
+ * xilinx_dma_chan_handle_cyclic - Invoke the callback for a cyclic descriptor
+ * @chan: Driver specific dma channel
+ * @desc: dma transaction descriptor
+ * @flags: saved interrupt flags for chan->lock, released around the callback
+ */
+static void xilinx_dma_chan_handle_cyclic(struct xilinx_dma_chan *chan,
+                                         struct xilinx_dma_tx_descriptor *desc,
+                                         unsigned long *flags)
+{
+       dma_async_tx_callback callback;
+       void *callback_param;
+
+       callback = desc->async_tx.callback;
+       callback_param = desc->async_tx.callback_param;
+       if (callback) {
+               spin_unlock_irqrestore(&chan->lock, *flags);
+               callback(callback_param);
+               spin_lock_irqsave(&chan->lock, *flags);
+       }
+}
+
 /**
  * xilinx_dma_chan_desc_cleanup - Clean channel descriptors
  * @chan: Driver specific DMA channel
                dma_async_tx_callback callback;
                void *callback_param;
 
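+               /*
+                * Cyclic descriptors are not completed or freed here; just
+                * run the period callback and leave them on the list.
+                */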
+               if (desc->cyclic) {
+                       xilinx_dma_chan_handle_cyclic(chan, desc, &flags);
+                       break;
+               }
+
                /* Remove from the list of running transactions */
                list_del(&desc->node);
 
                return -ENOMEM;
        }
 
-       if (chan->xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA)
+       if (chan->xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) {
                /*
                 * For AXI DMA case after submitting a pending_list, keep
                 * an extra segment allocated so that the "next descriptor"
                 */
                chan->seg_v = xilinx_axidma_alloc_tx_segment(chan);
 
+               /*
+                * In cyclic DMA mode the tail descriptor register must be
+                * programmed with a value that is not part of the BD chain,
+                * so reserve an extra segment at channel allocation time
+                * just for programming the tail descriptor.
+                */
+               chan->cyclic_seg_v = xilinx_axidma_alloc_tx_segment(chan);
+       }
+
        dma_cookie_init(dchan);
 
        if (chan->xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) {
 
        /* Start the transfer */
        if (chan->has_sg) {
-               dma_ctrl_write(chan, XILINX_DMA_REG_TAILDESC,
-                              tail_segment->phys);
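+               /*
+                * In cyclic mode, program TAILDESC with the reserved BD that
+                * is not linked into the ring so the engine never sees a
+                * matching tail and keeps cycling through the chain.
+                */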
+               if (chan->cyclic)
+                       dma_ctrl_write(chan, XILINX_DMA_REG_TAILDESC,
+                                      chan->cyclic_seg_v->phys);
+               else
+                       dma_ctrl_write(chan, XILINX_DMA_REG_TAILDESC,
+                                      tail_segment->phys);
        } else {
                struct xilinx_axidma_tx_segment *segment;
                struct xilinx_axidma_desc_hw *hw;
 
        list_for_each_entry_safe(desc, next, &chan->active_list, node) {
                list_del(&desc->node);
-               dma_cookie_complete(&desc->async_tx);
+               if (!desc->cyclic)
+                       dma_cookie_complete(&desc->async_tx);
                list_add_tail(&desc->node, &chan->done_list);
        }
 }
        unsigned long flags;
        int err;
 
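+       /*
+        * A queued cyclic transfer owns the channel until it is terminated,
+        * so reject any further submissions.
+        */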
+       if (chan->cyclic) {
+               xilinx_dma_free_tx_descriptor(chan, desc);
+               return -EBUSY;
+       }
+
        if (chan->err) {
                /*
                 * If reset fails, need to hard reset the system.
        /* Put this transaction onto the tail of the pending queue */
        append_desc_queue(chan, desc);
 
+       if (desc->cyclic)
+               chan->cyclic = true;
+
        spin_unlock_irqrestore(&chan->lock, flags);
 
        return cookie;
        return NULL;
 }
 
+/**
+ * xilinx_dma_prep_dma_cyclic - prepare descriptors for a cyclic DMA transaction
+ * @dchan: DMA channel
+ * @buf_addr: Physical address of the buffer
+ * @buf_len: Total length of the cyclic buffer
+ * @period_len: Period length
+ * @direction: DMA direction
+ * @flags: transfer ack flags
+ *
+ * Return: Async transaction descriptor on success and NULL on failure
+ */
+static struct dma_async_tx_descriptor *xilinx_dma_prep_dma_cyclic(
+       struct dma_chan *dchan, dma_addr_t buf_addr, size_t buf_len,
+       size_t period_len, enum dma_transfer_direction direction,
+       unsigned long flags)
+{
+       struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
+       struct xilinx_dma_tx_descriptor *desc;
+       struct xilinx_axidma_tx_segment *segment, *head_segment, *prev = NULL;
+       size_t copy, sg_used;
+       unsigned int num_periods;
+       int i;
+       u32 reg;
+
+       if (!period_len)
+               return NULL;
+
+       if (!is_slave_direction(direction))
+               return NULL;
+
+       num_periods = buf_len / period_len;
+
+       /* Allocate a transaction descriptor. */
+       desc = xilinx_dma_alloc_tx_descriptor(chan);
+       if (!desc)
+               return NULL;
+
+       chan->direction = direction;
+       dma_async_tx_descriptor_init(&desc->async_tx, &chan->common);
+       desc->async_tx.tx_submit = xilinx_dma_tx_submit;
+
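+       /*
+        * Build the BD chain period by period; a period longer than the
+        * hardware transfer limit is split across multiple BDs.
+        */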
+       for (i = 0; i < num_periods; ++i) {
+               sg_used = 0;
+
+               while (sg_used < period_len) {
+                       struct xilinx_axidma_desc_hw *hw;
+
+                       /* Get a free segment */
+                       segment = xilinx_axidma_alloc_tx_segment(chan);
+                       if (!segment)
+                               goto error;
+
+                       /*
+                        * Calculate the maximum number of bytes to transfer,
+                        * making sure it is less than the hw limit
+                        */
+                       copy = min_t(size_t, period_len - sg_used,
+                                    XILINX_DMA_MAX_TRANS_LEN);
+                       hw = &segment->hw;
+                       hw->buf_addr = buf_addr + sg_used + (period_len * i);
+                       hw->control = copy;
+
+                       if (prev)
+                               prev->hw.next_desc = segment->phys;
+
+                       prev = segment;
+                       sg_used += copy;
+
+                       /*
+                        * Insert the segment into the descriptor segments
+                        * list.
+                        */
+                       list_add_tail(&segment->node, &desc->segments);
+               }
+       }
+
+       head_segment = list_first_entry(&desc->segments,
+                                  struct xilinx_axidma_tx_segment, node);
+       desc->async_tx.phys = head_segment->phys;
+
+       desc->cyclic = true;
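+
+       /* Turn on cyclic BD processing so the engine loops over the BD ring */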
+       reg = dma_ctrl_read(chan, XILINX_DMA_REG_DMACR);
+       reg |= XILINX_DMA_CR_CYCLIC_BD_EN_MASK;
+       dma_ctrl_write(chan, XILINX_DMA_REG_DMACR, reg);
+
+       /* Link the last BD back to the head to close the cyclic ring */
+       segment = list_last_entry(&desc->segments,
+                                 struct xilinx_axidma_tx_segment, node);
+       segment->hw.next_desc = (u32) head_segment->phys;
+
+       /* For DMA_MEM_TO_DEV, set SOP on the first BD and EOP on the last */
+       if (direction == DMA_MEM_TO_DEV) {
+               head_segment->hw.control |= XILINX_DMA_BD_SOP;
+               segment->hw.control |= XILINX_DMA_BD_EOP;
+       }
+
+       return &desc->async_tx;
+
+error:
+       xilinx_dma_free_tx_descriptor(chan, desc);
+       return NULL;
+}
+
 /**
  * xilinx_dma_terminate_all - Halt the channel and free descriptors
  * @chan: Driver specific DMA Channel pointer
 static int xilinx_dma_terminate_all(struct dma_chan *dchan)
 {
        struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
+       u32 reg;
+
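+       /* Cyclic transfers keep the engine busy; reset the channel first */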
+       if (chan->cyclic)
+               xilinx_dma_chan_reset(chan);
 
        /* Halt the DMA engine */
        xilinx_dma_halt(chan);
        /* Remove and free all of the descriptors in the lists */
        xilinx_dma_free_descriptors(chan);
 
+       if (chan->cyclic) {
+               reg = dma_ctrl_read(chan, XILINX_DMA_REG_DMACR);
+               reg &= ~XILINX_DMA_CR_CYCLIC_BD_EN_MASK;
+               dma_ctrl_write(chan, XILINX_DMA_REG_DMACR, reg);
+               chan->cyclic = false;
+       }
+
        return 0;
 }
 
        xdev->common.device_tx_status = xilinx_dma_tx_status;
        xdev->common.device_issue_pending = xilinx_dma_issue_pending;
        if (xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) {
+               dma_cap_set(DMA_CYCLIC, xdev->common.cap_mask);
                xdev->common.device_prep_slave_sg = xilinx_dma_prep_slave_sg;
+               xdev->common.device_prep_dma_cyclic =
+                                         xilinx_dma_prep_dma_cyclic;
                /* Residue calculation is supported by only AXI DMA */
                xdev->common.residue_granularity =
                                          DMA_RESIDUE_GRANULARITY_SEGMENT;