{
        struct xilinx_vdma_config *config = &chan->config;
        struct xilinx_vdma_tx_descriptor *desc, *tail_desc;
-       unsigned long flags;
        u32 reg;
        struct xilinx_vdma_tx_segment *tail_segment;
 
+       /* This function must be called with chan->lock held */
        if (chan->err)
                return;
 
-       spin_lock_irqsave(&chan->lock, flags);
-
        if (list_empty(&chan->pending_list))
-               goto out_unlock;
+               return;
 
        desc = list_first_entry(&chan->pending_list,
                                struct xilinx_vdma_tx_descriptor, node);
        if (chan->has_sg && xilinx_vdma_is_running(chan) &&
            !xilinx_vdma_is_idle(chan)) {
                dev_dbg(chan->dev, "DMA controller still busy\n");
-               goto out_unlock;
+               return;
        }
 
        /*
        xilinx_vdma_start(chan);
 
        if (chan->err)
-               goto out_unlock;
+               return;
 
        /* Start the transfer */
        if (chan->has_sg) {
                }
 
                if (!last)
-                       goto out_unlock;
+                       return;
 
                /* HW expects these parameters to be same for one transaction */
                vdma_desc_write(chan, XILINX_VDMA_REG_HSIZE, last->hw.hsize);
 
        list_splice_tail_init(&chan->pending_list, &chan->active_list);
        chan->desc_pendingcount = 0;
-
-out_unlock:
-       spin_unlock_irqrestore(&chan->lock, flags);
 }
 
 /**
 static void xilinx_vdma_issue_pending(struct dma_chan *dchan)
 {
        struct xilinx_vdma_chan *chan = to_xilinx_chan(dchan);
+       unsigned long flags;
 
+       spin_lock_irqsave(&chan->lock, flags);
        xilinx_vdma_start_transfer(chan);
+       spin_unlock_irqrestore(&chan->lock, flags);
 }
 
 /**
 static void xilinx_vdma_complete_descriptor(struct xilinx_vdma_chan *chan)
 {
        struct xilinx_vdma_tx_descriptor *desc, *next;
-       unsigned long flags;
-
-       spin_lock_irqsave(&chan->lock, flags);
 
+       /* This function must be called with chan->lock held */
        if (list_empty(&chan->active_list))
-               goto out_unlock;
+               return;
 
        list_for_each_entry_safe(desc, next, &chan->active_list, node) {
                list_del(&desc->node);
                dma_cookie_complete(&desc->async_tx);
                list_add_tail(&desc->node, &chan->done_list);
        }
-
-out_unlock:
-       spin_unlock_irqrestore(&chan->lock, flags);
 }
 
 /**
        }
 
        if (status & XILINX_VDMA_DMASR_FRM_CNT_IRQ) {
+               spin_lock(&chan->lock);
                xilinx_vdma_complete_descriptor(chan);
                xilinx_vdma_start_transfer(chan);
+               spin_unlock(&chan->lock);
        }
 
        tasklet_schedule(&chan->tasklet);