static struct ep93xx_dma_desc *
 ep93xx_dma_get_active(struct ep93xx_dma_chan *edmac)
 {
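+       /* The list may be empty, e.g. after dma_terminate_all() */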
+       if (list_empty(&edmac->active))
+               return NULL;
+
        return list_first_entry(&edmac->active, struct ep93xx_dma_desc, node);
 }
 
  */
 static bool ep93xx_dma_advance_active(struct ep93xx_dma_chan *edmac)
 {
+       struct ep93xx_dma_desc *desc;
+
        list_rotate_left(&edmac->active);
 
        if (test_bit(EP93XX_DMA_IS_CYCLIC, &edmac->flags))
                return true;
 
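+       /* Terminating the channel may have left us without a descriptor */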
+       desc = ep93xx_dma_get_active(edmac);
+       if (!desc)
+               return false;
+
        /*
         * If txd.cookie is set it means that we are back in the first
         * descriptor in the chain and hence done with it.
         */
-       return !ep93xx_dma_get_active(edmac)->txd.cookie;
+       return !desc->txd.cookie;
 }
 
 /*
 
 static void m2p_fill_desc(struct ep93xx_dma_chan *edmac)
 {
-       struct ep93xx_dma_desc *desc = ep93xx_dma_get_active(edmac);
+       struct ep93xx_dma_desc *desc;
        u32 bus_addr;
 
+       desc = ep93xx_dma_get_active(edmac);
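+       /* Nothing to program; warn instead of dereferencing a NULL descriptor */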
+       if (!desc) {
+               dev_warn(chan2dev(edmac), "M2P: empty descriptor list\n");
+               return;
+       }
+
        if (ep93xx_dma_chan_direction(&edmac->chan) == DMA_MEM_TO_DEV)
                bus_addr = desc->src_addr;
        else
 
 static void m2m_fill_desc(struct ep93xx_dma_chan *edmac)
 {
-       struct ep93xx_dma_desc *desc = ep93xx_dma_get_active(edmac);
+       struct ep93xx_dma_desc *desc;
+
+       desc = ep93xx_dma_get_active(edmac);
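+       /* As in m2p_fill_desc(), bail out if the active list is empty */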
+       if (!desc) {
+               dev_warn(chan2dev(edmac), "M2M: empty descriptor list\n");
+               return;
+       }
 
        if (edmac->buffer == 0) {
                writel(desc->src_addr, edmac->regs + M2M_SAR_BASE0);
 {
        struct ep93xx_dma_chan *edmac = (struct ep93xx_dma_chan *)data;
        struct ep93xx_dma_desc *desc, *d;
-       dma_async_tx_callback callback;
-       void *callback_param;
+       dma_async_tx_callback callback = NULL;
+       void *callback_param = NULL;
        LIST_HEAD(list);
 
        spin_lock_irq(&edmac->lock);
+       /*
+        * If dma_terminate_all() was called before we got to run, the active
+        * list has become empty. If that happens, we aren't supposed to do
+        * anything more than call ep93xx_dma_advance_work().
+        */
        desc = ep93xx_dma_get_active(edmac);
-       if (desc->complete) {
-               edmac->last_completed = desc->txd.cookie;
-               list_splice_init(&edmac->active, &list);
+       if (desc) {
+               if (desc->complete) {
+                       edmac->last_completed = desc->txd.cookie;
+                       list_splice_init(&edmac->active, &list);
+               }
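+               /* Grab the callback under the lock; it is invoked after we drop it */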
+               callback = desc->txd.callback;
+               callback_param = desc->txd.callback_param;
        }
        spin_unlock_irq(&edmac->lock);
 
        /* Pick up the next descriptor from the queue */
        ep93xx_dma_advance_work(edmac);
 
-       callback = desc->txd.callback;
-       callback_param = desc->txd.callback_param;
-
        /* Now we can release all the chained descriptors */
        list_for_each_entry_safe(desc, d, &list, node) {
                /*
 static irqreturn_t ep93xx_dma_interrupt(int irq, void *dev_id)
 {
        struct ep93xx_dma_chan *edmac = dev_id;
+       struct ep93xx_dma_desc *desc;
        irqreturn_t ret = IRQ_HANDLED;
 
        spin_lock(&edmac->lock);
 
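+       /* With no active descriptor the interrupt cannot be ours */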
+       desc = ep93xx_dma_get_active(edmac);
+       if (!desc) {
+               dev_warn(chan2dev(edmac),
+                        "got interrupt while active list is empty\n");
+               spin_unlock(&edmac->lock);
+               return IRQ_NONE;
+       }
+
        switch (edmac->edma->hw_interrupt(edmac)) {
        case INTERRUPT_DONE:
-               ep93xx_dma_get_active(edmac)->complete = true;
+               desc->complete = true;
                tasklet_schedule(&edmac->tasklet);
                break;