@@ ... @@
 }
 
 /**
- * __cleanup - reclaim used descriptors
+ * __ioat_cleanup - reclaim used descriptors
  * @ioat_chan: channel (ring) to clean
  * @phys_complete: zeroed (or not) completion address (from status)
  */
-static void __cleanup(struct ioatdma_chan *ioat_chan, dma_addr_t phys_complete)
+static void __ioat_cleanup(struct ioatdma_chan *ioat_chan, dma_addr_t phys_complete)
 {
        struct ioatdma_device *ioat_dma = ioat_chan->ioat_dma;
        struct ioat_ring_ent *desc;
@@ ... @@ static void ioat_cleanup(struct ioatdma_chan *ioat_chan)
        spin_lock_bh(&ioat_chan->cleanup_lock);
 
        if (ioat_cleanup_preamble(ioat_chan, &phys_complete))
-               __cleanup(ioat_chan, phys_complete);
+               __ioat_cleanup(ioat_chan, phys_complete);
 
        if (is_ioat_halted(*ioat_chan->completion)) {
                u32 chanerr = readl(ioat_chan->reg_base + IOAT_CHANERR_OFFSET);
 
@@ ... @@ static void ioat_restart_channel(struct ioatdma_chan *ioat_chan)
        ioat_quiesce(ioat_chan, 0);
        if (ioat_cleanup_preamble(ioat_chan, &phys_complete))
-               __cleanup(ioat_chan, phys_complete);
+               __ioat_cleanup(ioat_chan, phys_complete);
 
        __ioat_restart_chan(ioat_chan);
 }
 
@@ ... @@ static void ioat_eh(struct ioatdma_chan *ioat_chan)
        /* cleanup so tail points to descriptor that caused the error */
        if (ioat_cleanup_preamble(ioat_chan, &phys_complete))
-               __cleanup(ioat_chan, phys_complete);
+               __ioat_cleanup(ioat_chan, phys_complete);
 
        chanerr = readl(ioat_chan->reg_base + IOAT_CHANERR_OFFSET);
        pci_read_config_dword(pdev, IOAT_PCI_CHANERR_INT_OFFSET, &chanerr_int);
@@ ... @@ void ioat_timer_event(struct timer_list *t)
                /* timer restarted in ioat_cleanup_preamble
                 * and IOAT_COMPLETION_ACK cleared
                 */
-               __cleanup(ioat_chan, phys_complete);
+               __ioat_cleanup(ioat_chan, phys_complete);
                goto unlock_out;
        }
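
The rename is mechanical; every call site keeps its two-argument form.
For context, here is a minimal userspace sketch of the name collision the
rename presumably avoids: include/linux/cleanup.h introduces a
function-like macro named __cleanup(), after which a local function of
the same name can no longer be called. The put_fd()/fd names below are
illustrative only, not part of the driver.

#include <stdio.h>

/* Same definition as the generic helper in include/linux/cleanup.h */
#define __cleanup(func) __attribute__((__cleanup__(func)))

static void put_fd(int *fd)
{
	printf("releasing fd %d\n", *fd);
}

int main(void)
{
	/* OK: the macro attaches a scope-exit callback to a variable. */
	int fd __cleanup(put_fd) = 42;

	/*
	 * Would not compile: a two-argument call such as
	 * __cleanup(ioat_chan, phys_complete) is now parsed as the
	 * one-parameter macro above -- hence __ioat_cleanup().
	 */
	return 0;
}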