return IRQ_HANDLED;
 }
 
-static void ioat1_cleanup_tasklet(unsigned long data);
-
 /* common channel initialization */
-void ioat_init_channel(struct ioatdma_device *device,
-                      struct ioat_chan_common *chan, int idx,
-                      void (*timer_fn)(unsigned long),
-                      void (*tasklet)(unsigned long),
-                      unsigned long ioat)
+void ioat_init_channel(struct ioatdma_device *device,
+                      struct ioat_chan_common *chan, int idx)
 {
        struct dma_device *dma = &device->common;
+       struct dma_chan *c = &chan->common;
+       unsigned long data = (unsigned long) c;
 
        chan->device = device;
        chan->reg_base = device->reg_base + (0x80 * (idx + 1));
        list_add_tail(&chan->common.device_node, &dma->channels);
        device->idx[idx] = chan;
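+       /* the timer and cleanup routines are hw-version specific (assigned
+        * at probe time) and both take the dma_chan pointer as their data
+        */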
        init_timer(&chan->timer);
-       chan->timer.function = timer_fn;
-       chan->timer.data = ioat;
-       tasklet_init(&chan->cleanup_task, tasklet, ioat);
+       chan->timer.function = device->timer_fn;
+       chan->timer.data = data;
+       tasklet_init(&chan->cleanup_task, device->cleanup_fn, data);
        tasklet_disable(&chan->cleanup_task);
 }
 
-static void ioat1_timer_event(unsigned long data);
-
 /**
  * ioat1_dma_enumerate_channels - find and initialize the device's channels
  * @device: the device to be enumerated
                if (!ioat)
                        break;
 
-               ioat_init_channel(device, &ioat->base, i,
-                                 ioat1_timer_event,
-                                 ioat1_cleanup_tasklet,
-                                 (unsigned long) ioat);
+               ioat_init_channel(device, &ioat->base, i);
                ioat->xfercap = xfercap;
                spin_lock_init(&ioat->desc_lock);
                INIT_LIST_HEAD(&ioat->free_desc);
        return &desc->txd;
 }
 
-static void ioat1_cleanup_tasklet(unsigned long data)
+static void ioat1_cleanup_event(unsigned long data)
 {
-       struct ioat_dma_chan *chan = (void *)data;
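+       /* data is the dma_chan pointer installed by ioat_init_channel() */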
+       struct ioat_dma_chan *ioat = to_ioat_chan((void *) data);
 
-       ioat1_cleanup(chan);
-       writew(IOAT_CHANCTRL_RUN, chan->base.reg_base + IOAT_CHANCTRL_OFFSET);
+       ioat1_cleanup(ioat);
+       writew(IOAT_CHANCTRL_RUN, ioat->base.reg_base + IOAT_CHANCTRL_OFFSET);
 }
 
 void ioat_dma_unmap(struct ioat_chan_common *chan, enum dma_ctrl_flags flags,
 
 static void ioat1_timer_event(unsigned long data)
 {
-       struct ioat_dma_chan *ioat = (void *) data;
+       struct ioat_dma_chan *ioat = to_ioat_chan((void *) data);
        struct ioat_chan_common *chan = &ioat->base;
 
        dev_dbg(to_dev(chan), "%s: state: %lx\n", __func__, chan->state);
        spin_unlock_bh(&chan->cleanup_lock);
 }
 
-static enum dma_status
-ioat1_dma_is_complete(struct dma_chan *c, dma_cookie_t cookie,
+enum dma_status
+ioat_is_dma_complete(struct dma_chan *c, dma_cookie_t cookie,
                      dma_cookie_t *done, dma_cookie_t *used)
 {
-       struct ioat_dma_chan *ioat = to_ioat_chan(c);
+       struct ioat_chan_common *chan = to_chan_common(c);
+       struct ioatdma_device *device = chan->device;
 
        if (ioat_is_complete(c, cookie, done, used) == DMA_SUCCESS)
                return DMA_SUCCESS;
 
-       ioat1_cleanup(ioat);
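+       /* reap completed descriptors, then re-check the completion state */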
+       device->cleanup_fn((unsigned long) c);
 
        return ioat_is_complete(c, cookie, done, used);
 }
        device->intr_quirk = ioat1_intr_quirk;
        device->enumerate_channels = ioat1_enumerate_channels;
        device->self_test = ioat_dma_self_test;
+       device->timer_fn = ioat1_timer_event;
+       device->cleanup_fn = ioat1_cleanup_event;
        dma = &device->common;
        dma->device_prep_dma_memcpy = ioat1_dma_prep_memcpy;
        dma->device_issue_pending = ioat1_dma_memcpy_issue_pending;
        dma->device_alloc_chan_resources = ioat1_dma_alloc_chan_resources;
        dma->device_free_chan_resources = ioat1_dma_free_chan_resources;
-       dma->device_is_tx_complete = ioat1_dma_is_complete;
+       dma->device_is_tx_complete = ioat_is_dma_complete;
 
        err = ioat_probe(device);
        if (err)
 
  * @intr_quirk: interrupt setup quirk (for ioat_v1 devices)
  * @enumerate_channels: hw version specific channel enumeration
  * @reset_hw: hw version specific channel (re)initialization
- * @cleanup_tasklet: select between the v2 and v3 cleanup routines
- * @timer_fn: select between the v2 and v3 timer watchdog routines
+ * @cleanup_fn: select between the v1/v2/v3 cleanup routines
+ * @timer_fn: select between the v1/v2/v3 timer watchdog routines
  * @self_test: hardware version specific self test for each supported op type
  *
        void (*intr_quirk)(struct ioatdma_device *device);
        int (*enumerate_channels)(struct ioatdma_device *device);
        int (*reset_hw)(struct ioat_chan_common *chan);
-       void (*cleanup_tasklet)(unsigned long data);
+       void (*cleanup_fn)(unsigned long data);
        void (*timer_fn)(unsigned long data);
        int (*self_test)(struct ioatdma_device *device);
 };
                                              void __iomem *iobase);
 unsigned long ioat_get_current_completion(struct ioat_chan_common *chan);
 void ioat_init_channel(struct ioatdma_device *device,
-                      struct ioat_chan_common *chan, int idx,
-                      void (*timer_fn)(unsigned long),
-                      void (*tasklet)(unsigned long),
-                      unsigned long ioat);
+                      struct ioat_chan_common *chan, int idx);
+enum dma_status ioat_is_dma_complete(struct dma_chan *c, dma_cookie_t cookie,
+                                    dma_cookie_t *done, dma_cookie_t *used);
 void ioat_dma_unmap(struct ioat_chan_common *chan, enum dma_ctrl_flags flags,
                    size_t len, struct ioat_dma_descriptor *hw);
 bool ioat_cleanup_preamble(struct ioat_chan_common *chan,
 
        spin_unlock_bh(&chan->cleanup_lock);
 }
 
-void ioat2_cleanup_tasklet(unsigned long data)
+void ioat2_cleanup_event(unsigned long data)
 {
-       struct ioat2_dma_chan *ioat = (void *) data;
+       struct ioat2_dma_chan *ioat = to_ioat2_chan((void *) data);
 
        ioat2_cleanup(ioat);
        writew(IOAT_CHANCTRL_RUN, ioat->base.reg_base + IOAT_CHANCTRL_OFFSET);
 
 void ioat2_timer_event(unsigned long data)
 {
-       struct ioat2_dma_chan *ioat = (void *) data;
+       struct ioat2_dma_chan *ioat = to_ioat2_chan((void *) data);
        struct ioat_chan_common *chan = &ioat->base;
 
        spin_lock_bh(&chan->cleanup_lock);
                if (!ioat)
                        break;
 
-               ioat_init_channel(device, &ioat->base, i,
-                                 device->timer_fn,
-                                 device->cleanup_tasklet,
-                                 (unsigned long) ioat);
+               ioat_init_channel(device, &ioat->base, i);
                ioat->xfercap_log = xfercap_log;
                spin_lock_init(&ioat->ring_lock);
                if (device->reset_hw(&ioat->base)) {
 
                        mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT);
                        spin_unlock_bh(&chan->cleanup_lock);
-                       device->timer_fn((unsigned long) ioat);
+                       device->timer_fn((unsigned long) &chan->common);
                } else
                        spin_unlock_bh(&chan->cleanup_lock);
                return -ENOMEM;
 
        tasklet_disable(&chan->cleanup_task);
        del_timer_sync(&chan->timer);
-       device->cleanup_tasklet((unsigned long) ioat);
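+       /* one last cleanup pass to reap completed descriptors before reset */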
+       device->cleanup_fn((unsigned long) c);
        device->reset_hw(chan);
 
        spin_lock_bh(&ioat->ring_lock);
        ioat->dmacount = 0;
 }
 
-enum dma_status
-ioat2_is_complete(struct dma_chan *c, dma_cookie_t cookie,
-                    dma_cookie_t *done, dma_cookie_t *used)
-{
-       struct ioat2_dma_chan *ioat = to_ioat2_chan(c);
-       struct ioatdma_device *device = ioat->base.device;
-
-       if (ioat_is_complete(c, cookie, done, used) == DMA_SUCCESS)
-               return DMA_SUCCESS;
-
-       device->cleanup_tasklet((unsigned long) ioat);
-
-       return ioat_is_complete(c, cookie, done, used);
-}
-
 static ssize_t ring_size_show(struct dma_chan *c, char *page)
 {
        struct ioat2_dma_chan *ioat = to_ioat2_chan(c);
 
        device->enumerate_channels = ioat2_enumerate_channels;
        device->reset_hw = ioat2_reset_hw;
-       device->cleanup_tasklet = ioat2_cleanup_tasklet;
+       device->cleanup_fn = ioat2_cleanup_event;
        device->timer_fn = ioat2_timer_event;
        device->self_test = ioat_dma_self_test;
        dma = &device->common;
        dma->device_issue_pending = ioat2_issue_pending;
        dma->device_alloc_chan_resources = ioat2_alloc_chan_resources;
        dma->device_free_chan_resources = ioat2_free_chan_resources;
-       dma->device_is_tx_complete = ioat2_is_complete;
+       dma->device_is_tx_complete = ioat_is_dma_complete;
 
        err = ioat_probe(device);
        if (err)
 
 void ioat2_issue_pending(struct dma_chan *chan);
 int ioat2_alloc_chan_resources(struct dma_chan *c);
 void ioat2_free_chan_resources(struct dma_chan *c);
-enum dma_status ioat2_is_complete(struct dma_chan *c, dma_cookie_t cookie,
-                                 dma_cookie_t *done, dma_cookie_t *used);
 void __ioat2_restart_chan(struct ioat2_dma_chan *ioat);
 bool reshape_ring(struct ioat2_dma_chan *ioat, int order);
 void __ioat2_issue_pending(struct ioat2_dma_chan *ioat);
-void ioat2_cleanup_tasklet(unsigned long data);
+void ioat2_cleanup_event(unsigned long data);
 void ioat2_timer_event(unsigned long data);
 int ioat2_quiesce(struct ioat_chan_common *chan, unsigned long tmo);
 int ioat2_reset_sync(struct ioat_chan_common *chan, unsigned long tmo);
 
        spin_unlock_bh(&chan->cleanup_lock);
 }
 
-static void ioat3_cleanup_tasklet(unsigned long data)
+static void ioat3_cleanup_event(unsigned long data)
 {
-       struct ioat2_dma_chan *ioat = (void *) data;
+       struct ioat2_dma_chan *ioat = to_ioat2_chan((void *) data);
 
        ioat3_cleanup_sync(ioat);
        writew(IOAT_CHANCTRL_RUN, ioat->base.reg_base + IOAT_CHANCTRL_OFFSET);
 
 static void ioat3_timer_event(unsigned long data)
 {
-       struct ioat2_dma_chan *ioat = (void *) data;
+       struct ioat2_dma_chan *ioat = to_ioat2_chan((void *) data);
        struct ioat_chan_common *chan = &ioat->base;
 
        spin_lock_bh(&chan->cleanup_lock);
 
        if (is_raid_device) {
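+               /* raid devices need the v3 cleanup and timer routines */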
                dma->device_is_tx_complete = ioat3_is_complete;
-               device->cleanup_tasklet = ioat3_cleanup_tasklet;
+               device->cleanup_fn = ioat3_cleanup_event;
                device->timer_fn = ioat3_timer_event;
        } else {
-               dma->device_is_tx_complete = ioat2_is_complete;
-               device->cleanup_tasklet = ioat2_cleanup_tasklet;
+               dma->device_is_tx_complete = ioat_is_dma_complete;
+               device->cleanup_fn = ioat2_cleanup_event;
                device->timer_fn = ioat2_timer_event;
        }