/* it better be dead now */
        WARN_ON(!(ap->pflags & ATA_PFLAG_UNLOADED));
 
-       cancel_rearming_delayed_work(&ap->hotplug_task);
+       cancel_delayed_work_sync(&ap->hotplug_task);
 
  skip_eh:
        if (ap->pmp_link) {
 
 {
        DPRINTK("ENTER\n");
 
-       cancel_rearming_delayed_work(&ap->sff_pio_task);
+       cancel_delayed_work_sync(&ap->sff_pio_task);
        ap->hsm_task_state = HSM_ST_IDLE;
 
        if (ata_msg_ctl(ap))
 
 
 static void __devexit rackmeter_stop_cpu_sniffer(struct rackmeter *rm)
 {
-       cancel_rearming_delayed_work(&rm->cpu[0].sniffer);
-       cancel_rearming_delayed_work(&rm->cpu[1].sniffer);
+       cancel_delayed_work_sync(&rm->cpu[0].sniffer);
+       cancel_delayed_work_sync(&rm->cpu[1].sniffer);
 }
 
 static int __devinit rackmeter_setup(struct rackmeter *rm)
 
 int dvb_usb_remote_exit(struct dvb_usb_device *d)
 {
        if (d->state & DVB_USB_STATE_REMOTE) {
-               cancel_rearming_delayed_work(&d->rc_query_work);
+               cancel_delayed_work_sync(&d->rc_query_work);
                flush_scheduled_work();
                if (d->props.rc.mode == DVB_RC_LEGACY)
                        input_unregister_device(d->rc_input_dev);
 
 {
        if (dev->sbutton_input_dev != NULL) {
                em28xx_info("Deregistering snapshot button\n");
-               cancel_rearming_delayed_work(&dev->sbutton_query_work);
+               cancel_delayed_work_sync(&dev->sbutton_query_work);
                input_unregister_device(dev->sbutton_input_dev);
                dev->sbutton_input_dev = NULL;
        }
 
 
 static int my3126_interrupt_disable(struct cphy *cphy)
 {
-       cancel_rearming_delayed_work(&cphy->phy_update);
+       cancel_delayed_work_sync(&cphy->phy_update);
        return 0;
 }
 
 
        netif_carrier_off(dev->ndev);
        smp_rmb();
        if (dev->link_polling) {
-               cancel_rearming_delayed_work(&dev->link_work);
+               cancel_delayed_work_sync(&dev->link_work);
                if (dev->link_polling)
                        schedule_delayed_work(&dev->link_work,  PHY_POLL_LINK_OFF);
        }
 
        if (dev->phy.address >= 0) {
                dev->link_polling = 0;
-               cancel_rearming_delayed_work(&dev->link_work);
+               cancel_delayed_work_sync(&dev->link_work);
        }
        mutex_lock(&dev->link_lock);
        emac_netif_stop(dev);
 
 static void housekeeping_disable(struct zd_mac *mac)
 {
        dev_dbg_f(zd_mac_dev(mac), "\n");
-       cancel_rearming_delayed_workqueue(zd_workqueue,
-               &mac->housekeeping.link_led_work);
+       cancel_delayed_work_sync(&mac->housekeeping.link_led_work);
        zd_chip_control_leds(&mac->chip, ZD_LED_OFF);
 }
 
 {
        struct ds2760_device_info *di = platform_get_drvdata(pdev);
 
-       cancel_rearming_delayed_workqueue(di->monitor_wqueue,
-                                         &di->monitor_work);
-       cancel_rearming_delayed_workqueue(di->monitor_wqueue,
-                                         &di->set_charged_work);
+       cancel_delayed_work_sync(&di->monitor_work);
+       cancel_delayed_work_sync(&di->set_charged_work);
        destroy_workqueue(di->monitor_wqueue);
        power_supply_unregister(&di->bat);
        kfree(di);
 
 power_reg_failed_1:
        power_supply_unregister(&pbi->batt);
 power_reg_failed:
-       cancel_rearming_delayed_workqueue(pbi->monitor_wqueue,
-                                               &pbi->monitor_battery);
+       cancel_delayed_work_sync(&pbi->monitor_battery);
 requestirq_failed:
        destroy_workqueue(pbi->monitor_wqueue);
 wqueue_failed:
        struct pmic_power_module_info *pbi = dev_get_drvdata(&pdev->dev);
 
        free_irq(pbi->irq, pbi);
-       cancel_rearming_delayed_workqueue(pbi->monitor_wqueue,
-                                       &pbi->monitor_battery);
+       cancel_delayed_work_sync(&pbi->monitor_battery);
        destroy_workqueue(pbi->monitor_wqueue);
 
        power_supply_unregister(&pbi->usb);
 
        }
 
        psb->trans_scan_timeout = psb->drop_scan_timeout = 0;
-       cancel_rearming_delayed_work(&psb->dwork);
-       cancel_rearming_delayed_work(&psb->drop_dwork);
+       cancel_delayed_work_sync(&psb->dwork);
+       cancel_delayed_work_sync(&psb->drop_dwork);
        flush_scheduled_work();
 
        dprintk("%s: stopped workqueues.\n", __func__);
 
        mutex_unlock(&instance->poll_state_serialize);
 
        if (is_polling)
-               cancel_rearming_delayed_work(&instance->poll_work);
+               cancel_delayed_work_sync(&instance->poll_work);
 
        usb_kill_urb(instance->snd_urb);
        usb_kill_urb(instance->rcv_urb);
 
                return 0;
 
        /* Kill off the delayed work */
-       cancel_rearming_delayed_work(&info->deferred_work);
+       cancel_delayed_work_sync(&info->deferred_work);
 
        /* Run it immediately */
        return schedule_delayed_work(&info->deferred_work, 0);
 
 static void mipid_esd_stop_check(struct mipid_device *md)
 {
        if (md->esd_check != NULL)
-               cancel_rearming_delayed_workqueue(md->esd_wq, &md->esd_work);
+               cancel_delayed_work_sync(&md->esd_work);
 }
 
 static void mipid_esd_work(struct work_struct *work)
 
 void
 nfs4_state_shutdown(void)
 {
-       cancel_rearming_delayed_workqueue(laundry_wq, &laundromat_work);
+       cancel_delayed_work_sync(&laundromat_work);
        destroy_workqueue(laundry_wq);
        locks_end_grace(&nfsd4_manager);
        nfs4_lock_state();
 
        spin_lock(&mru->lock);
        if (mru->queued) {
                spin_unlock(&mru->lock);
-               cancel_rearming_delayed_workqueue(xfs_mru_reap_wq, &mru->work);
+               cancel_delayed_work_sync(&mru->work);
                spin_lock(&mru->lock);
        }
 
 
                 * anything expensive but will only modify reap_work
                 * and reschedule the timer.
                */
-               cancel_rearming_delayed_work(&per_cpu(slab_reap_work, cpu));
+               cancel_delayed_work_sync(&per_cpu(slab_reap_work, cpu));
                /* Now the cache_reaper is guaranteed to be not running. */
                per_cpu(slab_reap_work, cpu).work.func = NULL;
                break;
 
                break;
        case CPU_DOWN_PREPARE:
        case CPU_DOWN_PREPARE_FROZEN:
-               cancel_rearming_delayed_work(&per_cpu(vmstat_work, cpu));
+               cancel_delayed_work_sync(&per_cpu(vmstat_work, cpu));
                per_cpu(vmstat_work, cpu).work.func = NULL;
                break;
        case CPU_DOWN_FAILED:
 
        struct lec_arp_table *entry;
        int i;
 
-       cancel_rearming_delayed_work(&priv->lec_arp_work);
+       cancel_delayed_work_sync(&priv->lec_arp_work);
 
        /*
         * Remove all entries
 
 
                skb_queue_purge(&npinfo->arp_tx);
                skb_queue_purge(&npinfo->txq);
-               cancel_rearming_delayed_work(&npinfo->tx_work);
+               cancel_delayed_work_sync(&npinfo->tx_work);
 
                /* clean after last, unfinished work */
                __skb_queue_purge(&npinfo->txq);
 
 {
        EnterFunction(2);
        ip_vs_trash_cleanup();
-       cancel_rearming_delayed_work(&defense_work);
+       cancel_delayed_work_sync(&defense_work);
        cancel_work_sync(&defense_work.work);
        ip_vs_kill_estimator(&ip_vs_stats);
        unregister_sysctl_table(sysctl_header);
 
 
        dprintk("RPC:       xs_destroy xprt %p\n", xprt);
 
-       cancel_rearming_delayed_work(&transport->connect_worker);
+       cancel_delayed_work_sync(&transport->connect_worker);
 
        xs_close(xprt);
        xs_free_peer_addresses(xprt);