        spin_lock_irq(&ev->lock);
        ev->clearing |= mask;
-       if (!ev->block) {
-               cancel_delayed_work(&ev->dwork);
-               queue_delayed_work(system_nrt_freezable_wq, &ev->dwork, 0);
-       }
+       if (!ev->block)
+               mod_delayed_work(system_nrt_freezable_wq, &ev->dwork, 0);
        spin_unlock_irq(&ev->lock);
 }
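
The hunks in this series all apply the same transformation: an open-coded cancel_delayed_work() (or delayed_work_pending() check) followed by queue_delayed_work()/schedule_delayed_work() collapses into a single mod_delayed_work() call, which queues the work if it is idle and otherwise re-arms its timer. A minimal sketch of the pattern, using a hypothetical "foo" driver rather than any of the files touched in these hunks:

        #include <linux/workqueue.h>

        /* Hypothetical driver state; "foo" is not one of the drivers above. */
        struct foo_dev {
                struct delayed_work poll_work;
        };

        static void foo_kick_poll(struct foo_dev *foo, unsigned long delay)
        {
                /*
                 * Old idiom: cancel a possibly-pending work item, then queue
                 * it again so it runs 'delay' jiffies from now:
                 *
                 *      cancel_delayed_work(&foo->poll_work);
                 *      queue_delayed_work(system_wq, &foo->poll_work, delay);
                 */

                /* New idiom: queue if idle, otherwise re-arm the timer. */
                mod_delayed_work(system_wq, &foo->poll_work, delay);
        }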
 
 
                return;
 
        INIT_DELAYED_WORK(&mci->work, edac_mc_workq_function);
-       queue_delayed_work(edac_workqueue, &mci->work, msecs_to_jiffies(msec));
+       mod_delayed_work(edac_workqueue, &mci->work, msecs_to_jiffies(msec));
 }
 
 /*
 
        mutex_lock(&mem_ctls_mutex);
 
-       /* scan the list and turn off all workq timers, doing so under lock
-        */
-       list_for_each(item, &mc_devices) {
-               mci = list_entry(item, struct mem_ctl_info, link);
-
-               if (mci->op_state == OP_RUNNING_POLL)
-                       cancel_delayed_work(&mci->work);
-       }
-
-       mutex_unlock(&mem_ctls_mutex);
-
-
-       /* re-walk the list, and reset the poll delay */
-       mutex_lock(&mem_ctls_mutex);
-
        list_for_each(item, &mc_devices) {
                mci = list_entry(item, struct mem_ctl_info, link);
 
 
 {
        unsigned long delay;
 
-       cancel_delayed_work(&work);
-
        delay = time - jiffies;
        if ((long)delay <= 0)
                delay = 1;
 
-       queue_delayed_work(addr_wq, &work, delay);
+       mod_delayed_work(addr_wq, &work, delay);
 }
 
 static void queue_req(struct addr_req *req)
 
                        }
                }
                if (nesadapter->phy_type[mac_index] == NES_PHY_TYPE_SFP_D) {
-                       if (nesdev->link_recheck)
-                               cancel_delayed_work(&nesdev->work);
                        nesdev->link_recheck = 1;
-                       schedule_delayed_work(&nesdev->work,
-                                             NES_LINK_RECHECK_DELAY);
+                       mod_delayed_work(system_wq, &nesdev->work,
+                                        NES_LINK_RECHECK_DELAY);
                }
        }
 
 
 
        spin_lock_irqsave(&nesdev->nesadapter->phy_lock, flags);
        if (nesdev->nesadapter->phy_type[nesdev->mac_index] == NES_PHY_TYPE_SFP_D) {
-               if (nesdev->link_recheck)
-                       cancel_delayed_work(&nesdev->work);
                nesdev->link_recheck = 1;
-               schedule_delayed_work(&nesdev->work, NES_LINK_RECHECK_DELAY);
+               mod_delayed_work(system_wq, &nesdev->work,
+                                NES_LINK_RECHECK_DELAY);
        }
        spin_unlock_irqrestore(&nesdev->nesadapter->phy_lock, flags);
 
 
 
        /* Make sure the RF Kill check timer is running */
        priv->stop_rf_kill = 0;
-       cancel_delayed_work(&priv->rf_kill);
-       schedule_delayed_work(&priv->rf_kill, round_jiffies_relative(HZ));
+       mod_delayed_work(system_wq, &priv->rf_kill, round_jiffies_relative(HZ));
 }
 
 static void send_scan_event(void *data)
                                          "disabled by HW switch\n");
                        /* Make sure the RF_KILL check timer is running */
                        priv->stop_rf_kill = 0;
-                       cancel_delayed_work(&priv->rf_kill);
-                       schedule_delayed_work(&priv->rf_kill,
-                                             round_jiffies_relative(HZ));
+                       mod_delayed_work(system_wq, &priv->rf_kill,
+                                        round_jiffies_relative(HZ));
                } else
                        schedule_reset(priv);
        }
 
 {
        struct zd_usb_rx *rx = &usb->rx;
 
-       cancel_delayed_work(&rx->idle_work);
-       queue_delayed_work(zd_workqueue, &rx->idle_work, ZD_RX_IDLE_INTERVAL);
+       mod_delayed_work(zd_workqueue, &rx->idle_work, ZD_RX_IDLE_INTERVAL);
 }
 
 static inline void init_usb_interrupt(struct zd_usb *usb)
 
 
 static void fan_watchdog_reset(void)
 {
-       static int fan_watchdog_active;
-
        if (fan_control_access_mode == TPACPI_FAN_WR_NONE)
                return;
 
-       if (fan_watchdog_active)
-               cancel_delayed_work(&fan_watchdog_task);
-
        if (fan_watchdog_maxinterval > 0 &&
-           tpacpi_lifecycle != TPACPI_LIFE_EXITING) {
-               fan_watchdog_active = 1;
-               if (!queue_delayed_work(tpacpi_wq, &fan_watchdog_task,
-                               msecs_to_jiffies(fan_watchdog_maxinterval
-                                                * 1000))) {
-                       pr_err("failed to queue the fan watchdog, "
-                              "watchdog will not trigger\n");
-               }
-       } else
-               fan_watchdog_active = 0;
+           tpacpi_lifecycle != TPACPI_LIFE_EXITING)
+               mod_delayed_work(tpacpi_wq, &fan_watchdog_task,
+                       msecs_to_jiffies(fan_watchdog_maxinterval * 1000));
+       else
+               cancel_delayed_work(&fan_watchdog_task);
 }
 
 static void fan_watchdog_fire(struct work_struct *ignored)
 
        if (!delayed_work_pending(&cm_monitor_work) ||
            (delayed_work_pending(&cm_monitor_work) &&
             time_after(next_polling, _next_polling))) {
-               cancel_delayed_work_sync(&cm_monitor_work);
                next_polling = jiffies + polling_jiffy;
-               queue_delayed_work(cm_wq, &cm_monitor_work, polling_jiffy);
+               mod_delayed_work(cm_wq, &cm_monitor_work, polling_jiffy);
        }
 
 out:
        if (cm_suspended)
                device_set_wakeup_capable(cm->dev, true);
 
-       if (delayed_work_pending(&cm->fullbatt_vchk_work))
-               cancel_delayed_work(&cm->fullbatt_vchk_work);
-       queue_delayed_work(cm_wq, &cm->fullbatt_vchk_work,
-                          msecs_to_jiffies(desc->fullbatt_vchkdrop_ms));
+       mod_delayed_work(cm_wq, &cm->fullbatt_vchk_work,
+                        msecs_to_jiffies(desc->fullbatt_vchkdrop_ms));
        cm->fullbatt_vchk_jiffies_at = jiffies + msecs_to_jiffies(
                                       desc->fullbatt_vchkdrop_ms);
 
 
 
        dev_dbg(di->dev, "%s\n", __func__);
 
-       cancel_delayed_work(&di->monitor_work);
-       queue_delayed_work(di->monitor_wqueue, &di->monitor_work, HZ/10);
+       mod_delayed_work(di->monitor_wqueue, &di->monitor_work, HZ/10);
 }
 
 
 
        /* postpone the actual work by 20 secs. This is for debouncing GPIO
         * signals and to let the current value settle. See AN4188. */
-       cancel_delayed_work(&di->set_charged_work);
-       queue_delayed_work(di->monitor_wqueue, &di->set_charged_work, HZ * 20);
+       mod_delayed_work(di->monitor_wqueue, &di->set_charged_work, HZ * 20);
 }
 
 static int ds2760_battery_get_property(struct power_supply *psy,
        di->charge_status = POWER_SUPPLY_STATUS_UNKNOWN;
        power_supply_changed(&di->bat);
 
-       cancel_delayed_work(&di->monitor_work);
-       queue_delayed_work(di->monitor_wqueue, &di->monitor_work, HZ);
+       mod_delayed_work(di->monitor_wqueue, &di->monitor_work, HZ);
 
        return 0;
 }
 
 {
        struct jz_battery *jz_battery = psy_to_jz_battery(psy);
 
-       cancel_delayed_work(&jz_battery->work);
-       schedule_delayed_work(&jz_battery->work, 0);
+       mod_delayed_work(system_wq, &jz_battery->work, 0);
 }
 
 static irqreturn_t jz_battery_charge_irq(int irq, void *data)
 {
        struct jz_battery *jz_battery = data;
 
-       cancel_delayed_work(&jz_battery->work);
-       schedule_delayed_work(&jz_battery->work, 0);
+       mod_delayed_work(system_wq, &jz_battery->work, 0);
 
        return IRQ_HANDLED;
 }
 
 static void thermal_zone_device_set_polling(struct thermal_zone_device *tz,
                                            int delay)
 {
-       cancel_delayed_work(&(tz->poll_queue));
-
-       if (!delay)
-               return;
-
        if (delay > 1000)
-               queue_delayed_work(system_freezable_wq, &(tz->poll_queue),
-                                     round_jiffies(msecs_to_jiffies(delay)));
+               mod_delayed_work(system_freezable_wq, &tz->poll_queue,
+                                round_jiffies(msecs_to_jiffies(delay)));
+       else if (delay)
+               mod_delayed_work(system_freezable_wq, &tz->poll_queue,
+                                msecs_to_jiffies(delay));
        else
-               queue_delayed_work(system_freezable_wq, &(tz->poll_queue),
-                                     msecs_to_jiffies(delay));
+               cancel_delayed_work(&tz->poll_queue);
 }
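
The thermal helper above (like the thinkpad fan watchdog and charger-manager hunks earlier) also folds the stop case into the same switch: a nonzero interval re-arms the poller with mod_delayed_work(), while a zero interval cancels it, instead of unconditionally cancelling up front. A small sketch of that idiom, assuming a made-up foo_poll_work item:

        #include <linux/jiffies.h>
        #include <linux/workqueue.h>

        static struct delayed_work foo_poll_work;       /* hypothetical poller */

        static void foo_set_polling(int delay_ms)
        {
                if (delay_ms)
                        /* (Re)arm: fires delay_ms from now, pending or not. */
                        mod_delayed_work(system_freezable_wq, &foo_poll_work,
                                         msecs_to_jiffies(delay_ms));
                else
                        /* A zero interval now means "stop polling". */
                        cancel_delayed_work(&foo_poll_work);
        }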
 
 static void thermal_zone_device_passive(struct thermal_zone_device *tz,
 
  */
 void afs_flush_callback_breaks(struct afs_server *server)
 {
-       cancel_delayed_work(&server->cb_break_work);
-       queue_delayed_work(afs_callback_update_worker,
-                          &server->cb_break_work, 0);
+       mod_delayed_work(afs_callback_update_worker, &server->cb_break_work, 0);
 }
 
 #if 0
 
                expiry = server->time_of_death + afs_server_timeout;
                if (expiry > now) {
                        delay = (expiry - now) * HZ;
-                       if (!queue_delayed_work(afs_wq, &afs_server_reaper,
-                                               delay)) {
-                               cancel_delayed_work(&afs_server_reaper);
-                               queue_delayed_work(afs_wq, &afs_server_reaper,
-                                                  delay);
-                       }
+                       mod_delayed_work(afs_wq, &afs_server_reaper, delay);
                        break;
                }
 
 void __exit afs_purge_servers(void)
 {
        afs_server_timeout = 0;
-       cancel_delayed_work(&afs_server_reaper);
-       queue_delayed_work(afs_wq, &afs_server_reaper, 0);
+       mod_delayed_work(afs_wq, &afs_server_reaper, 0);
 }
 
                if (expiry > now) {
                        delay = (expiry - now) * HZ;
                        _debug("delay %lu", delay);
-                       if (!queue_delayed_work(afs_wq, &afs_vlocation_reap,
-                                               delay)) {
-                               cancel_delayed_work(&afs_vlocation_reap);
-                               queue_delayed_work(afs_wq, &afs_vlocation_reap,
-                                                  delay);
-                       }
+                       mod_delayed_work(afs_wq, &afs_vlocation_reap, delay);
                        break;
                }
 
        spin_lock(&afs_vlocation_updates_lock);
        list_del_init(&afs_vlocation_updates);
        spin_unlock(&afs_vlocation_updates_lock);
-       cancel_delayed_work(&afs_vlocation_update);
-       queue_delayed_work(afs_vlocation_update_worker,
-                          &afs_vlocation_update, 0);
+       mod_delayed_work(afs_vlocation_update_worker, &afs_vlocation_update, 0);
        destroy_workqueue(afs_vlocation_update_worker);
 
-       cancel_delayed_work(&afs_vlocation_reap);
-       queue_delayed_work(afs_wq, &afs_vlocation_reap, 0);
+       mod_delayed_work(afs_wq, &afs_vlocation_reap, 0);
 }
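
The afs hunks above additionally drop the queue-failure fallback: queue_delayed_work() returns false and does nothing when the work is already pending, which is why the old code retried with cancel_delayed_work() followed by a second queue_delayed_work(). mod_delayed_work() always leaves the work scheduled at the new expiry, so the fallback and the return-value check both disappear. A minimal helper showing the resulting idiom (names are hypothetical):

        #include <linux/workqueue.h>

        static struct delayed_work foo_reaper;  /* hypothetical reaper work item */

        /*
         * Reschedule the reaper to run 'delay' jiffies from now, regardless of
         * whether it is currently idle or already pending with another expiry.
         */
        static void foo_resched_reaper(struct workqueue_struct *wq,
                                       unsigned long delay)
        {
                mod_delayed_work(wq, &foo_reaper, delay);
        }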
 
 /*
 
                timeout = 5 * HZ;
        dprintk("%s: requeueing work. Lease period = %ld\n",
                        __func__, (timeout + HZ - 1) / HZ);
-       cancel_delayed_work(&clp->cl_renewd);
-       schedule_delayed_work(&clp->cl_renewd, timeout);
+       mod_delayed_work(system_wq, &clp->cl_renewd, timeout);
        set_bit(NFS_CS_RENEWD, &clp->cl_res_state);
        spin_unlock(&clp->cl_lock);
 }
 
        if (dst_garbage.timer_inc > DST_GC_INC) {
                dst_garbage.timer_inc = DST_GC_INC;
                dst_garbage.timer_expires = DST_GC_MIN;
-               cancel_delayed_work(&dst_gc_work);
-               schedule_delayed_work(&dst_gc_work, dst_garbage.timer_expires);
+               mod_delayed_work(system_wq, &dst_gc_work,
+                                dst_garbage.timer_expires);
        }
        spin_unlock_bh(&dst_garbage.lock);
 }
 
        rfkill_op_pending = true;
        if (op == RFKILL_GLOBAL_OP_EPO && !rfkill_is_epo_lock_active()) {
                /* bypass the limiter for EPO */
-               cancel_delayed_work(&rfkill_op_work);
-               schedule_delayed_work(&rfkill_op_work, 0);
+               mod_delayed_work(system_wq, &rfkill_op_work, 0);
                rfkill_last_scheduled = jiffies;
        } else
                rfkill_schedule_ratelimited();