struct list_head list;
        unsigned int restart_count;
        time64_t last_error;
+       bool backoff;
        char name[];
 };
 
        if (!data)
                return at_least;
 
-       if (ktime_get_boottime_seconds() - data->last_error >=
+       if (!data->backoff &&
+           ktime_get_boottime_seconds() - data->last_error >=
                        IWL_TRANS_RESET_OK_TIME)
                data->restart_count = 0;
 
        index = data->restart_count;
-       if (index >= ARRAY_SIZE(escalation_list))
+       if (index >= ARRAY_SIZE(escalation_list)) {
                index = ARRAY_SIZE(escalation_list) - 1;
+               if (!data->backoff) {
+                       data->backoff = true;
+                       return IWL_RESET_MODE_BACKOFF;
+               }
+               data->backoff = false;
+       }
 
        return max(at_least, escalation_list[index]);
 }
 
 static void iwl_trans_restart_wk(struct work_struct *wk)
 {
-       struct iwl_trans *trans = container_of(wk, typeof(*trans), restart.wk);
+       struct iwl_trans *trans = container_of(wk, typeof(*trans),
+                                              restart.wk.work);
        struct iwl_trans_reprobe *reprobe;
        enum iwl_reset_mode mode;
 
                return;
 
        mode = iwl_trans_determine_restart_mode(trans);
+       if (mode == IWL_RESET_MODE_BACKOFF) {
+               IWL_ERR(trans, "Too many device errors - delay next reset\n");
+               queue_delayed_work(system_unbound_wq, &trans->restart.wk,
+                                  IWL_TRANS_RESET_DELAY);
+               return;
+       }
 
        iwl_trans_inc_restart_count(trans->dev);
 
        trans->dev = dev;
        trans->num_rx_queues = 1;
 
-       INIT_WORK(&trans->restart.wk, iwl_trans_restart_wk);
+       INIT_DELAYED_WORK(&trans->restart.wk, iwl_trans_restart_wk);
 
        return trans;
 }
 
 void iwl_trans_free(struct iwl_trans *trans)
 {
-       cancel_work_sync(&trans->restart.wk);
+       /* restart.wk is now a delayed_work; use the matching cancel API */
+       cancel_delayed_work_sync(&trans->restart.wk);
        kmem_cache_destroy(trans->dev_cmd_pool);
 }
 
 
        iwl_trans_pcie_op_mode_leave(trans);
 
-       cancel_work_sync(&trans->restart.wk);
+       cancel_delayed_work_sync(&trans->restart.wk);
 
        trans->op_mode = NULL;
 
 
        struct iwl_dma_ptr invalid_tx_cmd;
 
        struct {
-               struct work_struct wk;
+               struct delayed_work wk;
                struct iwl_fw_error_dump_mode mode;
                bool during_reset;
        } restart;
         */
        trans->restart.during_reset = test_bit(STATUS_IN_SW_RESET,
                                               &trans->status);
-       queue_work(system_unbound_wq, &trans->restart.wk);
+       queue_delayed_work(system_unbound_wq, &trans->restart.wk, 0);
 }
 
 static inline void iwl_trans_fw_error(struct iwl_trans *trans,
        IWL_RESET_MODE_RESCAN,
        IWL_RESET_MODE_FUNC_RESET,
        IWL_RESET_MODE_PROD_RESET,
+
+       /* keep last - special backoff value */
+       IWL_RESET_MODE_BACKOFF,
 };
 
 void iwl_trans_pcie_reset(struct iwl_trans *trans, enum iwl_reset_mode mode);