engine->serial++; /* contexts lost */
 }
 
-bool intel_engine_is_idle(struct intel_engine_cs *engine);
 bool intel_engines_are_idle(struct intel_gt *gt);
+bool intel_engine_is_idle(struct intel_engine_cs *engine);
+void intel_engine_flush_submission(struct intel_engine_cs *engine);
 
 void intel_engines_reset_default_submission(struct intel_gt *gt);
 
 
        return idle;
 }
 
+/*
+ * intel_engine_flush_submission - flush the engine's submission tasklet
+ * @engine: engine whose execlists tasklet is to be flushed
+ *
+ * If the execlists submission tasklet is pending, run it inline on this
+ * cpu (with bottom halves disabled) rather than waiting for ksoftirqd.
+ * The trylock may fail and __tasklet_is_enabled() may be false while a
+ * GPU reset holds/disables the tasklet; in that case we skip the inline
+ * run. Either way, tasklet_unlock_wait() ensures any invocation of the
+ * tasklet concurrently executing on another cpu has completed before we
+ * return.
+ */
+void intel_engine_flush_submission(struct intel_engine_cs *engine)
+{
+       struct tasklet_struct *t = &engine->execlists.tasklet;
+
+       if (__tasklet_is_scheduled(t)) {
+               local_bh_disable();
+               if (tasklet_trylock(t)) {
+                       /* Must wait for any GPU reset in progress. */
+                       if (__tasklet_is_enabled(t))
+                               t->func(t->data);
+                       tasklet_unlock(t);
+               }
+               local_bh_enable();
+       }
+
+       /* Otherwise flush the tasklet if it was running on another cpu */
+       tasklet_unlock_wait(t);
+}
+
 /**
  * intel_engine_is_idle() - Report if the engine has finished process all work
  * @engine: the intel_engine_cs
 
        /* Waiting to drain ELSP? */
        if (execlists_active(&engine->execlists)) {
-               struct tasklet_struct *t = &engine->execlists.tasklet;
-
                synchronize_hardirq(engine->i915->drm.pdev->irq);
 
-               local_bh_disable();
-               if (tasklet_trylock(t)) {
-                       /* Must wait for any GPU reset in progress. */
-                       if (__tasklet_is_enabled(t))
-                               t->func(t->data);
-                       tasklet_unlock(t);
-               }
-               local_bh_enable();
-
-               /* Otherwise flush the tasklet if it was on another cpu */
-               tasklet_unlock_wait(t);
+               intel_engine_flush_submission(engine);
 
                if (execlists_active(&engine->execlists))
                        return false;
 
  * Copyright © 2019 Intel Corporation
  */
 
+#include "i915_drv.h" /* for_each_engine() */
 #include "i915_request.h"
 #include "intel_gt.h"
 #include "intel_gt_pm.h"
                        break;
 }
 
+/* Flush the pending submission tasklet of every engine on @gt. */
+static void flush_submission(struct intel_gt *gt)
+{
+       struct intel_engine_cs *engine;
+       enum intel_engine_id id;
+
+       for_each_engine(engine, gt->i915, id)
+               intel_engine_flush_submission(engine);
+}
+
 long intel_gt_retire_requests_timeout(struct intel_gt *gt, long timeout)
 {
        struct intel_gt_timelines *timelines = >->timelines;
        if (unlikely(timeout < 0))
                timeout = -timeout, interruptible = false;
 
+       flush_submission(gt); /* kick the ksoftirqd tasklets */
+
        spin_lock_irqsave(&timelines->lock, flags);
        list_for_each_entry_safe(tl, tn, &timelines->active_list, link) {
                if (!mutex_trylock(&tl->mutex)) {