return ret;
 
        if (val & DROP_ACTIVE) {
-               ret = i915_gpu_idle(dev);
+               ret = i915_gem_wait_for_idle(dev_priv);
                if (ret)
                        goto unlock;
        }
 
 int __must_check i915_gem_init_hw(struct drm_device *dev);
 void i915_gem_init_swizzling(struct drm_device *dev);
 void i915_gem_cleanup_engines(struct drm_device *dev);
-int __must_check i915_gpu_idle(struct drm_device *dev);
+int __must_check i915_gem_wait_for_idle(struct drm_i915_private *dev_priv);
 int __must_check i915_gem_suspend(struct drm_device *dev);
 void __i915_add_request(struct drm_i915_gem_request *req,
                        struct drm_i915_gem_object *batch_obj,
 
        return __i915_vma_unbind(vma, false);
 }
 
-int i915_gpu_idle(struct drm_device *dev)
+int i915_gem_wait_for_idle(struct drm_i915_private *dev_priv)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_engine_cs *engine;
        int ret;
 
+       lockdep_assert_held(&dev_priv->dev->struct_mutex);
+
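+       /* Wait for each engine to drain its outstanding requests. */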
        for_each_engine(engine, dev_priv) {
                if (engine->last_context == NULL)
                        continue;
 
-               if (!i915.enable_execlists) {
-                       struct drm_i915_gem_request *req;
-
-                       req = i915_gem_request_alloc(engine, NULL);
-                       if (IS_ERR(req))
-                               return PTR_ERR(req);
-
-                       ret = i915_switch_context(req);
-                       i915_add_request_no_flush(req);
-                       if (ret)
-                               return ret;
-               }
-
                ret = intel_engine_idle(engine);
                if (ret)
                        return ret;
        int ret = 0;
 
        mutex_lock(&dev->struct_mutex);
-       ret = i915_gpu_idle(dev);
+       ret = i915_gem_wait_for_idle(dev_priv);
        if (ret)
                goto err;
 
 
 #include "intel_drv.h"
 #include "i915_trace.h"
 
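+/* To evict a context object we must first remove the engines' references to
+ * it: each legacy ring keeps its last context pinned until it switches away.
+ * Emit a switch to the always-pinned kernel context on every engine so the
+ * user contexts become evictable.
+ */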
+static int switch_to_pinned_context(struct drm_i915_private *dev_priv)
+{
+       struct intel_engine_cs *engine;
+
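+       /* With execlists, contexts are pinned only while requests are in
+        * flight, so they are already released as the engines idle.
+        */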
+       if (i915.enable_execlists)
+               return 0;
+
+       for_each_engine(engine, dev_priv) {
+               struct drm_i915_gem_request *req;
+               int ret;
+
+               if (engine->last_context == NULL)
+                       continue;
+
+               if (engine->last_context == dev_priv->kernel_context)
+                       continue;
+
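+               /* Emit a request to switch this engine onto the kernel context. */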
+               req = i915_gem_request_alloc(engine, dev_priv->kernel_context);
+               if (IS_ERR(req))
+                       return PTR_ERR(req);
+
+               ret = i915_switch_context(req);
+               i915_add_request_no_flush(req);
+               if (ret)
+                       return ret;
+       }
+
+       return 0;
+}
+
 static bool
 mark_free(struct i915_vma *vma, struct list_head *unwind)
 {
 
        /* Only idle the GPU and repeat the search once */
        if (pass++ == 0) {
-               ret = i915_gpu_idle(dev);
+               struct drm_i915_private *dev_priv = to_i915(dev);
+
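+               /* Switch away from the user contexts so that they too can be
+                * evicted, then wait for the GPU to drain.
+                */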
+               ret = switch_to_pinned_context(dev_priv);
                if (ret)
                        return ret;
 
-               i915_gem_retire_requests(to_i915(dev));
+               ret = i915_gem_wait_for_idle(dev_priv);
+               if (ret)
+                       return ret;
+
+               i915_gem_retire_requests(dev_priv);
                goto search_again;
        }
 
        trace_i915_gem_evict_vm(vm);
 
        if (do_idle) {
-               ret = i915_gpu_idle(vm->dev);
+               struct drm_i915_private *dev_priv = to_i915(vm->dev);
+
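+               /* Likewise, switch away from the user contexts before idling. */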
+               ret = switch_to_pinned_context(dev_priv);
+               if (ret)
+                       return ret;
+
+               ret = i915_gem_wait_for_idle(dev_priv);
                if (ret)
                        return ret;
 
-               i915_gem_retire_requests(to_i915(vm->dev));
+               i915_gem_retire_requests(dev_priv);
 
                WARN_ON(!list_empty(&vm->active_list));
        }
 
 
        if (unlikely(ggtt->do_idle_maps)) {
                dev_priv->mm.interruptible = false;
-               if (i915_gpu_idle(dev_priv->dev)) {
-                       DRM_ERROR("Couldn't idle GPU\n");
+               if (i915_gem_wait_for_idle(dev_priv)) {
+                       DRM_ERROR("Failed to wait for idle; VT-d may hang.\n");
                        /* Wait a bit, in hopes it avoids the hang */
                        udelay(10);
                }
 
                return NOTIFY_DONE;
 
        /* Force everything onto the inactive lists */
-       ret = i915_gpu_idle(dev_priv->dev);
+       ret = i915_gem_wait_for_idle(dev_priv);
        if (ret)
                goto out;