         * are the ones that keep the context and ringbuffer backing objects
         * pinned in place.
         */
-       while (!list_empty(&ring->execlist_queue)) {
-               struct drm_i915_gem_request *submit_req;
 
-               submit_req = list_first_entry(&ring->execlist_queue,
-                               struct drm_i915_gem_request,
-                               execlist_link);
-               list_del(&submit_req->execlist_link);
+       if (i915.enable_execlists) {
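+               /* The execlist queue is also manipulated from interrupt
+                * context, so take execlist_lock with interrupts disabled
+                * while draining it here; otherwise the cleanup below could
+                * race with the execlists irq handler.
+                */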
+               spin_lock_irq(&ring->execlist_lock);
+               while (!list_empty(&ring->execlist_queue)) {
+                       struct drm_i915_gem_request *submit_req;
 
-               if (submit_req->ctx != ring->default_context)
-                       intel_lr_context_unpin(submit_req);
+                       submit_req = list_first_entry(&ring->execlist_queue,
+                                       struct drm_i915_gem_request,
+                                       execlist_link);
+                       list_del(&submit_req->execlist_link);
 
-               i915_gem_request_unreference(submit_req);
+                       if (submit_req->ctx != ring->default_context)
+                               intel_lr_context_unpin(submit_req);
+
+                       i915_gem_request_unreference(submit_req);
+               }
+               spin_unlock_irq(&ring->execlist_lock);
        }
 
        /*