                 seq_puts(m, "HW context ");
                describe_ctx(m, ctx);
-               for_each_ring(ring, dev_priv, i) {
-                       if (dev_priv->kernel_context == ctx)
-                               seq_printf(m, "(default context %s) ",
-                                          ring->name);
-               }
+               if (ctx == dev_priv->kernel_context)
+                       seq_puts(m, "(kernel context) ");
 
                if (i915.enable_execlists) {
                        seq_putc(m, '\n');
        if (ret)
                return ret;
 
-       list_for_each_entry(ctx, &dev_priv->context_list, link) {
-               for_each_ring(ring, dev_priv, i) {
-                       if (dev_priv->kernel_context != ctx)
+       list_for_each_entry(ctx, &dev_priv->context_list, link)
+               if (ctx != dev_priv->kernel_context)
+                       for_each_ring(ring, dev_priv, i)
                                i915_dump_lrc_obj(m, ctx, ring);
-               }
-       }
 
        mutex_unlock(&dev->struct_mutex);
 
 
                i915_gem_request_remove_from_client(req);
 
        if (ctx) {
-               if (i915.enable_execlists) {
-                       if (ctx != req->i915->kernel_context)
-                               intel_lr_context_unpin(req);
-               }
+               if (i915.enable_execlists && ctx != req->i915->kernel_context)
+                       intel_lr_context_unpin(req);
 
                i915_gem_context_unreference(ctx);
        }
 
 
 int intel_logical_ring_alloc_request_extras(struct drm_i915_gem_request *request)
 {
-       int ret;
+       int ret = 0;
 
        request->ringbuf = request->ctx->engine[request->ring->id].ringbuf;
 
-       if (request->ctx != request->i915->kernel_context) {
-               ret = intel_lr_context_pin(request);
-               if (ret)
-                       return ret;
-       }
-
        if (i915.enable_guc_submission) {
                /*
                 * Check that the GuC has space for the request before
                        return ret;
        }
 
-       return 0;
+       if (request->ctx != request->i915->kernel_context)
+               ret = intel_lr_context_pin(request);
+
+       return ret;
 }
 
 static int logical_ring_wait_for_space(struct drm_i915_gem_request *req,
 {
        int i;
 
-       for (i = 0; i < I915_NUM_RINGS; i++) {
+       for (i = I915_NUM_RINGS; --i >= 0; ) {
+               struct intel_ringbuffer *ringbuf = ctx->engine[i].ringbuf;
                struct drm_i915_gem_object *ctx_obj = ctx->engine[i].state;
 
-               if (ctx_obj) {
-                       struct intel_ringbuffer *ringbuf =
-                                       ctx->engine[i].ringbuf;
-                       struct intel_engine_cs *ring = ringbuf->ring;
+               if (!ctx_obj)
+                       continue;
 
-                       if (ctx == ctx->i915->kernel_context) {
-                               intel_unpin_ringbuffer_obj(ringbuf);
-                               i915_gem_object_ggtt_unpin(ctx_obj);
-                       }
-                       WARN_ON(ctx->engine[ring->id].pin_count);
-                       intel_ringbuffer_free(ringbuf);
-                       drm_gem_object_unreference(&ctx_obj->base);
+               if (ctx == ctx->i915->kernel_context) {
+                       intel_unpin_ringbuffer_obj(ringbuf);
+                       i915_gem_object_ggtt_unpin(ctx_obj);
                }
+
+               WARN_ON(ctx->engine[i].pin_count);
+               intel_ringbuffer_free(ringbuf);
+               drm_gem_object_unreference(&ctx_obj->base);
        }
 }
 
  */
 
 int intel_lr_context_deferred_alloc(struct intel_context *ctx,
-                                    struct intel_engine_cs *ring)
+                                   struct intel_engine_cs *ring)
 {
        struct drm_device *dev = ring->dev;
        struct drm_i915_gem_object *ctx_obj;