static void i915_gem_reset_ring_cleanup(struct drm_i915_private *dev_priv,
                                        struct intel_engine_cs *ring)
 {
+       struct intel_ringbuffer *buffer;
+
        while (!list_empty(&ring->active_list)) {
                struct drm_i915_gem_object *obj;
 
 
                i915_gem_request_retire(request);
        }
+
+       /* Having flushed all requests from all queues, we know that all
+        * ringbuffers must now be empty. However, since we do not reclaim
+        * all space when retiring the request (to prevent HEADs colliding
+        * with rapid ringbuffer wraparound) the amount of available space
+        * upon reset is less than when we start. Do one more pass over
+        * all the ringbuffers to reset last_retired_head.
+        */
+       list_for_each_entry(buffer, &ring->buffers, link) {
+               buffer->last_retired_head = buffer->tail;
+               intel_ring_update_space(buffer);
+       }
 }
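
An aside on the space accounting: below is a minimal userspace sketch of the head/tail arithmetic that the final pass above relies on. The names (ring_sketch, ring_space, ring_update_space) are hypothetical stand-ins, not the driver's actual intel_ring_update_space() helpers; the point is only to show why, once every request has been retired, pulling head forward to last_retired_head (which the loop above sets to tail) makes the whole ring report as free again.

/* Hypothetical stand-in types, not the i915 structures. */
#include <stdio.h>

struct ring_sketch {
        int head;               /* next byte the consumer (GPU) will read */
        int tail;               /* next byte the producer (CPU) will write */
        int last_retired_head;  /* head position of the last retired request, or -1 */
        int size;               /* total ring size in bytes */
        int space;              /* cached free-space estimate */
};

/* Free bytes between tail and head, keeping one byte unused so that
 * "full" and "empty" remain distinguishable.
 */
static int ring_space(int head, int tail, int size)
{
        int space = head - tail;

        if (space <= 0)
                space += size;
        return space - 1;
}

static void ring_update_space(struct ring_sketch *r)
{
        if (r->last_retired_head != -1) {
                r->head = r->last_retired_head;
                r->last_retired_head = -1;
        }
        r->space = ring_space(r->head, r->tail, r->size);
}

int main(void)
{
        struct ring_sketch r = {
                .head = 256, .tail = 4096, .size = 32768,
                .last_retired_head = -1,
        };

        printf("space before reset pass: %d\n",
               ring_space(r.head, r.tail, r.size));

        /* What the reset pass above does for each ringbuffer: treat
         * everything up to tail as retired, then recompute space.
         */
        r.last_retired_head = r.tail;
        ring_update_space(&r);
        printf("space after reset pass:  %d\n", r.space);
        return 0;
}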
 
 void i915_gem_reset(struct drm_device *dev)
 
        i915_gem_batch_pool_init(dev, &ring->batch_pool);
        init_waitqueue_head(&ring->irq_queue);
 
+       INIT_LIST_HEAD(&ring->buffers);
        INIT_LIST_HEAD(&ring->execlist_queue);
        INIT_LIST_HEAD(&ring->execlist_retired_req_list);
        spin_lock_init(&ring->execlist_lock);
 
        int ret;
 
        ring = kzalloc(sizeof(*ring), GFP_KERNEL);
-       if (ring == NULL)
+       if (ring == NULL) {
+               DRM_DEBUG_DRIVER("Failed to allocate ringbuffer %s\n",
+                                engine->name);
                return ERR_PTR(-ENOMEM);
+       }
 
        ring->ring = engine;
+       list_add(&ring->link, &engine->buffers);
 
        ring->size = size;
        /* Workaround an erratum on the i830 which causes a hang if
 
        ret = intel_alloc_ringbuffer_obj(engine->dev, ring);
        if (ret) {
-               DRM_ERROR("Failed to allocate ringbuffer %s: %d\n",
-                         engine->name, ret);
+               DRM_DEBUG_DRIVER("Failed to allocate ringbuffer %s: %d\n",
+                                engine->name, ret);
+               list_del(&ring->link);
                kfree(ring);
                return ERR_PTR(ret);
        }
 intel_ringbuffer_free(struct intel_ringbuffer *ring)
 {
        intel_destroy_ringbuffer_obj(ring);
+       list_del(&ring->link);
        kfree(ring);
 }
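
A second self-contained sketch, again with hypothetical names and a hand-rolled list in place of the kernel's list_head so it builds in userspace, of the ownership pattern the hunks above establish: a ringbuffer is linked into its engine's buffers list as soon as it is created and unlinked on every path that frees it (the allocation error path as well as intel_ringbuffer_free()), so the reset pass can walk every live ringbuffer.

#include <stdlib.h>

/* Minimal doubly-linked list, standing in for the kernel's list_head. */
struct node { struct node *prev, *next; };

static void node_init(struct node *h)
{
        h->prev = h->next = h;
}

static void node_add(struct node *n, struct node *h)
{
        n->next = h->next;
        n->prev = h;
        h->next->prev = n;
        h->next = n;
}

static void node_del(struct node *n)
{
        n->prev->next = n->next;
        n->next->prev = n->prev;
}

struct engine_sketch  { struct node buffers; };
struct ringbuf_sketch { struct node link; /* head, tail, space, ... */ };

static struct ringbuf_sketch *ringbuf_create(struct engine_sketch *e)
{
        struct ringbuf_sketch *rb = calloc(1, sizeof(*rb));

        if (!rb)
                return NULL;
        /* Track the buffer as soon as it exists ... */
        node_add(&rb->link, &e->buffers);
        return rb;
}

static void ringbuf_destroy(struct ringbuf_sketch *rb)
{
        /* ... and drop it from the engine's list on every free path. */
        node_del(&rb->link);
        free(rb);
}

int main(void)
{
        struct engine_sketch e;
        struct ringbuf_sketch *rb;

        node_init(&e.buffers);
        rb = ringbuf_create(&e);
        if (rb)
                ringbuf_destroy(rb);
        return 0;
}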
 
        INIT_LIST_HEAD(&ring->active_list);
        INIT_LIST_HEAD(&ring->request_list);
        INIT_LIST_HEAD(&ring->execlist_queue);
+       INIT_LIST_HEAD(&ring->buffers);
        i915_gem_batch_pool_init(dev, &ring->batch_pool);
        memset(ring->semaphore.sync_seqno, 0, sizeof(ring->semaphore.sync_seqno));
 
 
        void __iomem *virtual_start;
 
        struct intel_engine_cs *ring;
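+       /* Node in the owning engine's list of ringbuffers */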
+       struct list_head link;
 
        u32 head;
        u32 tail;
        u32             mmio_base;
        struct          drm_device *dev;
        struct intel_ringbuffer *buffer;
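+       /* All ringbuffers allocated against this engine */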
+       struct list_head buffers;
 
        /*
         * A pool of objects to use as shadow copies of client batch buffers