We don't need to track every ring for its lifetime, as they are managed
by their contexts/engines. What we do want to track are the live rings,
so that we can sporadically clean up requests if userspace falls behind.
We can simply restrict the gt->rings list to tracking only gt->live_rings.
v2: s/live/active/ for consistency with gt.active_requests
Suggested-by: Tvrtko Ursulin <tvrtko.ursulin@linux.intel.com>
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Tvrtko Ursulin <tvrtko.ursulin@linux.intel.com>
Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20180430131503.5375-4-chris@chris-wilson.co.uk
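
For illustration only, a compilable userspace sketch of the bookkeeping
this patch adopts, with toy list helpers standing in for <linux/list.h>
and hypothetical mini_* types standing in for intel_ring and i915->gt
(none of these names are part of the patch). A ring is linked onto the
active list when its request_list goes from empty to non-empty, and
unlinked again when its last request retires:

#include <stdbool.h>
#include <stdio.h>

/* Toy intrusive list, mimicking <linux/list.h> semantics. */
struct list_head { struct list_head *prev, *next; };

static void list_init(struct list_head *h) { h->prev = h->next = h; }
static bool list_empty(const struct list_head *h) { return h->next == h; }

static void list_add_tail(struct list_head *n, struct list_head *h)
{
	n->prev = h->prev;
	n->next = h;
	h->prev->next = n;
	h->prev = n;
}

static void list_del(struct list_head *n)
{
	n->prev->next = n->next;
	n->next->prev = n->prev;
}

static bool list_is_first(const struct list_head *n, const struct list_head *h)
{
	return n->prev == h;
}

static bool list_is_last(const struct list_head *n, const struct list_head *h)
{
	return n->next == h;
}

/* Hypothetical stand-ins for intel_ring / i915->gt. */
struct mini_request { struct list_head ring_link; };

struct mini_ring {
	struct list_head request_list;	/* in-flight requests, oldest first */
	struct list_head active_link;	/* link in mini_gt.active_rings */
};

struct mini_gt {
	struct list_head active_rings;	/* only rings with requests pending */
};

/* Add-side transition: adding the first request makes the ring active. */
static void submit(struct mini_gt *gt, struct mini_ring *ring,
		   struct mini_request *rq)
{
	list_add_tail(&rq->ring_link, &ring->request_list);
	if (list_is_first(&rq->ring_link, &ring->request_list))
		list_add_tail(&ring->active_link, &gt->active_rings);
}

/* Retire-side transition: retiring the last request idles the ring. */
static void retire(struct mini_ring *ring, struct mini_request *rq)
{
	if (list_is_last(&rq->ring_link, &ring->request_list))
		list_del(&ring->active_link);
	list_del(&rq->ring_link);
}

int main(void)
{
	struct mini_gt gt;
	struct mini_ring ring;
	struct mini_request rq;

	list_init(&gt.active_rings);
	list_init(&ring.request_list);

	submit(&gt, &ring, &rq);
	printf("active after submit: %d\n", !list_empty(&gt.active_rings));

	retire(&ring, &rq);
	printf("active after retire: %d\n", !list_empty(&gt.active_rings));
	return 0;
}

Note the check after list_add_tail(): if the new request is now first,
the list must have been empty beforehand, so the test catches exactly
the empty-to-non-empty edge without any extra state.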
 
 
                struct i915_gem_timeline global_timeline;
                struct list_head timelines;
-               struct list_head rings;
+
+               struct list_head active_rings;
                u32 active_requests;
                u32 request_serial;
 
 
 {
        lockdep_assert_held(&i915->drm.struct_mutex);
        GEM_BUG_ON(i915->gt.active_requests);
+       GEM_BUG_ON(!list_empty(&i915->gt.active_rings));
 
        if (!i915->gt.awake)
                return I915_EPOCH_INVALID;
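
Asserting list_empty(&i915->gt.active_rings) alongside the existing
active_requests check documents the invariant the new list maintains:
once no requests are outstanding, every ring must have dropped off the
active list.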
        if (!dev_priv->priorities)
                goto err_dependencies;
 
-       mutex_lock(&dev_priv->drm.struct_mutex);
-       INIT_LIST_HEAD(&dev_priv->gt.rings);
        INIT_LIST_HEAD(&dev_priv->gt.timelines);
+       INIT_LIST_HEAD(&dev_priv->gt.active_rings);
+
+       mutex_lock(&dev_priv->drm.struct_mutex);
        err = i915_gem_timeline_init__global(dev_priv);
        mutex_unlock(&dev_priv->drm.struct_mutex);
        if (err)
 
                 * noops - they are safe to be replayed on a reset.
                 */
                tail = READ_ONCE(request->tail);
+               list_del(&ring->active_link);
        } else {
                tail = request->postfix;
        }
        i915_gem_active_set(&timeline->last_request, request);
 
        list_add_tail(&request->ring_link, &ring->request_list);
+       if (list_is_first(&request->ring_link, &ring->request_list))
+               list_add(&ring->active_link, &request->i915->gt.active_rings);
        request->emitted_jiffies = jiffies;
 
        /*
 
 void i915_retire_requests(struct drm_i915_private *i915)
 {
-       struct intel_ring *ring, *next;
+       struct intel_ring *ring, *tmp;
 
        lockdep_assert_held(&i915->drm.struct_mutex);
 
        if (!i915->gt.active_requests)
                return;
 
-       list_for_each_entry_safe(ring, next, &i915->gt.rings, link)
+       /* An outstanding request must be on a still active ring somewhere */
+       GEM_BUG_ON(list_empty(&i915->gt.active_rings));
+
+       list_for_each_entry_safe(ring, tmp, &i915->gt.active_rings, active_link)
                ring_retire_requests(ring);
 }
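
The _safe iterator is required here: retiring the final request on a
ring takes the list_del(&ring->active_link) path added in the retire
hunk above, unlinking the ring from gt.active_rings while we are walking
that very list. list_for_each_entry_safe() fetches the next entry into
tmp before visiting the current one, so removing the current entry is
safe.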
 
 
        }
        ring->vma = vma;
 
-       list_add(&ring->link, &engine->i915->gt.rings);
-
        return ring;
 }
 
        i915_vma_close(ring->vma);
        __i915_gem_object_release_unless_active(obj);
 
-       list_del(&ring->link);
-
        kfree(ring);
 }
 
 
        void *vaddr;
 
        struct list_head request_list;
-       struct list_head link;
+       struct list_head active_link;
 
        u32 head;
        u32 tail;
 
        INIT_LIST_HEAD(&ring->request_list);
        intel_ring_update_space(ring);
 
-       list_add(&ring->link, &engine->i915->gt.rings);
-
        return ring;
 }
 
 static void mock_ring_free(struct intel_ring *ring)
 {
-       list_del(&ring->link);
-
        kfree(ring);
 }
 
 
        if (!i915->priorities)
                goto err_dependencies;
 
-       mutex_lock(&i915->drm.struct_mutex);
-       INIT_LIST_HEAD(&i915->gt.rings);
        INIT_LIST_HEAD(&i915->gt.timelines);
+       INIT_LIST_HEAD(&i915->gt.active_rings);
+
+       mutex_lock(&i915->drm.struct_mutex);
        err = i915_gem_timeline_init__global(i915);
        if (err) {
                mutex_unlock(&i915->drm.struct_mutex);