 i915-y += i915_cmd_parser.o \
          i915_gem_batch_pool.o \
          i915_gem_context.o \
-         i915_gem_debug.o \
          i915_gem_dmabuf.o \
          i915_gem_evict.o \
          i915_gem_execbuffer.o \
 
 #define DRIVER_MINOR           6
 #define DRIVER_PATCHLEVEL      0
 
-#define WATCH_LISTS    0
-
 struct opregion_header;
 struct opregion_acpi;
 struct opregion_swsci;
        struct drm_mm_node *stolen;
        struct list_head global_list;
 
-       struct list_head engine_list[I915_NUM_ENGINES];
        /** Used in execbuf to temporarily hold a ref */
        struct list_head obj_exec_link;
 
                obj->tiling_mode != I915_TILING_NONE;
 }
 
-/* i915_gem_debug.c */
-#if WATCH_LISTS
-int i915_verify_lists(struct drm_device *dev);
-#else
-#define i915_verify_lists(dev) 0
-#endif
-
 /* i915_debugfs.c */
 #ifdef CONFIG_DEBUG_FS
 int i915_debugfs_register(struct drm_i915_private *dev_priv);
 
 
 static void i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj);
 static void i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj);
-static void
-i915_gem_object_retire__write(struct drm_i915_gem_object *obj);
-static void
-i915_gem_object_retire__read(struct drm_i915_gem_object *obj, int engine);
 
 static bool cpu_cache_is_coherent(struct drm_device *dev,
                                  enum i915_cache_level level)
        if (ret)
                return ret;
 
-       WARN_ON(i915_verify_lists(dev));
        return 0;
 }
 
        return ret;
 }
 
-static void
-i915_gem_object_retire_request(struct drm_i915_gem_object *obj,
-                              struct drm_i915_gem_request *req)
-{
-       int idx = req->engine->id;
-
-       if (i915_gem_active_peek(&obj->last_read[idx],
-                                &obj->base.dev->struct_mutex) == req)
-               i915_gem_object_retire__read(obj, idx);
-       else if (i915_gem_active_peek(&obj->last_write,
-                                     &obj->base.dev->struct_mutex) == req)
-               i915_gem_object_retire__write(obj);
-
-       if (!i915_reset_in_progress(&req->i915->gpu_error))
-               i915_gem_request_retire_upto(req);
-}
-
 /**
  * Ensures that all rendering to the object has completed and the object is
  * safe to unbind from the GTT or access from the CPU.
        }
 
        for_each_active(active_mask, idx) {
-               struct drm_i915_gem_request *request;
-
-               request = i915_gem_active_peek(&active[idx],
-                                              &obj->base.dev->struct_mutex);
-               if (!request)
-                       continue;
-
-               ret = i915_wait_request(request);
+               ret = i915_gem_active_wait(&active[idx],
+                                          &obj->base.dev->struct_mutex);
                if (ret)
                        return ret;
-
-               i915_gem_object_retire_request(obj, request);
        }
 
        resv = i915_gem_object_get_dmabuf_resv(obj);
                ret = __i915_wait_request(requests[i], true, NULL, rps);
        mutex_lock(&dev->struct_mutex);
 
-       for (i = 0; i < n; i++) {
-               if (ret == 0)
-                       i915_gem_object_retire_request(obj, requests[i]);
+       for (i = 0; i < n; i++)
                i915_gem_request_put(requests[i]);
-       }
 
        return ret;
 }
                i915_gem_object_get(obj);
        obj->active |= intel_engine_flag(engine);
 
-       list_move_tail(&obj->engine_list[engine->id], &engine->active_list);
        i915_gem_active_set(&obj->last_read[engine->id], req);
 
        list_move_tail(&vma->vm_link, &vma->vm->active_list);
 }
 
 static void
-i915_gem_object_retire__write(struct drm_i915_gem_object *obj)
+i915_gem_object_retire__write(struct i915_gem_active *active,
+                             struct drm_i915_gem_request *request)
 {
-       GEM_BUG_ON(!i915_gem_active_isset(&obj->last_write));
-       GEM_BUG_ON(!(obj->active &
-                    intel_engine_flag(i915_gem_active_get_engine(&obj->last_write,
-                                                                 &obj->base.dev->struct_mutex))));
+       struct drm_i915_gem_object *obj =
+               container_of(active, struct drm_i915_gem_object, last_write);
 
-       i915_gem_active_set(&obj->last_write, NULL);
        intel_fb_obj_flush(obj, true, ORIGIN_CS);
 }
 
 static void
-i915_gem_object_retire__read(struct drm_i915_gem_object *obj, int idx)
+i915_gem_object_retire__read(struct i915_gem_active *active,
+                            struct drm_i915_gem_request *request)
 {
-       struct intel_engine_cs *engine;
+       int idx = request->engine->id;
+       struct drm_i915_gem_object *obj =
+               container_of(active, struct drm_i915_gem_object, last_read[idx]);
        struct i915_vma *vma;
 
-       GEM_BUG_ON(!i915_gem_active_isset(&obj->last_read[idx]));
-       GEM_BUG_ON(!(obj->active & (1 << idx)));
-
-       list_del_init(&obj->engine_list[idx]);
-       i915_gem_active_set(&obj->last_read[idx], NULL);
-
-       engine = i915_gem_active_get_engine(&obj->last_write,
-                                           &obj->base.dev->struct_mutex);
-       if (engine && engine->id == idx)
-               i915_gem_object_retire__write(obj);
+       GEM_BUG_ON((obj->active & (1 << idx)) == 0);
 
        obj->active &= ~(1 << idx);
        if (obj->active)
         * so that we don't steal from recently used but inactive objects
         * (unless we are forced to ofc!)
         */
-       list_move_tail(&obj->global_list,
-                      &to_i915(obj->base.dev)->mm.bound_list);
+       list_move_tail(&obj->global_list, &request->i915->mm.bound_list);
 
        list_for_each_entry(vma, &obj->vma_list, obj_link) {
                if (!list_empty(&vma->vm_link))
                        list_move_tail(&vma->vm_link, &vma->vm->inactive_list);
        }
 
-       i915_gem_active_set(&obj->last_fence, NULL);
        i915_gem_object_put(obj);
 }
 
 {
        struct intel_ring *ring;
 
-       while (!list_empty(&engine->active_list)) {
-               struct drm_i915_gem_object *obj;
-
-               obj = list_first_entry(&engine->active_list,
-                                      struct drm_i915_gem_object,
-                                      engine_list[engine->id]);
-
-               i915_gem_object_retire__read(obj, engine->id);
-       }
-
        /* Mark all pending requests as complete so that any concurrent
         * (lockless) lookup doesn't try and wait upon the request as we
         * reset it.
        i915_gem_context_reset(dev);
 
        i915_gem_restore_fences(dev);
-
-       WARN_ON(i915_verify_lists(dev));
 }
 
 /**
 void
 i915_gem_retire_requests_ring(struct intel_engine_cs *engine)
 {
-       WARN_ON(i915_verify_lists(engine->dev));
-
-       /* Retire requests first as we use it above for the early return.
-        * If we retire requests last, we may use a later seqno and so clear
-        * the requests lists without clearing the active list, leading to
-        * confusion.
-        */
        while (!list_empty(&engine->request_list)) {
                struct drm_i915_gem_request *request;
 
 
                i915_gem_request_retire_upto(request);
        }
-
-       /* Move any buffers on the active list that are no longer referenced
-        * by the ringbuffer to the flushing/inactive lists as appropriate,
-        * before we free the context associated with the requests.
-        */
-       while (!list_empty(&engine->active_list)) {
-               struct drm_i915_gem_object *obj;
-
-               obj = list_first_entry(&engine->active_list,
-                                      struct drm_i915_gem_object,
-                                      engine_list[engine->id]);
-
-               if (!list_empty(&i915_gem_active_peek(&obj->last_read[engine->id],
-                                                     &obj->base.dev->struct_mutex)->link))
-                       break;
-
-               i915_gem_object_retire__read(obj, engine->id);
-       }
-
-       WARN_ON(i915_verify_lists(engine->dev));
 }
 
 void i915_gem_retire_requests(struct drm_i915_private *dev_priv)
 }
 
 static int
-__i915_gem_object_sync(struct drm_i915_gem_object *obj,
-                      struct drm_i915_gem_request *to,
+__i915_gem_object_sync(struct drm_i915_gem_request *to,
                       struct drm_i915_gem_request *from)
 {
        int ret;
        if (to->engine == from->engine)
                return 0;
 
-       if (i915_gem_request_completed(from))
-               return 0;
-
        if (!i915.semaphores) {
                ret = __i915_wait_request(from,
                                          from->i915->mm.interruptible,
                                          NO_WAITBOOST);
                if (ret)
                        return ret;
-
-               i915_gem_object_retire_request(obj, from);
        } else {
                int idx = intel_engine_sync_index(from->engine, to->engine);
                if (from->fence.seqno <= from->engine->semaphore.sync_seqno[idx])
                if (!request)
                        continue;
 
-               ret = __i915_gem_object_sync(obj, to, request);
+               ret = __i915_gem_object_sync(to, request);
                if (ret)
                        return ret;
        }
                        return ret;
        }
 
-       WARN_ON(i915_verify_lists(dev));
        return 0;
 }
 
 
        INIT_LIST_HEAD(&obj->global_list);
        for (i = 0; i < I915_NUM_ENGINES; i++)
-               INIT_LIST_HEAD(&obj->engine_list[i]);
+               init_request_active(&obj->last_read[i],
+                                   i915_gem_object_retire__read);
+       init_request_active(&obj->last_write,
+                           i915_gem_object_retire__write);
+       init_request_active(&obj->last_fence, NULL);
        INIT_LIST_HEAD(&obj->obj_exec_link);
        INIT_LIST_HEAD(&obj->vma_list);
        INIT_LIST_HEAD(&obj->batch_pool_link);
 static void
 init_engine_lists(struct intel_engine_cs *engine)
 {
-       INIT_LIST_HEAD(&engine->active_list);
        INIT_LIST_HEAD(&engine->request_list);
 }
 
 
+++ /dev/null
-/*
- * Copyright © 2008 Intel Corporation
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
- *
- * Authors:
- *    Keith Packard <keithp@keithp.com>
- *
- */
-
-#include <drm/drmP.h>
-#include <drm/i915_drm.h>
-#include "i915_drv.h"
-
-#if WATCH_LISTS
-int
-i915_verify_lists(struct drm_device *dev)
-{
-       static int warned;
-       struct drm_i915_private *dev_priv = to_i915(dev);
-       struct drm_i915_gem_object *obj;
-       struct intel_engine_cs *engine;
-       int err = 0;
-
-       if (warned)
-               return 0;
-
-       for_each_engine(engine, dev_priv) {
-               list_for_each_entry(obj, &engine->active_list,
-                                   engine_list[engine->id]) {
-                       if (obj->base.dev != dev ||
-                           !atomic_read(&obj->base.refcount.refcount)) {
-                               DRM_ERROR("%s: freed active obj %p\n",
-                                         engine->name, obj);
-                               err++;
-                               break;
-                       } else if (!obj->active ||
-                                  obj->last_read_req[engine->id] == NULL) {
-                               DRM_ERROR("%s: invalid active obj %p\n",
-                                         engine->name, obj);
-                               err++;
-                       } else if (obj->base.write_domain) {
-                               DRM_ERROR("%s: invalid write obj %p (w %x)\n",
-                                         engine->name,
-                                         obj, obj->base.write_domain);
-                               err++;
-                       }
-               }
-       }
-
-       return warned = err;
-}
-#endif /* WATCH_LIST */
 
 static int
 i915_gem_object_wait_fence(struct drm_i915_gem_object *obj)
 {
-       int ret;
-
-       ret = i915_gem_active_wait(&obj->last_fence,
-                                  &obj->base.dev->struct_mutex);
-       if (ret)
-               return ret;
-
-       i915_gem_active_set(&obj->last_fence, NULL);
-       return 0;
+       return i915_gem_active_retire(&obj->last_fence,
+                                     &obj->base.dev->struct_mutex);
 }
 
 /**
 
  *
  */
 
+#include <linux/prefetch.h>
+
 #include "i915_drv.h"
 
 static const char *i915_fence_get_driver_name(struct fence *fence)
        request->pid = NULL;
 }
 
+void i915_gem_retire_noop(struct i915_gem_active *active,
+                         struct drm_i915_gem_request *request)
+{
+       /* Space left intentionally blank */
+}
+
 static void i915_gem_request_retire(struct drm_i915_gem_request *request)
 {
+       struct i915_gem_active *active, *next;
+
        trace_i915_gem_request_retire(request);
        list_del_init(&request->link);
 
         */
        request->ring->last_retired_head = request->postfix;
 
+       /* Walk through the active list, calling retire on each. This allows
+        * objects to track their GPU activity and mark themselves as idle
+        * when their *last* active request is completed (updating state
+        * tracking lists for eviction, active references for GEM, etc).
+        *
+        * As the ->retire() may free the node, we decouple it first and
+        * pass along the auxiliary information (to avoid dereferencing
+        * the node after the callback).
+        */
+       list_for_each_entry_safe(active, next, &request->active_list, link) {
+               /* In microbenchmarks, or when focusing upon time inside the
+                * kernel, we may spend an inordinate amount of time simply
+                * handling the retirement of requests and processing their
+                * callbacks. This loop itself is particularly hot due to the
+                * cache misses incurred when jumping around the list of
+                * i915_gem_active, so we try to keep it as streamlined as
+                * possible and also prefetch the next i915_gem_active to
+                * try and hide the likely cache miss.
+                */
+               prefetchw(next);
+
+               INIT_LIST_HEAD(&active->link);
+               active->request = NULL;
+
+               active->retire(active, request);
+       }
+
        i915_gem_request_remove_from_client(request);
 
        if (request->previous_context) {
 
                i915_gem_request_retire(tmp);
        } while (tmp != req);
-
-       WARN_ON(i915_verify_lists(engine->dev));
 }
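
Because i915_gem_request_retire() reinitialises each node's link and clears its request pointer before invoking ->retire(), and walks the list with the _safe iterator, a retirement callback may free the structure that embeds the tracker. A minimal sketch of that pattern; struct bar and bar_retire() are hypothetical and exist only for this illustration:

/* Hypothetical example, not part of the patch: a kmalloc-ed owner that
 * frees itself when its tracked request retires. This is safe because
 * the i915_gem_active node is decoupled (link reinitialised, request
 * cleared) before ->retire() is called, and the retirement loop uses
 * list_for_each_entry_safe(), so the callback may release the node.
 */
struct bar {
	struct i915_gem_active activity;
};

static void bar_retire(struct i915_gem_active *active,
		       struct drm_i915_gem_request *request)
{
	kfree(container_of(active, struct bar, activity));
}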
 
 static int i915_gem_check_wedge(unsigned int reset_counter, bool interruptible)
                   engine->fence_context,
                   seqno);
 
+       INIT_LIST_HEAD(&req->active_list);
        req->i915 = dev_priv;
        req->engine = engine;
        req->ctx = i915_gem_context_get(ctx);
 
        might_sleep();
 
-       if (list_empty(&req->link))
-               return 0;
-
        if (i915_gem_request_completed(req))
                return 0;
 
 {
        int ret;
 
-       GEM_BUG_ON(!req);
        lockdep_assert_held(&req->i915->drm.struct_mutex);
+       GEM_BUG_ON(list_empty(&req->link));
 
-       ret = __i915_wait_request(req, req->i915->mm.interruptible, NULL, NULL);
+       ret = __i915_wait_request(req,
+                                 req->i915->mm.interruptible,
+                                 NULL,
+                                 NULL);
        if (ret)
                return ret;
 
 
         * error state dump only).
         */
        struct drm_i915_gem_object *batch_obj;
+       struct list_head active_list;
 
        /** Time at which this request was emitted, in jiffies. */
        unsigned long emitted_jiffies;
 int __i915_wait_request(struct drm_i915_gem_request *req,
                        bool interruptible,
                        s64 *timeout,
-                       struct intel_rps_client *rps);
-int __must_check i915_wait_request(struct drm_i915_gem_request *req);
+                       struct intel_rps_client *rps)
+       __attribute__((nonnull(1)));
+
+int __must_check
+i915_wait_request(struct drm_i915_gem_request *req)
+       __attribute__((nonnull));
 
 static inline u32 intel_engine_get_seqno(struct intel_engine_cs *engine);
 
  * can then perform any action, such as delayed freeing of an active
  * resource including itself.
  */
+struct i915_gem_active;
+
+typedef void (*i915_gem_retire_fn)(struct i915_gem_active *,
+                                  struct drm_i915_gem_request *);
+
 struct i915_gem_active {
        struct drm_i915_gem_request *request;
+       struct list_head link;
+       i915_gem_retire_fn retire;
 };
 
+void i915_gem_retire_noop(struct i915_gem_active *,
+                         struct drm_i915_gem_request *request);
+
+/**
+ * init_request_active - prepares the activity tracker for use
+ * @active - the active tracker
+ * @retire - a callback invoked when the tracker is retired (becomes idle),
+ *           can be NULL
+ *
+ * init_request_active() prepares the embedded @active struct for use as
+ * an activity tracker, that is, for tracking the last known active request
+ * associated with it. When that request is retired upon completion (and the
+ * tracker thereby becomes idle), the optional callback @retire is invoked.
+ */
+static inline void
+init_request_active(struct i915_gem_active *active,
+                   i915_gem_retire_fn retire)
+{
+       INIT_LIST_HEAD(&active->link);
+       active->retire = retire ?: i915_gem_retire_noop;
+}
+
 /**
  * i915_gem_active_set - updates the tracker to watch the current request
  * @active - the active tracker
 i915_gem_active_set(struct i915_gem_active *active,
                    struct drm_i915_gem_request *request)
 {
-       i915_gem_request_assign(&active->request, request);
+       list_move(&active->link, &request->active_list);
+       active->request = request;
 }
 
 static inline struct drm_i915_gem_request *
 }
 
 /**
- * i915_gem_active_peek - report the request being monitored
+ * i915_gem_active_peek - report the active request being monitored
  * @active - the active tracker
  *
- * i915_gem_active_peek() returns the current request being tracked, or NULL.
- * It does not obtain a reference on the request for the caller, so the
- * caller must hold struct_mutex.
+ * i915_gem_active_peek() returns the current request being tracked if
+ * still active, or NULL. It does not obtain a reference on the request
+ * for the caller, so the caller must hold struct_mutex.
  */
 static inline struct drm_i915_gem_request *
 i915_gem_active_peek(const struct i915_gem_active *active, struct mutex *mutex)
 {
-       return active->request;
+       struct drm_i915_gem_request *request;
+
+       request = active->request;
+       if (!request || i915_gem_request_completed(request))
+               return NULL;
+
+       return request;
 }
 
 /**
 static inline struct drm_i915_gem_request *
 i915_gem_active_get(const struct i915_gem_active *active, struct mutex *mutex)
 {
-       struct drm_i915_gem_request *request;
-
-       request = i915_gem_active_peek(active, mutex);
-       if (!request || i915_gem_request_completed(request))
-               return NULL;
-
-       return i915_gem_request_get(request);
+       return i915_gem_request_get(i915_gem_active_peek(active, mutex));
 }
 
 /**
 i915_gem_active_is_idle(const struct i915_gem_active *active,
                        struct mutex *mutex)
 {
-       struct drm_i915_gem_request *request;
-
-       request = i915_gem_active_peek(active, mutex);
-       if (!request || i915_gem_request_completed(request))
-               return true;
-
-       return false;
+       return !i915_gem_active_peek(active, mutex);
 }
 
 /**
  * i915_gem_active_wait() waits until the request is completed before
  * returning. Note that it does not guarantee that the request is
  * retired first, see i915_gem_active_retire().
+ *
+ * i915_gem_active_wait() returns immediately if the active
+ * request is already complete.
  */
 static inline int __must_check
 i915_gem_active_wait(const struct i915_gem_active *active, struct mutex *mutex)
        if (!request)
                return 0;
 
-       return i915_wait_request(request);
+       return __i915_wait_request(request, true, NULL, NULL);
 }
 
 /**
  * tracker is idle, the function returns immediately.
  */
 static inline int __must_check
-i915_gem_active_retire(const struct i915_gem_active *active,
+i915_gem_active_retire(struct i915_gem_active *active,
                       struct mutex *mutex)
 {
-       return i915_gem_active_wait(active, mutex);
+       struct drm_i915_gem_request *request;
+       int ret;
+
+       request = active->request;
+       if (!request)
+               return 0;
+
+       ret = __i915_wait_request(request, true, NULL, NULL);
+       if (ret)
+               return ret;
+
+       list_del_init(&active->link);
+       active->request = NULL;
+       active->retire(active, request);
+
+       return 0;
 }
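
Pulling the new API together, a minimal usage sketch; struct foo, foo_retire(), foo_init() and foo_mark_active() are hypothetical names used only to illustrate the init_request_active()/i915_gem_active_set() pairing introduced above:

/* Hypothetical owner of an activity tracker; these names exist only
 * for this illustration.
 */
struct foo {
	struct i915_gem_active activity;
	u32 last_completed_seqno;	/* illustrative bookkeeping */
};

static void foo_retire(struct i915_gem_active *active,
		       struct drm_i915_gem_request *request)
{
	struct foo *foo = container_of(active, struct foo, activity);

	/* The tracked request has just been retired; update our state. */
	foo->last_completed_seqno = request->fence.seqno;
}

static void foo_init(struct foo *foo)
{
	/* Hook the retirement callback into the embedded tracker. */
	init_request_active(&foo->activity, foo_retire);
	foo->last_completed_seqno = 0;
}

/* Under struct_mutex: move the tracker onto @request's active_list so
 * that foo_retire() is invoked when the request is retired, either by
 * i915_gem_request_retire() or by i915_gem_active_retire().
 */
static void foo_mark_active(struct foo *foo,
			    struct drm_i915_gem_request *request)
{
	i915_gem_active_set(&foo->activity, request);
}

The callback thus replaces the per-object bookkeeping that i915_gem_object_retire__read/__write previously performed by hand while walking engine->active_list.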
 
 /* Convenience functions for peeking at state inside active's request whilst
 
  */
 void intel_engine_setup_common(struct intel_engine_cs *engine)
 {
-       INIT_LIST_HEAD(&engine->active_list);
        INIT_LIST_HEAD(&engine->request_list);
        INIT_LIST_HEAD(&engine->buffers);
        INIT_LIST_HEAD(&engine->execlist_queue);
 
        bool disable_lite_restore_wa;
        u32 ctx_desc_template;
 
-       /**
-        * List of objects currently involved in rendering from the
-        * ringbuffer.
-        *
-        * Includes buffers having the contents of their GPU caches
-        * flushed, not necessarily primitives.  last_read_req
-        * represents when the rendering involved will be completed.
-        *
-        * A reference is held on the buffer while on this list.
-        */
-       struct list_head active_list;
-
        /**
         * List of breadcrumbs associated with GPU requests currently
         * outstanding.