We are motivated to avoid using a bitfield for obj->active for a couple
of reasons. Firstly, we wish to document our lockless read of obj->active
using READ_ONCE inside i915_gem_busy_ioctl() and that requires an
integral type (i.e. not a bitfield). Secondly, gcc produces abysmal code
when presented with a bitfield and that shows up high on the profiles of
request tracking (mainly due to excess memory traffic as it converts
the bitfield to a register and back, generating frequent address
generation interlock (AGI) stalls in the process).
v2: use BIT(), break up a long line when computing the other engines,
new paint for i915_gem_object_is_active (now i915_gem_object_get_active).
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Link: http://patchwork.freedesktop.org/patch/msgid/1470324762-2545-23-git-send-email-chris@chris-wilson.co.uk
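
[Editor's aside, not part of the patch: a minimal sketch of the first
motivation above. READ_ONCE() requires an lvalue whose address can be
taken, which a bitfield does not have, so the lockless snapshot only
works once the active bits live in a plain integral word. The struct
and function names below are hypothetical.]

/* Sketch only; kernel code gets READ_ONCE() from <linux/compiler.h>. */
struct sketch_obj {
	unsigned long flags;	/* new scheme: addressable word */
	unsigned int active:5;	/* old scheme: bitfield, no address */
};

static unsigned long sketch_active_mask(const struct sketch_obj *obj)
{
	/*
	 * Compiles, and documents the lockless read just as
	 * __I915_BO_ACTIVE() does. READ_ONCE(obj->active) would not
	 * compile (the address of a bitfield cannot be taken), and a
	 * plain obj->active read lets gcc emit the register/memory
	 * shuffling described in the commit message.
	 */
	return READ_ONCE(obj->flags) & ((1UL << 5) - 1);
}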
 
 static char get_active_flag(struct drm_i915_gem_object *obj)
 {
-       return obj->active ? '*' : ' ';
+       return i915_gem_object_is_active(obj) ? '*' : ' ';
 }
 
 static char get_pin_flag(struct drm_i915_gem_object *obj)
 
 
        struct list_head batch_pool_link;
 
+       unsigned long flags;
        /**
         * This is set if the object is on the active lists (has pending
         * rendering and so a non-zero seqno), and is not set if it is on
         * inactive (ready to be unbound) list.
         */
-       unsigned int active:I915_NUM_ENGINES;
+#define I915_BO_ACTIVE_SHIFT 0
+#define I915_BO_ACTIVE_MASK ((1 << I915_NUM_ENGINES) - 1)
+#define __I915_BO_ACTIVE(bo) \
+       ((READ_ONCE((bo)->flags) >> I915_BO_ACTIVE_SHIFT) & I915_BO_ACTIVE_MASK)
 
        /**
         * This is set if the object has been written to since last bound
        return obj->ops->flags & I915_GEM_OBJECT_HAS_STRUCT_PAGE;
 }
 
+static inline unsigned long
+i915_gem_object_get_active(const struct drm_i915_gem_object *obj)
+{
+       return (obj->flags >> I915_BO_ACTIVE_SHIFT) & I915_BO_ACTIVE_MASK;
+}
+
+static inline bool
+i915_gem_object_is_active(const struct drm_i915_gem_object *obj)
+{
+       return i915_gem_object_get_active(obj);
+}
+
+static inline void
+i915_gem_object_set_active(struct drm_i915_gem_object *obj, int engine)
+{
+       obj->flags |= BIT(engine + I915_BO_ACTIVE_SHIFT);
+}
+
+static inline void
+i915_gem_object_clear_active(struct drm_i915_gem_object *obj, int engine)
+{
+       obj->flags &= ~BIT(engine + I915_BO_ACTIVE_SHIFT);
+}
+
+static inline bool
+i915_gem_object_has_active_engine(const struct drm_i915_gem_object *obj,
+                                 int engine)
+{
+       return obj->flags & BIT(engine + I915_BO_ACTIVE_SHIFT);
+}
+
 /*
  * Optimised SGL iterator for GEM objects
  */
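
[Editor's aside, not part of the patch: the accessors above return and
manipulate an ordinary engine bitmask, so they compose with the
for_each_active() iterator used later in this patch (see
object_is_idle()). A hypothetical sketch:]

static void sketch_dump_active(struct drm_i915_gem_object *obj)
{
	unsigned long active = i915_gem_object_get_active(obj);
	int idx;

	/* Walk the set bits of the per-engine activity mask. */
	for_each_active(active, idx)
		DRM_DEBUG("object busy on engine %d\n", idx);
}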
 
 
        if (!readonly) {
                active = obj->last_read;
-               active_mask = obj->active;
+               active_mask = i915_gem_object_get_active(obj);
        } else {
                active_mask = 1;
                active = &obj->last_write;
        BUG_ON(!mutex_is_locked(&dev->struct_mutex));
        BUG_ON(!dev_priv->mm.interruptible);
 
-       active_mask = obj->active;
+       active_mask = i915_gem_object_get_active(obj);
        if (!active_mask)
                return 0;
 
        struct drm_i915_gem_object *obj =
                container_of(active, struct drm_i915_gem_object, last_read[idx]);
 
-       GEM_BUG_ON((obj->active & (1 << idx)) == 0);
+       GEM_BUG_ON(!i915_gem_object_has_active_engine(obj, idx));
 
-       obj->active &= ~(1 << idx);
-       if (obj->active)
+       i915_gem_object_clear_active(obj, idx);
+       if (i915_gem_object_is_active(obj))
                return;
 
        /* Bump our place on the bound list to keep it roughly in LRU order
                return -ENOENT;
        }
 
-       if (!obj->active)
+       if (!i915_gem_object_is_active(obj))
                goto out;
 
        for (i = 0; i < I915_NUM_ENGINES; i++) {
 
        lockdep_assert_held(&obj->base.dev->struct_mutex);
 
-       active_mask = obj->active;
+       active_mask = i915_gem_object_get_active(obj);
        if (!active_mask)
                return 0;
 
         * become non-busy without any further actions.
         */
        args->busy = 0;
-       if (obj->active) {
+       if (i915_gem_object_is_active(obj)) {
                struct drm_i915_gem_request *req;
                int i;
 
 
 
 static bool object_is_idle(struct drm_i915_gem_object *obj)
 {
-       unsigned long active = obj->active;
+       unsigned long active = i915_gem_object_get_active(obj);
        int idx;
 
        for_each_active(active, idx) {
        return ret;
 }
 
+static unsigned int eb_other_engines(struct drm_i915_gem_request *req)
+{
+       unsigned int mask;
+
+       mask = ~intel_engine_flag(req->engine) & I915_BO_ACTIVE_MASK;
+       mask <<= I915_BO_ACTIVE_SHIFT;
+
+       return mask;
+}
+
 static int
 i915_gem_execbuffer_move_to_gpu(struct drm_i915_gem_request *req,
                                struct list_head *vmas)
 {
-       const unsigned other_rings = ~intel_engine_flag(req->engine);
+       const unsigned int other_rings = eb_other_engines(req);
        struct i915_vma *vma;
        uint32_t flush_domains = 0;
        bool flush_chipset = false;
        list_for_each_entry(vma, vmas, exec_list) {
                struct drm_i915_gem_object *obj = vma->obj;
 
-               if (obj->active & other_rings) {
+               if (obj->flags & other_rings) {
                        ret = i915_gem_object_sync(obj, req);
                        if (ret)
                                return ret;
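
[Editor's aside, not part of the patch: eb_other_engines() folds
I915_BO_ACTIVE_SHIFT into the constant mask once per request, so the
per-object test in the hot loop above is a single AND against
obj->flags. A sketch of the equivalence, with a hypothetical helper
name:]

static bool sketch_busy_on_other_engines(const struct drm_i915_gem_object *obj,
					 const struct drm_i915_gem_request *req)
{
	unsigned int other = ~intel_engine_flag(req->engine) & I915_BO_ACTIVE_MASK;

	/*
	 * Equivalent unshifted form, costing one extra shift per object:
	 *	i915_gem_object_get_active(obj) & other
	 * Pre-shifting the constant, as eb_other_engines() does, turns
	 * the per-object test into a single AND:
	 */
	return obj->flags & (other << I915_BO_ACTIVE_SHIFT);
}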
         * add the active reference first and queue for it to be dropped
         * *last*.
         */
-       if (obj->active == 0)
+       if (!i915_gem_object_is_active(obj))
                i915_gem_object_get(obj);
-       obj->active |= 1 << idx;
+       i915_gem_object_set_active(obj, idx);
        i915_gem_active_set(&obj->last_read[idx], req);
 
        if (flags & EXEC_OBJECT_WRITE) {
 
                            !is_vmalloc_addr(obj->mapping))
                                continue;
 
-                       if ((flags & I915_SHRINK_ACTIVE) == 0 && obj->active)
+                       if ((flags & I915_SHRINK_ACTIVE) == 0 &&
+                           i915_gem_object_is_active(obj))
                                continue;
 
                        if (!can_release_pages(obj))
                        count += obj->base.size >> PAGE_SHIFT;
 
        list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
-               if (!obj->active && can_release_pages(obj))
+               if (!i915_gem_object_is_active(obj) && can_release_pages(obj))
                        count += obj->base.size >> PAGE_SHIFT;
        }
 
 
        struct drm_i915_gem_request *requests[I915_NUM_ENGINES];
        int i, n;
 
-       if (!obj->active)
+       if (!i915_gem_object_is_active(obj))
                return;
 
        n = 0;