* CPU ptes into GTT mmaps (not the GTT ptes themselves) as needed.
  */
 
-static void i965_write_fence_reg(struct drm_device *dev, int reg,
-                                struct drm_i915_gem_object *obj)
+#define pipelined 0
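+/* Fence register updates are not (yet) pipelined through the GPU; with
+ * pipelined == 0 the MMIO write paths below are always taken.
+ */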
+
+static void i965_write_fence_reg(struct drm_i915_fence_reg *fence,
+                                struct i915_vma *vma)
 {
-       struct drm_i915_private *dev_priv = to_i915(dev);
        i915_reg_t fence_reg_lo, fence_reg_hi;
        int fence_pitch_shift;
+       u64 val;
 
-       if (INTEL_INFO(dev)->gen >= 6) {
-               fence_reg_lo = FENCE_REG_GEN6_LO(reg);
-               fence_reg_hi = FENCE_REG_GEN6_HI(reg);
+       if (INTEL_INFO(fence->i915)->gen >= 6) {
+               fence_reg_lo = FENCE_REG_GEN6_LO(fence->id);
+               fence_reg_hi = FENCE_REG_GEN6_HI(fence->id);
                fence_pitch_shift = GEN6_FENCE_PITCH_SHIFT;
        } else {
-               fence_reg_lo = FENCE_REG_965_LO(reg);
-               fence_reg_hi = FENCE_REG_965_HI(reg);
+               fence_reg_lo = FENCE_REG_965_LO(fence->id);
+               fence_reg_hi = FENCE_REG_965_HI(fence->id);
                fence_pitch_shift = I965_FENCE_PITCH_SHIFT;
        }
 
-       /* To w/a incoherency with non-atomic 64-bit register updates,
-        * we split the 64-bit update into two 32-bit writes. In order
-        * for a partial fence not to be evaluated between writes, we
-        * precede the update with write to turn off the fence register,
-        * and only enable the fence as the last step.
-        *
-        * For extra levels of paranoia, we make sure each step lands
-        * before applying the next step.
-        */
-       I915_WRITE(fence_reg_lo, 0);
-       POSTING_READ(fence_reg_lo);
-
-       if (obj) {
-               struct i915_vma *vma = i915_gem_object_to_ggtt(obj, NULL);
-               unsigned int tiling = i915_gem_object_get_tiling(obj);
-               unsigned int stride = i915_gem_object_get_stride(obj);
-               u32 size = vma->node.size;
-               u32 row_size = stride * (tiling == I915_TILING_Y ? 32 : 8);
-               u64 val;
-
-               /* Adjust fence size to match tiled area */
-               size = rounddown(size, row_size);
+       val = 0;
+       if (vma) {
+               unsigned int tiling = i915_gem_object_get_tiling(vma->obj);
+               bool is_y_tiled = tiling == I915_TILING_Y;
+               unsigned int stride = i915_gem_object_get_stride(vma->obj);
+               u32 row_size = stride * (is_y_tiled ? 32 : 8);
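+               /* Adjust the fence size to match the tiled area, then pack
+                * the 64-bit fence value: the upper dword takes the last 4K
+                * page of the fenced region, the lower dword its first page
+                * together with the pitch in 128-byte units (minus one), the
+                * Y-tiling bit and the enable bit.
+                */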
+               u32 size = rounddown((u32)vma->node.size, row_size);
 
                val = ((vma->node.start + size - 4096) & 0xfffff000) << 32;
                val |= vma->node.start & 0xfffff000;
                val |= (u64)((stride / 128) - 1) << fence_pitch_shift;
-               if (tiling == I915_TILING_Y)
-                       val |= 1 << I965_FENCE_TILING_Y_SHIFT;
+               if (is_y_tiled)
+                       val |= BIT(I965_FENCE_TILING_Y_SHIFT);
                val |= I965_FENCE_REG_VALID;
+       }
 
-               I915_WRITE(fence_reg_hi, val >> 32);
-               POSTING_READ(fence_reg_hi);
+       if (!pipelined) {
+               struct drm_i915_private *dev_priv = fence->i915;
 
-               I915_WRITE(fence_reg_lo, val);
+               /* To w/a incoherency with non-atomic 64-bit register updates,
+                * we split the 64-bit update into two 32-bit writes. In order
+                * for a partial fence not to be evaluated between writes, we
+                * precede the update with a write to turn off the fence register,
+                * and only enable the fence as the last step.
+                *
+                * For extra levels of paranoia, we make sure each step lands
+                * before applying the next step.
+                */
+               I915_WRITE(fence_reg_lo, 0);
+               POSTING_READ(fence_reg_lo);
+
+               I915_WRITE(fence_reg_hi, upper_32_bits(val));
+               I915_WRITE(fence_reg_lo, lower_32_bits(val));
                POSTING_READ(fence_reg_lo);
-       } else {
-               I915_WRITE(fence_reg_hi, 0);
-               POSTING_READ(fence_reg_hi);
        }
 }
 
-static void i915_write_fence_reg(struct drm_device *dev, int reg,
-                                struct drm_i915_gem_object *obj)
+static void i915_write_fence_reg(struct drm_i915_fence_reg *fence,
+                                struct i915_vma *vma)
 {
-       struct drm_i915_private *dev_priv = to_i915(dev);
        u32 val;
 
-       if (obj) {
-               struct i915_vma *vma = i915_gem_object_to_ggtt(obj, NULL);
-               unsigned int tiling = i915_gem_object_get_tiling(obj);
-               unsigned int stride = i915_gem_object_get_stride(obj);
+       val = 0;
+       if (vma) {
+               unsigned int tiling = i915_gem_object_get_tiling(vma->obj);
+               bool is_y_tiled = tiling == I915_TILING_Y;
+               unsigned int stride = i915_gem_object_get_stride(vma->obj);
                int pitch_val;
                int tile_width;
 
                     i915_vma_is_map_and_fenceable(vma),
                     vma->node.size);
 
-               if (tiling == I915_TILING_Y && HAS_128_BYTE_Y_TILING(dev))
+               if (is_y_tiled && HAS_128_BYTE_Y_TILING(fence->i915))
                        tile_width = 128;
                else
                        tile_width = 512;
                pitch_val = ffs(pitch_val) - 1;
 
                val = vma->node.start;
-               if (tiling == I915_TILING_Y)
-                       val |= 1 << I830_FENCE_TILING_Y_SHIFT;
+               if (is_y_tiled)
+                       val |= BIT(I830_FENCE_TILING_Y_SHIFT);
                val |= I915_FENCE_SIZE_BITS(vma->node.size);
                val |= pitch_val << I830_FENCE_PITCH_SHIFT;
                val |= I830_FENCE_REG_VALID;
-       } else
-               val = 0;
+       }
 
-       I915_WRITE(FENCE_REG(reg), val);
-       POSTING_READ(FENCE_REG(reg));
+       if (!pipelined) {
+               struct drm_i915_private *dev_priv = fence->i915;
+               i915_reg_t reg = FENCE_REG(fence->id);
+
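+               /* A gen3 fence is a single 32-bit register, so unlike the
+                * 64-bit i965 update there is no need for the split-write
+                * workaround; one posted write suffices.
+                */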
+               I915_WRITE(reg, val);
+               POSTING_READ(reg);
+       }
 }
 
-static void i830_write_fence_reg(struct drm_device *dev, int reg,
-                               struct drm_i915_gem_object *obj)
+static void i830_write_fence_reg(struct drm_i915_fence_reg *fence,
+                                struct i915_vma *vma)
 {
-       struct drm_i915_private *dev_priv = to_i915(dev);
        u32 val;
 
-       if (obj) {
-               struct i915_vma *vma = i915_gem_object_to_ggtt(obj, NULL);
-               unsigned int tiling = i915_gem_object_get_tiling(obj);
-               unsigned int stride = i915_gem_object_get_stride(obj);
+       val = 0;
+       if (vma) {
+               unsigned int tiling = i915_gem_object_get_tiling(vma->obj);
+               bool is_y_tiled = tiling == I915_TILING_Y;
+               unsigned int stride = i915_gem_object_get_stride(vma->obj);
                u32 pitch_val;
 
                WARN((vma->node.start & ~I830_FENCE_START_MASK) ||
                pitch_val = ffs(pitch_val) - 1;
 
                val = vma->node.start;
-               if (tiling == I915_TILING_Y)
-                       val |= 1 << I830_FENCE_TILING_Y_SHIFT;
+               if (is_y_tiled)
+                       val |= BIT(I830_FENCE_TILING_Y_SHIFT);
                val |= I830_FENCE_SIZE_BITS(vma->node.size);
                val |= pitch_val << I830_FENCE_PITCH_SHIFT;
                val |= I830_FENCE_REG_VALID;
-       } else
-               val = 0;
+       }
 
-       I915_WRITE(FENCE_REG(reg), val);
-       POSTING_READ(FENCE_REG(reg));
-}
+       if (!pipelined) {
+               struct drm_i915_private *dev_priv = fence->i915;
+               i915_reg_t reg = FENCE_REG(fence->id);
 
-inline static bool i915_gem_object_needs_mb(struct drm_i915_gem_object *obj)
-{
-       return obj && obj->base.read_domains & I915_GEM_DOMAIN_GTT;
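+               /* As on gen3, a single posted 32-bit write updates the
+                * gen2 fence atomically.
+                */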
+               I915_WRITE(reg, val);
+               POSTING_READ(reg);
+       }
 }
 
-static void i915_gem_write_fence(struct drm_device *dev, int reg,
-                                struct drm_i915_gem_object *obj)
+static void fence_write(struct drm_i915_fence_reg *fence,
+                       struct i915_vma *vma)
 {
-       struct drm_i915_private *dev_priv = to_i915(dev);
-
-       /* Ensure that all CPU reads are completed before installing a fence
-        * and all writes before removing the fence.
+       /* Previous access through the fence register is marshalled by
+        * the mb() inside the fault handlers (i915_gem_release_mmap())
+        * and explicitly managed for internal users.
         */
-       if (i915_gem_object_needs_mb(dev_priv->fence_regs[reg].obj))
-               mb();
-
-       WARN(obj &&
-            (!i915_gem_object_get_stride(obj) ||
-             !i915_gem_object_get_tiling(obj)),
-            "bogus fence setup with stride: 0x%x, tiling mode: %i\n",
-            i915_gem_object_get_stride(obj),
-            i915_gem_object_get_tiling(obj));
-
-       if (IS_GEN2(dev))
-               i830_write_fence_reg(dev, reg, obj);
-       else if (IS_GEN3(dev))
-               i915_write_fence_reg(dev, reg, obj);
-       else if (INTEL_INFO(dev)->gen >= 4)
-               i965_write_fence_reg(dev, reg, obj);
-
-       /* And similarly be paranoid that no direct access to this region
-        * is reordered to before the fence is installed.
+
+       if (IS_GEN2(fence->i915))
+               i830_write_fence_reg(fence, vma);
+       else if (IS_GEN3(fence->i915))
+               i915_write_fence_reg(fence, vma);
+       else
+               i965_write_fence_reg(fence, vma);
+
+       /* Subsequent access through the fenced region is ordered by the
+        * posting reads performed whilst writing the registers.
         */
-       if (i915_gem_object_needs_mb(obj))
-               mb();
-}
 
-static inline int fence_number(struct drm_i915_private *dev_priv,
-                              struct drm_i915_fence_reg *fence)
-{
-       return fence - dev_priv->fence_regs;
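+       /* The register now reflects the vma (or is disabled); clear the
+        * dirty flag that delayed tiling changes set to force a rewrite.
+        */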
+       fence->dirty = false;
 }
 
-static void i915_gem_object_update_fence(struct drm_i915_gem_object *obj,
-                                        struct drm_i915_fence_reg *fence,
-                                        bool enable)
+static int fence_update(struct drm_i915_fence_reg *fence,
+                       struct i915_vma *vma)
 {
-       struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
-       int reg = fence_number(dev_priv, fence);
+       int ret;
 
-       i915_gem_write_fence(obj->base.dev, reg, enable ? obj : NULL);
+       if (vma) {
+               if (!i915_vma_is_map_and_fenceable(vma))
+                       return -EINVAL;
 
-       if (enable) {
-               obj->fence_reg = reg;
-               fence->obj = obj;
-               list_move_tail(&fence->link, &dev_priv->mm.fence_list);
-       } else {
-               obj->fence_reg = I915_FENCE_REG_NONE;
-               fence->obj = NULL;
-               list_del_init(&fence->link);
+               if (WARN(!i915_gem_object_get_stride(vma->obj) ||
+                        !i915_gem_object_get_tiling(vma->obj),
+                        "bogus fence setup with stride: 0x%x, tiling mode: %i\n",
+                        i915_gem_object_get_stride(vma->obj),
+                        i915_gem_object_get_tiling(vma->obj)))
+                       return -EINVAL;
+
+               ret = i915_gem_active_retire(&vma->last_fence,
+                                            &vma->obj->base.dev->struct_mutex);
+               if (ret)
+                       return ret;
        }
-       obj->fence_dirty = false;
-}
 
-static inline void i915_gem_object_fence_lost(struct drm_i915_gem_object *obj)
-{
-       if (i915_gem_object_is_tiled(obj))
-               i915_gem_release_mmap(obj);
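+       /* Likewise wait until any activity on the fence's current vma has
+        * been retired before reprogramming the register beneath it.
+        */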
+       if (fence->vma) {
+               ret = i915_gem_active_retire(&fence->vma->last_fence,
+                                     &fence->vma->obj->base.dev->struct_mutex);
+               if (ret)
+                       return ret;
+       }
 
-       /* As we do not have an associated fence register, we will force
-        * a tiling change if we ever need to acquire one.
-        */
-       obj->fence_dirty = false;
-       obj->fence_reg = I915_FENCE_REG_NONE;
-}
+       if (fence->vma && fence->vma != vma) {
+               /* Ensure that all userspace CPU access is completed before
+                * stealing the fence.
+                */
+               i915_gem_release_mmap(fence->vma->obj);
 
-static int
-i915_gem_object_wait_fence(struct drm_i915_gem_object *obj)
-{
-       return i915_gem_active_retire(&obj->last_fence,
-                                     &obj->base.dev->struct_mutex);
+               fence->vma->fence = NULL;
+               fence->vma = NULL;
+
+               list_move(&fence->link, &fence->i915->mm.fence_list);
+       }
+
+       fence_write(fence, vma);
+
+       if (vma) {
+               if (fence->vma != vma) {
+                       vma->fence = fence;
+                       fence->vma = vma;
+               }
+
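+               /* Keep the LRU in order: fence_find() steals from the head,
+                * so active fences move to the tail.
+                */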
+               list_move_tail(&fence->link, &fence->i915->mm.fence_list);
+       }
+
+       return 0;
 }
 
 /**
- * i915_gem_object_put_fence - force-remove fence for an object
- * @obj: object to map through a fence reg
+ * i915_vma_put_fence - force-remove fence for a VMA
+ * @vma: vma to map linearly (not through a fence reg)
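+ *
+ * A minimal sketch of a (hypothetical) caller, with struct_mutex held; a
+ * pinned fence cannot be force-removed and yields -EBUSY:
+ *
+ *      err = i915_vma_put_fence(vma);
+ *      if (err)
+ *              return err;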
  *
- * This function force-removes any fence from the given object, which is useful
+ * This function force-removes any fence from the given vma, which is useful
  * if the kernel wants to do untiled GTT access.
  * 0 on success, negative error code on failure.
  */
 int
-i915_gem_object_put_fence(struct drm_i915_gem_object *obj)
+i915_vma_put_fence(struct i915_vma *vma)
 {
-       struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
-       struct drm_i915_fence_reg *fence;
-       int ret;
-
-       ret = i915_gem_object_wait_fence(obj);
-       if (ret)
-               return ret;
+       struct drm_i915_fence_reg *fence = vma->fence;
 
-       if (obj->fence_reg == I915_FENCE_REG_NONE)
+       if (!fence)
                return 0;
 
-       fence = &dev_priv->fence_regs[obj->fence_reg];
-
        if (fence->pin_count)
                return -EBUSY;
 
-       i915_gem_object_fence_lost(obj);
-       i915_gem_object_update_fence(obj, fence, false);
-
-       return 0;
+       return fence_update(fence, NULL);
 }
 
-static struct drm_i915_fence_reg *
-i915_find_fence_reg(struct drm_device *dev)
+static struct drm_i915_fence_reg *fence_find(struct drm_i915_private *dev_priv)
 {
-       struct drm_i915_private *dev_priv = to_i915(dev);
-       struct drm_i915_fence_reg *reg, *avail;
-       int i;
-
-       /* First try to find a free reg */
-       avail = NULL;
-       for (i = 0; i < dev_priv->num_fence_regs; i++) {
-               reg = &dev_priv->fence_regs[i];
-               if (!reg->obj)
-                       return reg;
-
-               if (!reg->pin_count)
-                       avail = reg;
-       }
-
-       if (avail == NULL)
-               goto deadlock;
+       struct drm_i915_fence_reg *fence;
 
-       /* None available, try to steal one or wait for a user to finish */
-       list_for_each_entry(reg, &dev_priv->mm.fence_list, link) {
-               if (reg->pin_count)
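+       /* Walk the fence LRU: any unpinned register, idle or not, can be
+        * reprogrammed; fence_update() takes care of stealing it from its
+        * current vma.
+        */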
+       list_for_each_entry(fence, &dev_priv->mm.fence_list, link) {
+               if (fence->pin_count)
                        continue;
 
-               return reg;
+               return fence;
        }
 
-deadlock:
        /* Wait for completion of pending flips which consume fences */
-       if (intel_has_pending_fb_unpin(dev))
+       if (intel_has_pending_fb_unpin(&dev_priv->drm))
                return ERR_PTR(-EAGAIN);
 
        return ERR_PTR(-EDEADLK);
 }
 
 /**
- * i915_gem_object_get_fence - set up fencing for an object
- * @obj: object to map through a fence reg
+ * i915_vma_get_fence - set up fencing for a vma
+ * @vma: vma to map through a fence reg
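+ *
+ * A sketch of the expected call pattern, assuming a caller that holds
+ * struct_mutex and has already bound the vma into the mappable aperture:
+ *
+ *      err = i915_vma_get_fence(vma);
+ *      if (err)
+ *              return err;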
  *
  * When mapping objects through the GTT, userspace wants to be able to write
  * to them without having to worry about swizzling if the object is tiled.
  * 0 on success, negative error code on failure.
  */
 int
-i915_gem_object_get_fence(struct drm_i915_gem_object *obj)
+i915_vma_get_fence(struct i915_vma *vma)
 {
-       struct drm_device *dev = obj->base.dev;
-       struct drm_i915_private *dev_priv = to_i915(dev);
-       bool enable = i915_gem_object_is_tiled(obj);
-       struct drm_i915_fence_reg *reg;
-       int ret;
-
-       /* Have we updated the tiling parameters upon the object and so
-        * will need to serialise the write to the associated fence register?
-        */
-       if (obj->fence_dirty) {
-               ret = i915_gem_object_wait_fence(obj);
-               if (ret)
-                       return ret;
-       }
+       struct drm_i915_fence_reg *fence;
+       struct i915_vma *set = i915_gem_object_is_tiled(vma->obj) ? vma : NULL;
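+
+       /* "set" is NULL for an untiled object: fence_update() will then
+        * release any register this vma currently holds.
+        */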
 
        /* Just update our place in the LRU if our fence is getting reused. */
-       if (obj->fence_reg != I915_FENCE_REG_NONE) {
-               reg = &dev_priv->fence_regs[obj->fence_reg];
-               if (!obj->fence_dirty) {
-                       list_move_tail(&reg->link, &dev_priv->mm.fence_list);
+       if (vma->fence) {
+               fence = vma->fence;
+               if (!fence->dirty) {
+                       list_move_tail(&fence->link,
+                                      &fence->i915->mm.fence_list);
                        return 0;
                }
-       } else if (enable) {
-               reg = i915_find_fence_reg(dev);
-               if (IS_ERR(reg))
-                       return PTR_ERR(reg);
-
-               if (reg->obj) {
-                       struct drm_i915_gem_object *old = reg->obj;
-
-                       ret = i915_gem_object_wait_fence(old);
-                       if (ret)
-                               return ret;
-
-                       i915_gem_object_fence_lost(old);
-               }
+       } else if (set) {
+               fence = fence_find(to_i915(vma->vm->dev));
+               if (IS_ERR(fence))
+                       return PTR_ERR(fence);
        } else
                return 0;
 
-       i915_gem_object_update_fence(obj, reg, enable);
-
-       return 0;
-}
-
-/**
- * i915_gem_object_pin_fence - pin fencing state
- * @obj: object to pin fencing for
- *
- * This pins the fencing state (whether tiled or untiled) to make sure the
- * object is ready to be used as a scanout target. Fencing status must be
- * synchronize first by calling i915_gem_object_get_fence():
- *
- * The resulting fence pin reference must be released again with
- * i915_gem_object_unpin_fence().
- *
- * Returns:
- *
- * True if the object has a fence, false otherwise.
- */
-bool
-i915_gem_object_pin_fence(struct drm_i915_gem_object *obj)
-{
-       if (obj->fence_reg != I915_FENCE_REG_NONE) {
-               to_i915(obj->base.dev)->fence_regs[obj->fence_reg].pin_count++;
-               return true;
-       } else
-               return false;
-}
-
-/**
- * i915_gem_object_unpin_fence - unpin fencing state
- * @obj: object to unpin fencing for
- *
- * This releases the fence pin reference acquired through
- * i915_gem_object_pin_fence. It will handle both objects with and without an
- * attached fence correctly, callers do not need to distinguish this.
- */
-void
-i915_gem_object_unpin_fence(struct drm_i915_gem_object *obj)
-{
-       if (obj->fence_reg != I915_FENCE_REG_NONE) {
-               struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
-               WARN_ON(dev_priv->fence_regs[obj->fence_reg].pin_count <= 0);
-               dev_priv->fence_regs[obj->fence_reg].pin_count--;
-       }
+       return fence_update(fence, set);
 }
 
 /**
                 * Commit delayed tiling changes if we have an object still
                 * attached to the fence, otherwise just clear the fence.
                 */
-               if (reg->obj) {
-                       i915_gem_object_update_fence(reg->obj, reg,
-                                                    i915_gem_object_get_tiling(reg->obj));
-               } else {
-                       i915_gem_write_fence(dev, i, NULL);
-               }
+               fence_write(reg, reg->vma);
        }
 }