}
        intel_frontbuffer_flush(new_bo->frontbuffer, ORIGIN_DIRTYFB);
 
-       ret = i915_vma_put_fence(vma);
-       if (ret)
-               goto out_unpin;
-
        if (!overlay->active) {
                u32 oconfig;
 
 
         * state and so involves less work.
         */
        if (atomic_read(&obj->bind_count)) {
+               struct drm_i915_private *i915 = to_i915(obj->base.dev);
+
                /* Before we change the PTE, the GPU must not be accessing it.
                 * If we wait upon the object, we know that all the bound
                 * VMA are no longer active.
                 */
                ret = i915_gem_object_wait(obj,
                                           I915_WAIT_INTERRUPTIBLE |
                                           I915_WAIT_ALL,
                                           MAX_SCHEDULE_TIMEOUT);
                if (ret)
                        return ret;
 
-               if (!HAS_LLC(to_i915(obj->base.dev)) &&
-                   cache_level != I915_CACHE_NONE) {
-                       /* Access to snoopable pages through the GTT is
+               if (!HAS_LLC(i915) && cache_level != I915_CACHE_NONE) {
+                       intel_wakeref_t wakeref =
+                               intel_runtime_pm_get(&i915->runtime_pm);
+
+                       /*
+                        * Access to snoopable pages through the GTT is
                         * incoherent and on some machines causes a hard
                         * lockup. Relinquish the CPU mmapping to force
                         * userspace to refault in the pages and we can
                         * then double check if the GTT mapping is still
                         * valid for that pointer access.
                         */
-                       i915_gem_object_release_mmap(obj);
+                       ret = mutex_lock_interruptible(&i915->ggtt.vm.mutex);
+                       if (ret) {
+                               intel_runtime_pm_put(&i915->runtime_pm,
+                                                    wakeref);
+                               return ret;
+                       }
 
-                       /* As we no longer need a fence for GTT access,
+                       if (obj->userfault_count)
+                               __i915_gem_object_release_mmap(obj);
+
+                       /*
+                        * As we no longer need a fence for GTT access,
                         * we can relinquish it now (and so prevent having
                         * to steal a fence from someone else on the next
                         * fence request). Note GPU activity would have
                         * dropped the fence as all snoopable access is
                         * supposed to be linear.
                         */
                        for_each_ggtt_vma(vma, obj) {
-                               ret = i915_vma_put_fence(vma);
+                               ret = i915_vma_revoke_fence(vma);
                                if (ret)
-                                       return ret;
+                                       break;
                        }
+                       mutex_unlock(&i915->ggtt.vm.mutex);
+                       intel_runtime_pm_put(&i915->runtime_pm, wakeref);
+                       if (ret)
+                               return ret;
                } else {
-                       /* We either have incoherent backing store and
+                       /*
+                        * We either have incoherent backing store and
                         * so no GTT access or the architecture is fully
                         * coherent. In such cases, existing GTT mmaps
                         * ignore the cache bit in the PTE and we can
 
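The hunk above moves the locking into the caller: it takes a runtime-pm wakeref (the fence registers are only safe to rewrite while the device is awake), grabs ggtt->vm.mutex, revokes the fence on every GGTT vma, and defers the error return until after unlock. A minimal sketch of that calling pattern; the helper name revoke_all_ggtt_fences() is hypothetical and not part of the patch:

/*
 * Sketch only, mirroring the hunk above; the helper name is
 * hypothetical and error handling is trimmed to the essentials.
 */
static int revoke_all_ggtt_fences(struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *i915 = to_i915(obj->base.dev);
	struct i915_vma *vma;
	intel_wakeref_t wakeref;
	int ret;

	/* Rewriting fence registers needs the device awake. */
	wakeref = intel_runtime_pm_get(&i915->runtime_pm);

	/* i915_vma_revoke_fence() now asserts vm->mutex is held. */
	ret = mutex_lock_interruptible(&i915->ggtt.vm.mutex);
	if (ret)
		goto out_rpm;

	for_each_ggtt_vma(vma, obj) {
		ret = i915_vma_revoke_fence(vma);
		if (ret)
			break;
	}
	mutex_unlock(&i915->ggtt.vm.mutex);

out_rpm:
	intel_runtime_pm_put(&i915->runtime_pm, wakeref);
	return ret;
}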
                struct i915_vma *vma;
                int err;
 
+               if (i915_gem_object_is_tiled(obj))
+                       return ERR_PTR(-EINVAL);
+
                if (use_cpu_reloc(cache, obj))
                        return NULL;
 
                        if (err) /* no inactive aperture space, use cpu reloc */
                                return NULL;
                } else {
-                       err = i915_vma_put_fence(vma);
-                       if (err) {
-                               i915_vma_unpin(vma);
-                               return ERR_PTR(err);
-                       }
-
                        cache->node.start = vma->node.start;
                        cache->node.mm = (void *)vma;
                }
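With the i915_vma_put_fence() call dropped, reloc_iomap() caches the pinned vma directly in cache->node.mm, and tiled objects bail out up front because an unfenced aperture mapping would present their pages in linear rather than tiled order. For context, the cached entry is released later along these lines, assuming the reloc_cache_reset() logic of this kernel generation (recollected for illustration, not part of this patch):

	if (cache->node.allocated) {
		/* We inserted our own node: scrub its PTEs and remove it. */
		ggtt->vm.clear_range(&ggtt->vm,
				     cache->node.start, cache->node.size);
		drm_mm_remove_node(&cache->node);
	} else {
		/* node.mm doubled as storage for the pinned vma. */
		i915_vma_unpin((struct i915_vma *)cache->node.mm);
	}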
 
                return ret;
 
        wakeref = intel_runtime_pm_get(&i915->runtime_pm);
-       vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0,
-                                      PIN_MAPPABLE |
-                                      PIN_NONBLOCK /* NOWARN */ |
-                                      PIN_NOEVICT);
+       vma = ERR_PTR(-ENODEV);
+       if (!i915_gem_object_is_tiled(obj))
+               vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0,
+                                              PIN_MAPPABLE |
+                                              PIN_NONBLOCK /* NOWARN */ |
+                                              PIN_NOEVICT);
        if (!IS_ERR(vma)) {
                node.start = i915_ggtt_offset(vma);
                node.allocated = false;
-               ret = i915_vma_put_fence(vma);
-               if (ret) {
-                       i915_vma_unpin(vma);
-                       vma = ERR_PTR(ret);
-               }
-       }
-       if (IS_ERR(vma)) {
+       } else {
                ret = insert_mappable_node(ggtt, &node, PAGE_SIZE);
                if (ret)
                        goto out_unlock;
                wakeref = intel_runtime_pm_get(rpm);
        }
 
-       vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0,
-                                      PIN_MAPPABLE |
-                                      PIN_NONBLOCK /* NOWARN */ |
-                                      PIN_NOEVICT);
+       vma = ERR_PTR(-ENODEV);
+       if (!i915_gem_object_is_tiled(obj))
+               vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0,
+                                              PIN_MAPPABLE |
+                                              PIN_NONBLOCK /* NOWARN */ |
+                                              PIN_NOEVICT);
        if (!IS_ERR(vma)) {
                node.start = i915_ggtt_offset(vma);
                node.allocated = false;
-               ret = i915_vma_put_fence(vma);
-               if (ret) {
-                       i915_vma_unpin(vma);
-                       vma = ERR_PTR(ret);
-               }
-       }
-       if (IS_ERR(vma)) {
+       } else {
                ret = insert_mappable_node(ggtt, &node, PAGE_SIZE);
                if (ret)
                        goto out_rpm;
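
This pwrite hunk repeats the pread hunk above verbatim: instead of pinning a fenced vma and then revoking its fence, tiled objects skip the pinned-vma path and fall through to a manually reserved PAGE_SIZE node in the mappable aperture. The shared pattern could be factored out; a sketch under a hypothetical helper name:

/*
 * Hypothetical helper (illustration only): the pin-or-fallback step
 * shared by the gtt_pread and gtt_pwrite hunks above.
 */
static struct i915_vma *
pin_unfenced_mappable(struct drm_i915_gem_object *obj,
		      struct i915_ggtt *ggtt, struct drm_mm_node *node,
		      int *ret)
{
	struct i915_vma *vma = ERR_PTR(-ENODEV);

	/* A tiled object would need a fence: use the page-by-page path. */
	if (!i915_gem_object_is_tiled(obj))
		vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0,
					       PIN_MAPPABLE |
					       PIN_NONBLOCK /* NOWARN */ |
					       PIN_NOEVICT);
	if (!IS_ERR(vma)) {
		node->start = i915_ggtt_offset(vma);
		node->allocated = false;
		*ret = 0;
	} else {
		/* Reserve one mappable page to rebind each page into. */
		*ret = insert_mappable_node(ggtt, node, PAGE_SIZE);
	}

	return vma;
}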
 
 }
 
 /**
- * i915_vma_put_fence - force-remove fence for a VMA
+ * i915_vma_revoke_fence - force-remove fence for a VMA
  * @vma: vma to map linearly (not through a fence reg)
  *
  * This function force-removes any fence from the given object, which is useful
  * if the kernel wants to do untiled GTT access.
  *
  * Returns:
  *
  * 0 on success, negative error code on failure.
  */
-int i915_vma_put_fence(struct i915_vma *vma)
+int i915_vma_revoke_fence(struct i915_vma *vma)
 {
-       struct i915_ggtt *ggtt = i915_vm_to_ggtt(vma->vm);
        struct i915_fence_reg *fence = vma->fence;
-       int err;
 
+       lockdep_assert_held(&vma->vm->mutex);
        if (!fence)
                return 0;
 
        if (atomic_read(&fence->pin_count))
                return -EBUSY;
 
-       err = mutex_lock_interruptible(&ggtt->vm.mutex);
-       if (err)
-               return err;
-
-       err = fence_update(fence, NULL);
-       mutex_unlock(&ggtt->vm.mutex);
-
-       return err;
+       return fence_update(fence, NULL);
 }
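
The locking contract moves to the caller: where i915_vma_put_fence() acquired ggtt->vm.mutex itself, i915_vma_revoke_fence() only lockdep-asserts it. Callers are therefore expected to take the shape seen in the unbind hunk below; in outline:

	mutex_lock(&vma->vm->mutex);
	err = i915_vma_revoke_fence(vma);	/* may return -EBUSY if pinned */
	mutex_unlock(&vma->vm->mutex);
	if (err)
		return err;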
 
 static struct i915_fence_reg *fence_find(struct drm_i915_private *i915)
 
                GEM_BUG_ON(i915_vma_has_ggtt_write(vma));
 
                /* release the fence reg _after_ flushing */
-               ret = i915_vma_put_fence(vma);
+               mutex_lock(&vma->vm->mutex);
+               ret = i915_vma_revoke_fence(vma);
+               mutex_unlock(&vma->vm->mutex);
                if (ret)
                        return ret;
 
 
  *
  * True if the vma has a fence, false otherwise.
  */
-int i915_vma_pin_fence(struct i915_vma *vma);
-int __must_check i915_vma_put_fence(struct i915_vma *vma);
+int __must_check i915_vma_pin_fence(struct i915_vma *vma);
+int __must_check i915_vma_revoke_fence(struct i915_vma *vma);
 
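With both declarations now __must_check, fenced aperture access takes a strict acquire/use/release shape and a failed pin can no longer be silently ignored. A minimal caller sketch, using only the API above plus the i915_vma_unpin_fence() wrapper around __i915_vma_unpin_fence():

	/* Sketch: a pin_fence failure must be handled before GTT access. */
	err = i915_vma_pin_fence(vma);
	if (err)
		return err;

	/* ... detiled access through the mappable aperture ... */

	i915_vma_unpin_fence(vma);	/* __i915_vma_unpin_fence() if a fence is held */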
 static inline void __i915_vma_unpin_fence(struct i915_vma *vma)
 {