 void i915_gem_object_init(struct drm_i915_gem_object *obj,
                           const struct drm_i915_gem_object_ops *ops,
                          struct lock_class_key *key, unsigned flags)
 {
-       mutex_init(&obj->mm.lock);
-
        spin_lock_init(&obj->vma.lock);
        INIT_LIST_HEAD(&obj->vma.list);
 
 
         */
        if (IS_ENABLED(CONFIG_LOCKDEP) &&
            kref_read(&obj->base.refcount) > 0)
-               lockdep_assert_held(&obj->mm.lock);
+               assert_object_held(obj);
 }
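
For reference, assert_object_held() is already a thin wrapper around the
object's DMA reservation lock (from i915_gem_object.h):

	#define assert_object_held(obj) dma_resv_assert_held((obj)->base.resv)

so every assertion converted in this patch now checks the dma_resv ww
mutex instead of the old obj->mm.lock.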
 
 static inline int __i915_gem_object_lock(struct drm_i915_gem_object *obj,
 static inline int __must_check
 i915_gem_object_pin_pages(struct drm_i915_gem_object *obj)
 {
-       might_lock(&obj->mm.lock);
+       assert_object_held(obj);
 
        if (atomic_inc_not_zero(&obj->mm.pages_pin_count))
                return 0;
 }
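
The hunk above elides the inline's tail. A hedged reconstruction of the
whole helper, assuming the slow path is unchanged from upstream:

	static inline int __must_check
	i915_gem_object_pin_pages(struct drm_i915_gem_object *obj)
	{
		assert_object_held(obj);

		/* Fast path: pages already populated; just bump the pin count. */
		if (atomic_inc_not_zero(&obj->mm.pages_pin_count))
			return 0;

		/* First pin: populate the pages under the held object lock. */
		return __i915_gem_object_get_pages(obj);
	}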
 
 int __i915_gem_object_put_pages(struct drm_i915_gem_object *obj);
-int __i915_gem_object_put_pages_locked(struct drm_i915_gem_object *obj);
 void i915_gem_object_truncate(struct drm_i915_gem_object *obj);
 void i915_gem_object_writeback(struct drm_i915_gem_object *obj);
 
 
                 * Protects the pages and their use. Do not use directly, but
                 * instead go through the pin/unpin interfaces.
                 */
-               struct mutex lock;
                atomic_t pages_pin_count;
                atomic_t shrink_pin;
 
 
                struct list_head *list;
                unsigned long flags;
 
-               lockdep_assert_held(&obj->mm.lock);
+               assert_object_held(obj);
                spin_lock_irqsave(&i915->mm.obj_lock, flags);
 
                i915->mm.shrink_count++;
 int __i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
 {
        int err;
 
-       err = mutex_lock_interruptible(&obj->mm.lock);
-       if (err)
-               return err;
+       assert_object_held(obj);
 
        assert_object_held_shared(obj);
 
 
                err = ____i915_gem_object_get_pages(obj);
                if (err)
-                       goto unlock;
+                       return err;
 
                smp_mb__before_atomic();
        }
        atomic_inc(&obj->mm.pages_pin_count);
 
-unlock:
-       mutex_unlock(&obj->mm.lock);
-       return err;
+       return 0;
 }
 
 int i915_gem_object_pin_pages_unlocked(struct drm_i915_gem_object *obj)
        return pages;
 }
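
i915_gem_object_pin_pages_unlocked(), named above, keeps the old
convenience for callers that do not already hold the object lock. A
sketch consistent with this series, using the standard i915 ww
acquire/backoff pattern:

	int i915_gem_object_pin_pages_unlocked(struct drm_i915_gem_object *obj)
	{
		struct i915_gem_ww_ctx ww;
		int err;

		i915_gem_ww_ctx_init(&ww, true);
	retry:
		err = i915_gem_object_lock(obj, &ww);
		if (!err)
			err = i915_gem_object_pin_pages(obj);
		if (err == -EDEADLK) {
			/* Contention: drop all held locks, then retry. */
			err = i915_gem_ww_ctx_backoff(&ww);
			if (!err)
				goto retry;
		}
		i915_gem_ww_ctx_fini(&ww);
		return err;
	}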
 
-int __i915_gem_object_put_pages_locked(struct drm_i915_gem_object *obj)
+int __i915_gem_object_put_pages(struct drm_i915_gem_object *obj)
 {
        struct sg_table *pages;
 
        return 0;
 }
 
-int __i915_gem_object_put_pages(struct drm_i915_gem_object *obj)
-{
-       int err;
-
-       if (i915_gem_object_has_pinned_pages(obj))
-               return -EBUSY;
-
-       /* May be called by shrinker from within get_pages() (on another bo) */
-       mutex_lock(&obj->mm.lock);
-       err = __i915_gem_object_put_pages_locked(obj);
-       mutex_unlock(&obj->mm.lock);
-
-       return err;
-}
-
 /* The 'mapping' part of i915_gem_object_pin_map() below */
 static void *i915_gem_object_map_page(struct drm_i915_gem_object *obj,
                                      enum i915_map_type type)
            !i915_gem_object_type_has(obj, I915_GEM_OBJECT_HAS_IOMEM))
                return ERR_PTR(-ENXIO);
 
-       err = mutex_lock_interruptible(&obj->mm.lock);
-       if (err)
-               return ERR_PTR(err);
+       assert_object_held(obj);
 
        pinned = !(type & I915_MAP_OVERRIDE);
        type &= ~I915_MAP_OVERRIDE;
                        GEM_BUG_ON(i915_gem_object_has_pinned_pages(obj));
 
                        err = ____i915_gem_object_get_pages(obj);
-                       if (err) {
-                               ptr = ERR_PTR(err);
-                               goto out_unlock;
-                       }
+                       if (err)
+                               return ERR_PTR(err);
 
                        smp_mb__before_atomic();
                }
                obj->mm.mapping = page_pack_bits(ptr, type);
        }
 
-out_unlock:
-       mutex_unlock(&obj->mm.lock);
        return ptr;
 
 err_unpin:
        atomic_dec(&obj->mm.pages_pin_count);
-       goto out_unlock;
+       return ptr;
 }
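
With the mutex gone, i915_gem_object_pin_map() must be called with the
object lock held; the _unlocked variant named below takes it for you. A
usage sketch (error handling illustrative):

	void *vaddr;

	vaddr = i915_gem_object_pin_map_unlocked(obj, I915_MAP_WB);
	if (IS_ERR(vaddr))
		return PTR_ERR(vaddr);

	/* ... CPU access through vaddr ... */

	i915_gem_object_unpin_map(obj);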
 
 void *i915_gem_object_pin_map_unlocked(struct drm_i915_gem_object *obj,
 
        if (err)
                return err;
 
-       err = mutex_lock_interruptible(&obj->mm.lock);
-       if (err)
-               return err;
-
-       if (unlikely(!i915_gem_object_has_struct_page(obj)))
-               goto out;
-
-       if (obj->mm.madv != I915_MADV_WILLNEED) {
-               err = -EFAULT;
-               goto out;
-       }
+       if (obj->mm.madv != I915_MADV_WILLNEED)
+               return -EFAULT;
 
-       if (i915_gem_object_has_tiling_quirk(obj)) {
-               err = -EFAULT;
-               goto out;
-       }
+       if (i915_gem_object_has_tiling_quirk(obj))
+               return -EFAULT;
 
-       if (obj->mm.mapping || i915_gem_object_has_pinned_pages(obj)) {
-               err = -EBUSY;
-               goto out;
-       }
+       if (obj->mm.mapping || i915_gem_object_has_pinned_pages(obj))
+               return -EBUSY;
 
        if (unlikely(obj->mm.madv != I915_MADV_WILLNEED)) {
                drm_dbg(obj->base.dev,
                        "Attempting to obtain a purgeable object\n");
-               err = -EFAULT;
-               goto out;
+               return -EFAULT;
        }
 
-       err = i915_gem_object_shmem_to_phys(obj);
-
-out:
-       mutex_unlock(&obj->mm.lock);
-       return err;
+       return i915_gem_object_shmem_to_phys(obj);
 }
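
All the early exits above now plain-return because the locking has moved
up a level: i915_gem_object_attach_phys() runs under an object lock held
by its caller. A hypothetical caller sketch (exactly where the lock is
taken in this series is an assumption here):

	err = i915_gem_object_lock_interruptible(obj, NULL);
	if (err)
		return err;

	err = i915_gem_object_attach_phys(obj, align);	/* hypothetical call site */

	i915_gem_object_unlock(obj);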
 
 #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
 
         */
 
        with_intel_runtime_pm(&i915->runtime_pm, wakeref)
-               i915_gem_shrink(i915, -1UL, NULL, ~0);
+               i915_gem_shrink(NULL, i915, -1UL, NULL, ~0);
        i915_gem_drain_freed_objects(i915);
 
        wbinvd_on_all_cpus();
 
                                goto err_sg;
                        }
 
-                       i915_gem_shrink(i915, 2 * page_count, NULL, *s++);
+                       i915_gem_shrink(NULL, i915, 2 * page_count, NULL, *s++);
 
                        /*
                         * We've tried hard to allocate the memory by reaping
 
  * The number of pages of backing storage actually released.
  */
 unsigned long
-i915_gem_shrink(struct drm_i915_private *i915,
+i915_gem_shrink(struct i915_gem_ww_ctx *ww,
+               struct drm_i915_private *i915,
                unsigned long target,
                unsigned long *nr_scanned,
                unsigned int shrink)
        intel_wakeref_t wakeref = 0;
        unsigned long count = 0;
        unsigned long scanned = 0;
+       int err = 0;
 
        trace_i915_gem_shrink(i915, target, shrink);
 
 
                        spin_unlock_irqrestore(&i915->mm.obj_lock, flags);
 
-                       if (unsafe_drop_pages(obj, shrink) &&
-                           mutex_trylock(&obj->mm.lock)) {
+                       err = 0;
+                       if (unsafe_drop_pages(obj, shrink)) {
                                /* May arrive from get_pages on another bo */
-                               if (!__i915_gem_object_put_pages_locked(obj)) {
+                               if (!ww) {
+                                       if (!i915_gem_object_trylock(obj))
+                                               goto skip;
+                               } else {
+                                       err = i915_gem_object_lock(obj, ww);
+                                       if (err)
+                                               goto skip;
+                               }
+
+                               if (!__i915_gem_object_put_pages(obj)) {
                                        try_to_writeback(obj, shrink);
                                        count += obj->base.size >> PAGE_SHIFT;
                                }
-                               mutex_unlock(&obj->mm.lock);
+                               if (!ww)
+                                       i915_gem_object_unlock(obj);
                        }
 
                        dma_resv_prune(obj->base.resv);
 
                        scanned += obj->base.size >> PAGE_SHIFT;
+skip:
                        i915_gem_object_put(obj);
 
                        spin_lock_irqsave(&i915->mm.obj_lock, flags);
+                       if (err)
+                               break;
                }
                list_splice_tail(&still_in_list, phase->list);
                spin_unlock_irqrestore(&i915->mm.obj_lock, flags);
+               if (err)
+                       return err;
        }
 
        if (shrink & I915_SHRINK_BOUND)
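
The shrinker now has two modes: with ww == NULL it keeps the old
opportunistic behaviour (trylock, skip the object on contention), while
with a ww context it takes the lock properly, leaves it owned by the
transaction (hence no unlock on that path), and bails out with -EDEADLK
for the caller's backoff loop to resolve. A hypothetical ww caller sketch
(note the error comes back through the unsigned long return value):

	struct i915_gem_ww_ctx ww;
	long ret;

	i915_gem_ww_ctx_init(&ww, false);
retry:
	ret = i915_gem_shrink(&ww, i915, target, NULL, I915_SHRINK_UNBOUND);
	if (ret == -EDEADLK) {
		if (!i915_gem_ww_ctx_backoff(&ww))
			goto retry;
	}
	i915_gem_ww_ctx_fini(&ww);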
        unsigned long freed = 0;
 
        with_intel_runtime_pm(&i915->runtime_pm, wakeref) {
-               freed = i915_gem_shrink(i915, -1UL, NULL,
+               freed = i915_gem_shrink(NULL, i915, -1UL, NULL,
                                        I915_SHRINK_BOUND |
                                        I915_SHRINK_UNBOUND);
        }
 
        sc->nr_scanned = 0;
 
-       freed = i915_gem_shrink(i915,
+       freed = i915_gem_shrink(NULL, i915,
                                sc->nr_to_scan,
                                &sc->nr_scanned,
                                I915_SHRINK_BOUND |
                intel_wakeref_t wakeref;
 
                with_intel_runtime_pm(&i915->runtime_pm, wakeref) {
-                       freed += i915_gem_shrink(i915,
+                       freed += i915_gem_shrink(NULL, i915,
                                                 sc->nr_to_scan - sc->nr_scanned,
                                                 &sc->nr_scanned,
                                                 I915_SHRINK_ACTIVE |
 
        freed_pages = 0;
        with_intel_runtime_pm(&i915->runtime_pm, wakeref)
-               freed_pages += i915_gem_shrink(i915, -1UL, NULL,
+               freed_pages += i915_gem_shrink(NULL, i915, -1UL, NULL,
                                               I915_SHRINK_BOUND |
                                               I915_SHRINK_UNBOUND |
                                               I915_SHRINK_WRITEBACK);
        intel_wakeref_t wakeref;
 
        with_intel_runtime_pm(&i915->runtime_pm, wakeref)
-               freed_pages += i915_gem_shrink(i915, -1UL, NULL,
+               freed_pages += i915_gem_shrink(NULL, i915, -1UL, NULL,
                                               I915_SHRINK_BOUND |
                                               I915_SHRINK_UNBOUND |
                                               I915_SHRINK_VMAPS);
 
 #include <linux/bits.h>
 
 struct drm_i915_private;
+struct i915_gem_ww_ctx;
 struct mutex;
 
 /* i915_gem_shrinker.c */
-unsigned long i915_gem_shrink(struct drm_i915_private *i915,
+unsigned long i915_gem_shrink(struct i915_gem_ww_ctx *ww,
+                             struct drm_i915_private *i915,
                              unsigned long target,
                              unsigned long *nr_scanned,
                              unsigned flags);
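
Passing NULL for the new ww argument preserves the old trylock-only
behaviour and can never return -EDEADLK, which is why every existing call
site in this patch simply gains a leading NULL.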
 
         * pages to prevent them being swapped out and causing corruption
         * due to the change in swizzling.
         */
-       mutex_lock(&obj->mm.lock);
        if (i915_gem_object_has_pages(obj) &&
            obj->mm.madv == I915_MADV_WILLNEED &&
            i915->quirks & QUIRK_PIN_SWIZZLED_PAGES) {
                        i915_gem_object_set_tiling_quirk(obj);
                }
        }
-       mutex_unlock(&obj->mm.lock);
 
        spin_lock(&obj->vma.lock);
        for_each_ggtt_vma(vma, obj) {
 
        if (GEM_WARN_ON(i915_gem_object_has_pinned_pages(obj)))
                return -EBUSY;
 
-       mutex_lock(&obj->mm.lock);
+       assert_object_held(obj);
 
        pages = __i915_gem_object_unset_pages(obj);
        if (!IS_ERR_OR_NULL(pages))
 
        if (get_pages)
                err = ____i915_gem_object_get_pages(obj);
-       mutex_unlock(&obj->mm.lock);
 
        return err;
 }
 
 
        fs_reclaim_acquire(GFP_KERNEL);
        if (val & DROP_BOUND)
-               i915_gem_shrink(i915, LONG_MAX, NULL, I915_SHRINK_BOUND);
+               i915_gem_shrink(NULL, i915, LONG_MAX, NULL, I915_SHRINK_BOUND);
 
        if (val & DROP_UNBOUND)
-               i915_gem_shrink(i915, LONG_MAX, NULL, I915_SHRINK_UNBOUND);
+               i915_gem_shrink(NULL, i915, LONG_MAX, NULL, I915_SHRINK_UNBOUND);
 
        if (val & DROP_SHRINK_ALL)
                i915_gem_shrink_all(i915);
 
        if (err)
                goto out;
 
-       err = mutex_lock_interruptible(&obj->mm.lock);
-       if (err)
-               goto out_ww;
-
        if (i915_gem_object_has_pages(obj) &&
            i915_gem_object_is_tiled(obj) &&
            i915->quirks & QUIRK_PIN_SWIZZLED_PAGES) {
                i915_gem_object_truncate(obj);
 
        args->retained = obj->mm.madv != __I915_MADV_PURGED;
-       mutex_unlock(&obj->mm.lock);
 
-out_ww:
        i915_gem_object_unlock(obj);
 out:
        i915_gem_object_put(obj);
 
                 * the DMA remapper, i915_gem_shrink will return 0.
                 */
                GEM_BUG_ON(obj->mm.pages == pages);
-       } while (i915_gem_shrink(to_i915(obj->base.dev),
+       } while (i915_gem_shrink(NULL, to_i915(obj->base.dev),
                                 obj->base.size >> PAGE_SHIFT, NULL,
                                 I915_SHRINK_BOUND |
                                 I915_SHRINK_UNBOUND));