www.infradead.org Git - nvme.git/commitdiff
drm/i915: Use a single page table lock for each gtt.
author: Maarten Lankhorst <maarten.lankhorst@linux.intel.com>
Tue, 23 Mar 2021 15:50:29 +0000 (16:50 +0100)
committer: Daniel Vetter <daniel.vetter@ffwll.ch>
Wed, 24 Mar 2021 16:30:37 +0000 (17:30 +0100)
We may create page table objects on the fly, but we may need to
wait with the ww lock held. Instead of waiting on a freed obj
lock, ensure we have the same lock for each object to keep
-EDEADLK working. This ensures that i915_vma_pin_ww can lock
the page tables when required.

Signed-off-by: Maarten Lankhorst <maarten.lankhorst@linux.intel.com>
Reviewed-by: Thomas Hellström <thomas.hellstrom@linux.intel.com>
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
Link: https://patchwork.freedesktop.org/patch/msgid/20210323155059.628690-41-maarten.lankhorst@linux.intel.com
drivers/gpu/drm/i915/gt/intel_ggtt.c
drivers/gpu/drm/i915/gt/intel_gtt.c
drivers/gpu/drm/i915/gt/intel_gtt.h
drivers/gpu/drm/i915/gt/intel_ppgtt.c
drivers/gpu/drm/i915/i915_vma.c

index 86d843eb1b32dbade1669c46e53793a80418b998..c56320b2c4b440f3870e328954ba310bb9d0e492 100644 (file)
@@ -647,7 +647,9 @@ static int init_aliasing_ppgtt(struct i915_ggtt *ggtt)
        if (err)
                goto err_ppgtt;
 
+       i915_gem_object_lock(ppgtt->vm.scratch[0], NULL);
        err = i915_vm_pin_pt_stash(&ppgtt->vm, &stash);
+       i915_gem_object_unlock(ppgtt->vm.scratch[0]);
        if (err)
                goto err_stash;
 
@@ -734,6 +736,7 @@ static void ggtt_cleanup_hw(struct i915_ggtt *ggtt)
 
        mutex_unlock(&ggtt->vm.mutex);
        i915_address_space_fini(&ggtt->vm);
+       dma_resv_fini(&ggtt->vm.resv);
 
        arch_phys_wc_del(ggtt->mtrr);
 
@@ -1115,6 +1118,7 @@ static int ggtt_probe_hw(struct i915_ggtt *ggtt, struct intel_gt *gt)
        ggtt->vm.gt = gt;
        ggtt->vm.i915 = i915;
        ggtt->vm.dma = i915->drm.dev;
+       dma_resv_init(&ggtt->vm.resv);
 
        if (INTEL_GEN(i915) <= 5)
                ret = i915_gmch_probe(ggtt);
@@ -1122,8 +1126,10 @@ static int ggtt_probe_hw(struct i915_ggtt *ggtt, struct intel_gt *gt)
                ret = gen6_gmch_probe(ggtt);
        else
                ret = gen8_gmch_probe(ggtt);
-       if (ret)
+       if (ret) {
+               dma_resv_fini(&ggtt->vm.resv);
                return ret;
+       }
 
        if ((ggtt->vm.total - 1) >> 32) {
                drm_err(&i915->drm,
index 444d9bacfafd16f86b22368cd31f47b359bd0c03..941f8af016d623ceb048c59aa6ec7fa88bad14c4 100644 (file)
 
 struct drm_i915_gem_object *alloc_pt_dma(struct i915_address_space *vm, int sz)
 {
+       struct drm_i915_gem_object *obj;
+
        if (I915_SELFTEST_ONLY(should_fail(&vm->fault_attr, 1)))
                i915_gem_shrink_all(vm->i915);
 
-       return i915_gem_object_create_internal(vm->i915, sz);
+       obj = i915_gem_object_create_internal(vm->i915, sz);
+       /* ensure all dma objects have the same reservation class */
+       if (!IS_ERR(obj))
+               obj->base.resv = &vm->resv;
+       return obj;
 }
 
 int pin_pt_dma(struct i915_address_space *vm, struct drm_i915_gem_object *obj)
 {
        int err;
 
+       i915_gem_object_lock(obj, NULL);
+       err = i915_gem_object_pin_pages(obj);
+       i915_gem_object_unlock(obj);
+       if (err)
+               return err;
+
+       i915_gem_object_make_unshrinkable(obj);
+       return 0;
+}
+
+int pin_pt_dma_locked(struct i915_address_space *vm, struct drm_i915_gem_object *obj)
+{
+       int err;
+
        err = i915_gem_object_pin_pages(obj);
        if (err)
                return err;
@@ -56,6 +76,20 @@ void __i915_vm_close(struct i915_address_space *vm)
        mutex_unlock(&vm->mutex);
 }
 
+/* lock the vm into the current ww, if we lock one, we lock all */
+int i915_vm_lock_objects(struct i915_address_space *vm,
+                        struct i915_gem_ww_ctx *ww)
+{
+       if (vm->scratch[0]->base.resv == &vm->resv) {
+               return i915_gem_object_lock(vm->scratch[0], ww);
+       } else {
+               struct i915_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
+
+               /* We borrowed the scratch page from ggtt, take the top level object */
+               return i915_gem_object_lock(ppgtt->pd->pt.base, ww);
+       }
+}
+
 void i915_address_space_fini(struct i915_address_space *vm)
 {
        drm_mm_takedown(&vm->mm);
@@ -69,6 +103,7 @@ static void __i915_vm_release(struct work_struct *work)
 
        vm->cleanup(vm);
        i915_address_space_fini(vm);
+       dma_resv_fini(&vm->resv);
 
        kfree(vm);
 }
@@ -98,6 +133,7 @@ void i915_address_space_init(struct i915_address_space *vm, int subclass)
        mutex_init(&vm->mutex);
        lockdep_set_subclass(&vm->mutex, subclass);
        i915_gem_shrinker_taints_mutex(vm->i915, &vm->mutex);
+       dma_resv_init(&vm->resv);
 
        GEM_BUG_ON(!vm->total);
        drm_mm_init(&vm->mm, 0, vm->total);
index af90090c3d1827591aca5788f28f7cfb2f7a271b..8f7c49efa1900b1325282ad4af7c1b29abf129cb 100644 (file)
@@ -238,6 +238,7 @@ struct i915_address_space {
        atomic_t open;
 
        struct mutex mutex; /* protects vma and our lists */
+       struct dma_resv resv; /* reservation lock for all pd objects, and buffer pool */
 #define VM_CLASS_GGTT 0
 #define VM_CLASS_PPGTT 1
 
@@ -346,6 +347,9 @@ struct i915_ppgtt {
 
 #define i915_is_ggtt(vm) ((vm)->is_ggtt)
 
+int __must_check
+i915_vm_lock_objects(struct i915_address_space *vm, struct i915_gem_ww_ctx *ww);
+
 static inline bool
 i915_vm_is_4lvl(const struct i915_address_space *vm)
 {
@@ -522,6 +526,7 @@ struct i915_page_directory *alloc_pd(struct i915_address_space *vm);
 struct i915_page_directory *__alloc_pd(int npde);
 
 int pin_pt_dma(struct i915_address_space *vm, struct drm_i915_gem_object *obj);
+int pin_pt_dma_locked(struct i915_address_space *vm, struct drm_i915_gem_object *obj);
 
 void free_px(struct i915_address_space *vm,
             struct i915_page_table *pt, int lvl);
index 3f940ae27028764f449b8738d8f705b5c76fbc67..ccf4b8539ab9737a22c886efec26c42c2d8efa1c 100644 (file)
@@ -262,7 +262,7 @@ int i915_vm_pin_pt_stash(struct i915_address_space *vm,
 
        for (n = 0; n < ARRAY_SIZE(stash->pt); n++) {
                for (pt = stash->pt[n]; pt; pt = pt->stash) {
-                       err = pin_pt_dma(vm, pt->base);
+                       err = pin_pt_dma_locked(vm, pt->base);
                        if (err)
                                return err;
                }
@@ -304,6 +304,7 @@ void ppgtt_init(struct i915_ppgtt *ppgtt, struct intel_gt *gt)
        ppgtt->vm.dma = i915->drm.dev;
        ppgtt->vm.total = BIT_ULL(INTEL_INFO(i915)->ppgtt_size);
 
+       dma_resv_init(&ppgtt->vm.resv);
        i915_address_space_init(&ppgtt->vm, VM_CLASS_PPGTT);
 
        ppgtt->vm.vma_ops.bind_vma    = ppgtt_bind_vma;
index 6401299d421aeb914991f2f7b2262036aaaa8ddc..07490db51cdc3039bc05946a4b919e3d6c5df726 100644 (file)
@@ -884,6 +884,11 @@ int i915_vma_pin_ww(struct i915_vma *vma, struct i915_gem_ww_ctx *ww,
                wakeref = intel_runtime_pm_get(&vma->vm->i915->runtime_pm);
 
        if (flags & vma->vm->bind_async_flags) {
+               /* lock VM */
+               err = i915_vm_lock_objects(vma->vm, ww);
+               if (err)
+                       goto err_rpm;
+
                work = i915_vma_work();
                if (!work) {
                        err = -ENOMEM;