www.infradead.org Git - users/hch/misc.git/commitdiff
drm/i915/selftests: Do not leak vm_area_struct on early return
author Krzysztof Karas <krzysztof.karas@intel.com>
Wed, 30 Jul 2025 07:40:18 +0000 (07:40 +0000)
committer Andi Shyti <andi.shyti@kernel.org>
Sat, 9 Aug 2025 00:03:58 +0000 (23:03 -0100)
The vm_area_struct created by the mmap may be leaked on early failure
paths, so call vm_munmap() on those paths to avoid that.

Suggested-by: Chris Wilson <chris.p.wilson@linux.intel.com>
Signed-off-by: Krzysztof Karas <krzysztof.karas@intel.com>
Reviewed-by: Andi Shyti <andi.shyti@linux.intel.com>
Signed-off-by: Andi Shyti <andi.shyti@linux.intel.com>
Link: https://lore.kernel.org/r/d8277e3af09c388fa5dfbf5c9bb3985ae0b191a2.1753859971.git.krzysztof.karas@intel.com
drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c

index c94b71a963b2c59da8e24b6cdc16a5940621d542..78734c404a6df3e350279f7ffb9c556f670d1149 100644 (file)
@@ -1096,32 +1096,20 @@ static int ___igt_mmap_migrate(struct drm_i915_private *i915,
                               unsigned long addr,
                               bool unfaultable)
 {
-       struct vm_area_struct *area;
-       int err = 0, i;
+       int i;
 
        pr_info("igt_mmap(%s, %d) @ %lx\n",
                obj->mm.region->name, I915_MMAP_TYPE_FIXED, addr);
 
-       mmap_read_lock(current->mm);
-       area = vma_lookup(current->mm, addr);
-       mmap_read_unlock(current->mm);
-       if (!area) {
-               pr_err("%s: Did not create a vm_area_struct for the mmap\n",
-                      obj->mm.region->name);
-               err = -EINVAL;
-               goto out_unmap;
-       }
-
        for (i = 0; i < obj->base.size / sizeof(u32); i++) {
                u32 __user *ux = u64_to_user_ptr((u64)(addr + i * sizeof(*ux)));
                u32 x;
 
                if (get_user(x, ux)) {
-                       err = -EFAULT;
                        if (!unfaultable) {
                                pr_err("%s: Unable to read from mmap, offset:%zd\n",
                                       obj->mm.region->name, i * sizeof(x));
-                               goto out_unmap;
+                               return -EFAULT;
                        }
 
                        continue;
@@ -1130,37 +1118,29 @@ static int ___igt_mmap_migrate(struct drm_i915_private *i915,
                if (unfaultable) {
                        pr_err("%s: Faulted unmappable memory\n",
                               obj->mm.region->name);
-                       err = -EINVAL;
-                       goto out_unmap;
+                       return -EINVAL;
                }
 
                if (x != expand32(POISON_INUSE)) {
                        pr_err("%s: Read incorrect value from mmap, offset:%zd, found:%x, expected:%x\n",
                               obj->mm.region->name,
                               i * sizeof(x), x, expand32(POISON_INUSE));
-                       err = -EINVAL;
-                       goto out_unmap;
+                       return -EINVAL;
                }
 
                x = expand32(POISON_FREE);
                if (put_user(x, ux)) {
                        pr_err("%s: Unable to write to mmap, offset:%zd\n",
                               obj->mm.region->name, i * sizeof(x));
-                       err = -EFAULT;
-                       goto out_unmap;
+                       return -EFAULT;
                }
        }
 
-       if (unfaultable) {
-               if (err == -EFAULT)
-                       err = 0;
-       } else {
-               obj->flags &= ~I915_BO_ALLOC_GPU_ONLY;
-               err = wc_check(obj);
-       }
-out_unmap:
-       vm_munmap(addr, obj->base.size);
-       return err;
+       if (unfaultable)
+               return 0;
+
+       obj->flags &= ~I915_BO_ALLOC_GPU_ONLY;
+       return wc_check(obj);
 }
 
 #define IGT_MMAP_MIGRATE_TOPDOWN     (1 << 0)
@@ -1176,6 +1156,7 @@ static int __igt_mmap_migrate(struct intel_memory_region **placements,
        struct drm_i915_private *i915 = placements[0]->i915;
        struct drm_i915_gem_object *obj;
        struct i915_request *rq = NULL;
+       struct vm_area_struct *area;
        unsigned long addr;
        LIST_HEAD(objects);
        u64 offset;
@@ -1207,20 +1188,30 @@ static int __igt_mmap_migrate(struct intel_memory_region **placements,
                goto out_put;
        }
 
+       mmap_read_lock(current->mm);
+       area = vma_lookup(current->mm, addr);
+       mmap_read_unlock(current->mm);
+       if (!area) {
+               pr_err("%s: Did not create a vm_area_struct for the mmap\n",
+                      obj->mm.region->name);
+               err = -EINVAL;
+               goto out_addr;
+       }
+
        if (flags & IGT_MMAP_MIGRATE_FILL) {
                err = igt_fill_mappable(placements[0], &objects);
                if (err)
-                       goto out_put;
+                       goto out_addr;
        }
 
        err = i915_gem_object_lock(obj, NULL);
        if (err)
-               goto out_put;
+               goto out_addr;
 
        err = i915_gem_object_pin_pages(obj);
        if (err) {
                i915_gem_object_unlock(obj);
-               goto out_put;
+               goto out_addr;
        }
 
        err = intel_context_migrate_clear(to_gt(i915)->migrate.context, NULL,
@@ -1237,7 +1228,7 @@ static int __igt_mmap_migrate(struct intel_memory_region **placements,
        }
        i915_gem_object_unlock(obj);
        if (err)
-               goto out_put;
+               goto out_addr;
 
        if (flags & IGT_MMAP_MIGRATE_EVICTABLE)
                igt_make_evictable(&objects);
@@ -1245,16 +1236,16 @@ static int __igt_mmap_migrate(struct intel_memory_region **placements,
        if (flags & IGT_MMAP_MIGRATE_FAIL_GPU) {
                err = i915_gem_object_lock(obj, NULL);
                if (err)
-                       goto out_put;
+                       goto out_addr;
 
                /*
-                * Ensure we only simulate the gpu failuire when faulting the
+                * Ensure we only simulate the gpu failure when faulting the
                 * pages.
                 */
                err = i915_gem_object_wait_moving_fence(obj, true);
                i915_gem_object_unlock(obj);
                if (err)
-                       goto out_put;
+                       goto out_addr;
                i915_ttm_migrate_set_failure_modes(true, false);
        }
 
@@ -1298,6 +1289,9 @@ static int __igt_mmap_migrate(struct intel_memory_region **placements,
                }
        }
 
+out_addr:
+       vm_munmap(addr, obj->base.size);
+
 out_put:
        i915_gem_object_put(obj);
        igt_close_objects(i915, &objects);