drm/i915: Clean-up outdated struct_mutex comments
author Luiz Otavio Mello <luiz.mello@estudante.ufscar.br>
Mon, 8 Sep 2025 13:15:15 +0000 (09:15 -0400)
committer Rodrigo Vivi <rodrigo.vivi@intel.com>
Tue, 9 Sep 2025 14:39:21 +0000 (10:39 -0400)
The struct_mutex will be removed from the DRM subsystem, as it was a
legacy BKL-style lock that was only used by the i915 driver. After
review, it was concluded that its usage was no longer necessary.

This patch updates various comments in the i915 codebase to
either remove or clarify references to struct_mutex, in order to
prevent future misunderstandings.

* i915_drv.h: Removed the statement that stolen_lock is the inner lock
  when it overlaps with struct_mutex, since struct_mutex is no longer used
  in the driver (see the sketch after this list).
* i915_gem.c: Removed the parenthetical suggesting the use of struct_mutex,
  which is no longer used.
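For context, the locking rule the updated i915_drv.h comment now describes can
be sketched as below. This is a minimal, hypothetical illustration, not the
driver's actual stolen-memory helper: the struct and function names are made
up, and only drm_mm_insert_node(), struct drm_mm and struct mutex mirror real
kernel APIs. The point is simply that stolen_lock is taken on its own, with no
outer struct_mutex left for it to nest under.

/*
 * Hypothetical sketch -- not the driver's real helper. Only
 * drm_mm_insert_node(), struct drm_mm and struct mutex are real
 * kernel APIs; the rest is illustrative.
 */
#include <linux/mutex.h>
#include <drm/drm_mm.h>

struct stolen_mm_sketch {
	struct drm_mm stolen;		/* allocator for GTT stolen memory */
	struct mutex stolen_lock;	/* sole protection for the allocator */
};

static int stolen_insert_sketch(struct stolen_mm_sketch *mm,
				struct drm_mm_node *node, u64 size)
{
	int ret;

	/* stolen_lock stands alone: no struct_mutex is held around it */
	mutex_lock(&mm->stolen_lock);
	ret = drm_mm_insert_node(&mm->stolen, node, size);
	mutex_unlock(&mm->stolen_lock);

	return ret;
}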

Signed-off-by: Luiz Otavio Mello <luiz.mello@estudante.ufscar.br>
Reviewed-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
Link: https://lore.kernel.org/r/20250908131518.36625-8-luiz.mello@estudante.ufscar.br
Signed-off-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
drivers/gpu/drm/i915/i915_drv.h
drivers/gpu/drm/i915/i915_gem.c

diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 55f13cb2c395b158b5ac3b5bbf7138699d84afd2..50fdecb79b492e52390632a7e67aba92cd164656 100644
@@ -116,8 +116,7 @@ struct i915_gem_mm {
        struct intel_memory_region *stolen_region;
        /** Memory allocator for GTT stolen memory */
        struct drm_mm stolen;
-       /** Protects the usage of the GTT stolen memory allocator. This is
-        * always the inner lock when overlapping with struct_mutex. */
+       /** Protects the usage of the GTT stolen memory allocator */
        struct mutex stolen_lock;
 
        /* Protects bound_list/unbound_list and #drm_i915_gem_object.mm.link */
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 8c8d43451f35ecd6855c8b40ca69f359d15246d2..e14a0c3db999b9752af4d5a5b6d1b3b46922493d 100644
@@ -847,8 +847,7 @@ void i915_gem_runtime_suspend(struct drm_i915_private *i915)
        /*
         * Only called during RPM suspend. All users of the userfault_list
         * must be holding an RPM wakeref to ensure that this can not
-        * run concurrently with themselves (and use the struct_mutex for
-        * protection between themselves).
+        * run concurrently with themselves.
         */
 
        list_for_each_entry_safe(obj, on,
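
As a closing note on the i915_gem.c hunk above: the trimmed comment still
carries the whole invariant, namely that every user of the userfault_list
holds a runtime-PM wakeref, and i915_gem_runtime_suspend() is only called
once the device goes idle for RPM suspend, so the two cannot overlap. A rough
sketch of what such a user looks like follows, assuming the
intel_runtime_pm_get()/intel_runtime_pm_put() helpers and the runtime_pm
member of drm_i915_private as found in current i915; the function itself is
hypothetical.

/* Hypothetical userfault_list user -- illustrative only. */
static int touch_userfault_entry_sketch(struct drm_i915_private *i915,
					struct drm_i915_gem_object *obj)
{
	intel_wakeref_t wakeref;

	/*
	 * Holding a runtime-PM wakeref keeps the device from entering RPM
	 * suspend, so i915_gem_runtime_suspend() (and its userfault_list
	 * walk) cannot run while obj is being mapped and tracked here.
	 */
	wakeref = intel_runtime_pm_get(&i915->runtime_pm);

	/* ... create the userspace mapping and keep obj on the
	 * userfault_list while the wakeref is held ... */

	intel_runtime_pm_put(&i915->runtime_pm, wakeref);
	return 0;
}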