drm/xe/migrate: prevent infinite recursion
author Matthew Auld <matthew.auld@intel.com>
Thu, 31 Jul 2025 09:38:09 +0000 (10:38 +0100)
committer Matthew Auld <matthew.auld@intel.com>
Thu, 7 Aug 2025 15:59:18 +0000 (16:59 +0100)
If buf + offset is not aligned to XE_CACHELINE_BYTES we fall back to
using a bounce buffer. However, the bounce buffer is allocated on the
stack, and its only alignment requirement is natural alignment for u8,
not XE_CACHELINE_BYTES. If the bounce buffer is also misaligned we
recurse back into the function, but the new bounce buffer might not be
aligned either, and might never be, so we keep recursing until we
eventually blow through the stack.
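
A minimal sketch of the failure mode (illustrative only, not the driver
code; the helper name and the standalone XE_CACHELINE_BYTES definition
are made up for this example):

#include <linux/align.h>
#include <linux/types.h>

#define XE_CACHELINE_BYTES 64	/* stand-in for the xe driver value */

/*
 * Each recursion level gets a fresh stack buffer whose address is only
 * guaranteed to be naturally aligned for u8, so the aligned fast path
 * may never be reached and the stack is eventually exhausted.
 */
static int access_memory(void *buf, size_t len)
{
	if (!IS_ALIGNED((unsigned long)buf, XE_CACHELINE_BYTES)) {
		u8 bounce[XE_CACHELINE_BYTES];	/* only u8-aligned */

		/* Recurses with yet another arbitrary stack address. */
		return access_memory(bounce, sizeof(bounce));
	}

	return 0; /* cacheline-aligned fast path */
}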

Instead of using the stack, use kmalloc, which should respect the
power-of-two alignment request here. This fixes a kernel panic when
triggering this path through eudebug.
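
A rough sketch of why the kmalloc approach terminates, assuming
kmalloc()'s guarantee that power-of-two sized allocations are aligned
to their size (the helper name and local XE_CACHELINE_BYTES definition
are made up for illustration):

#include <linux/slab.h>
#include <linux/log2.h>
#include <linux/align.h>
#include <linux/build_bug.h>
#include <linux/err.h>

#define XE_CACHELINE_BYTES 64	/* stand-in for the xe driver value */

static void *alloc_cacheline_bounce(void)
{
	void *bounce;

	/* Only holds if the cacheline size is a power of two. */
	BUILD_BUG_ON(!is_power_of_2(XE_CACHELINE_BYTES));

	/*
	 * kmalloc() aligns power-of-two sized allocations to their size,
	 * so the bounce buffer always passes the IS_ALIGNED() check and
	 * the recursion stops after a single level.
	 */
	bounce = kmalloc(XE_CACHELINE_BYTES, GFP_KERNEL);
	if (!bounce)
		return ERR_PTR(-ENOMEM);

	WARN_ON(!IS_ALIGNED((unsigned long)bounce, XE_CACHELINE_BYTES));
	return bounce;
}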

v2 (Stuart):
 - Add build bug check for power-of-two restriction
 - s/EINVAL/ENOMEM/

Fixes: 270172f64b11 ("drm/xe: Update xe_ttm_access_memory to use GPU for non-visible access")
Signed-off-by: Matthew Auld <matthew.auld@intel.com>
Cc: Maciej Patelczyk <maciej.patelczyk@intel.com>
Cc: Stuart Summers <stuart.summers@intel.com>
Cc: Matthew Brost <matthew.brost@intel.com>
Reviewed-by: Stuart Summers <stuart.summers@intel.com>
Link: https://lore.kernel.org/r/20250731093807.207572-6-matthew.auld@intel.com
drivers/gpu/drm/xe/xe_migrate.c

index 0f9636a060839eb91b1ff6566d74fac98cf28b30..1e9ff18656db96b1505ce82004caae01157a3cbe 100644 (file)
@@ -2017,15 +2017,19 @@ int xe_migrate_access_memory(struct xe_migrate *m, struct xe_bo *bo,
        if (!IS_ALIGNED(len, XE_CACHELINE_BYTES) ||
            !IS_ALIGNED((unsigned long)buf + offset, XE_CACHELINE_BYTES)) {
                int buf_offset = 0;
+               void *bounce;
+               int err;
+
+               BUILD_BUG_ON(!is_power_of_2(XE_CACHELINE_BYTES));
+               bounce = kmalloc(XE_CACHELINE_BYTES, GFP_KERNEL);
+               if (!bounce)
+                       return -ENOMEM;
 
                /*
                 * Less than ideal for large unaligned access but this should be
                 * fairly rare, can fixup if this becomes common.
                 */
                do {
-                       u8 bounce[XE_CACHELINE_BYTES];
-                       void *ptr = (void *)bounce;
-                       int err;
                        int copy_bytes = min_t(int, bytes_left,
                                               XE_CACHELINE_BYTES -
                                               (offset & XE_CACHELINE_MASK));
@@ -2034,22 +2038,22 @@ int xe_migrate_access_memory(struct xe_migrate *m, struct xe_bo *bo,
                        err = xe_migrate_access_memory(m, bo,
                                                       offset &
                                                       ~XE_CACHELINE_MASK,
-                                                      (void *)ptr,
-                                                      sizeof(bounce), 0);
+                                                      bounce,
+                                                      XE_CACHELINE_BYTES, 0);
                        if (err)
-                               return err;
+                               break;
 
                        if (write) {
-                               memcpy(ptr + ptr_offset, buf + buf_offset, copy_bytes);
+                               memcpy(bounce + ptr_offset, buf + buf_offset, copy_bytes);
 
                                err = xe_migrate_access_memory(m, bo,
                                                               offset & ~XE_CACHELINE_MASK,
-                                                              (void *)ptr,
-                                                              sizeof(bounce), write);
+                                                              bounce,
+                                                              XE_CACHELINE_BYTES, write);
                                if (err)
-                                       return err;
+                                       break;
                        } else {
-                               memcpy(buf + buf_offset, ptr + ptr_offset,
+                               memcpy(buf + buf_offset, bounce + ptr_offset,
                                       copy_bytes);
                        }
 
@@ -2058,7 +2062,8 @@ int xe_migrate_access_memory(struct xe_migrate *m, struct xe_bo *bo,
                        offset += copy_bytes;
                } while (bytes_left);
 
-               return 0;
+               kfree(bounce);
+               return err;
        }
 
        pagemap_addr = xe_migrate_dma_map(xe, buf, len + page_offset, write);