www.infradead.org Git - users/jedix/linux-maple.git/commitdiff
drm/xe/migrate: Populate struct drm_pagemap_addr array
author: Francois Dugast <francois.dugast@intel.com>
Tue, 5 Aug 2025 13:59:06 +0000 (15:59 +0200)
committer: Francois Dugast <francois.dugast@intel.com>
Wed, 6 Aug 2025 11:35:05 +0000 (13:35 +0200)
Workaround to ensure all addresses are populated in the array as
this is expected when creating the copy batch. This is required
because the migrate layer does not support 2MB GPU pages yet. A
proper fix will come in a follow-up.

Reviewed-by: Matthew Brost <matthew.brost@intel.com>
Link: https://lore.kernel.org/r/20250805140028.599361-6-francois.dugast@intel.com
Signed-off-by: Francois Dugast <francois.dugast@intel.com>
drivers/gpu/drm/xe/xe_migrate.c

index 6a0f04411ae0d14cb8e5e57020776eb46f7b2cb3..0f9636a060839eb91b1ff6566d74fac98cf28b30 100644 (file)
@@ -1802,6 +1802,7 @@ static struct dma_fence *xe_migrate_vram(struct xe_migrate *m,
        unsigned int pitch = len >= PAGE_SIZE && !(len & ~PAGE_MASK) ?
                PAGE_SIZE : 4;
        int err;
+       unsigned long i, j;
 
        if (drm_WARN_ON(&xe->drm, (len & XE_CACHELINE_MASK) ||
                        (sram_offset | vram_addr) & XE_CACHELINE_MASK))
@@ -1818,6 +1819,24 @@ static struct dma_fence *xe_migrate_vram(struct xe_migrate *m,
                return ERR_PTR(err);
        }
 
+       /*
+        * If the order of a struct drm_pagemap_addr entry is greater than 0,
+        * the entry is populated by GPU pagemap but subsequent entries within
+        * the range of that order are not populated.
+        * build_pt_update_batch_sram() expects a fully populated array of
+        * struct drm_pagemap_addr. Ensure this is the case even with higher
+        * orders.
+        */
+       for (i = 0; i < npages;) {
+               unsigned int order = sram_addr[i].order;
+
+               for (j = 1; j < NR_PAGES(order) && i + j < npages; j++)
+                       if (!sram_addr[i + j].addr)
+                               sram_addr[i + j].addr = sram_addr[i].addr + j * PAGE_SIZE;
+
+               i += NR_PAGES(order);
+       }
+
        build_pt_update_batch_sram(m, bb, pt_slot * XE_PAGE_SIZE,
                                   sram_addr, len + sram_offset);