From: Francois Dugast
Date: Tue, 5 Aug 2025 13:59:06 +0000 (+0200)
Subject: drm/xe/migrate: Populate struct drm_pagemap_addr array
X-Git-Url: https://www.infradead.org/git/?a=commitdiff_plain;h=321d42032567e6ccbcec971b5547ae8ee1d4b861;p=users%2Fjedix%2Flinux-maple.git

drm/xe/migrate: Populate struct drm_pagemap_addr array

Workaround to ensure all addresses are populated in the array as this
is expected when creating the copy batch. This is required because the
migrate layer does not support 2MB GPU pages yet. A proper fix will
come in a follow-up.

Reviewed-by: Matthew Brost
Link: https://lore.kernel.org/r/20250805140028.599361-6-francois.dugast@intel.com
Signed-off-by: Francois Dugast
---

diff --git a/drivers/gpu/drm/xe/xe_migrate.c b/drivers/gpu/drm/xe/xe_migrate.c
index 6a0f04411ae0..0f9636a06083 100644
--- a/drivers/gpu/drm/xe/xe_migrate.c
+++ b/drivers/gpu/drm/xe/xe_migrate.c
@@ -1802,6 +1802,7 @@ static struct dma_fence *xe_migrate_vram(struct xe_migrate *m,
 	unsigned int pitch = len >= PAGE_SIZE && !(len & ~PAGE_MASK) ?
 		PAGE_SIZE : 4;
 	int err;
+	unsigned long i, j;
 
 	if (drm_WARN_ON(&xe->drm, (len & XE_CACHELINE_MASK) ||
 			(sram_offset | vram_addr) & XE_CACHELINE_MASK))
@@ -1818,6 +1819,24 @@ static struct dma_fence *xe_migrate_vram(struct xe_migrate *m,
 		return ERR_PTR(err);
 	}
 
+	/*
+	 * If the order of a struct drm_pagemap_addr entry is greater than 0,
+	 * the entry is populated by GPU pagemap but subsequent entries within
+	 * the range of that order are not populated.
+	 * build_pt_update_batch_sram() expects a fully populated array of
+	 * struct drm_pagemap_addr. Ensure this is the case even with higher
+	 * orders.
+	 */
+	for (i = 0; i < npages;) {
+		unsigned int order = sram_addr[i].order;
+
+		for (j = 1; j < NR_PAGES(order) && i + j < npages; j++)
+			if (!sram_addr[i + j].addr)
+				sram_addr[i + j].addr = sram_addr[i].addr + j * PAGE_SIZE;
+
+		i += NR_PAGES(order);
+	}
+
 	build_pt_update_batch_sram(m, bb, pt_slot * XE_PAGE_SIZE,
 				   sram_addr, len + sram_offset);
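
For readers who want to see the population logic in isolation, below is a
minimal standalone C sketch of the same fixup loop, runnable in userspace.
The struct pagemap_addr type, the sample base address and the main()
harness are illustrative stand-ins rather than kernel code, and
NR_PAGES(order) is assumed here to expand to (1UL << (order)), i.e. the
number of PAGE_SIZE pages covered by a block of that order.

#include <stdio.h>

#define PAGE_SIZE	4096UL
#define NR_PAGES(order)	(1UL << (order))	/* assumed semantics */

/* Simplified stand-in for struct drm_pagemap_addr: address + order only. */
struct pagemap_addr {
	unsigned long addr;	/* DMA address, 0 when not populated */
	unsigned int order;	/* 2^order pages; a 2MB page is order 9 */
};

int main(void)
{
	/* One order-9 (2MB) entry: the pagemap filled slot 0 only. */
	enum { NPAGES = 512 };
	static struct pagemap_addr sram_addr[NPAGES] = {
		[0] = { .addr = 0x100000000UL, .order = 9 },
	};
	unsigned long i, j;

	/*
	 * Same fixup as the patch: backfill the unpopulated slots behind
	 * each higher-order entry with contiguous PAGE_SIZE-spaced
	 * addresses derived from that entry's base address.
	 */
	for (i = 0; i < NPAGES;) {
		unsigned int order = sram_addr[i].order;

		for (j = 1; j < NR_PAGES(order) && i + j < NPAGES; j++)
			if (!sram_addr[i + j].addr)
				sram_addr[i + j].addr =
					sram_addr[i].addr + j * PAGE_SIZE;

		i += NR_PAGES(order);
	}

	printf("slot   1: %#lx\n", sram_addr[1].addr);		/* 0x100001000 */
	printf("slot 511: %#lx\n", sram_addr[NPAGES - 1].addr);	/* 0x1001ff000 */
	return 0;
}

For an order-0 entry NR_PAGES() is 1, so the inner loop never runs and the
outer loop simply advances one slot at a time; the fixup only does work
when the GPU pagemap hands back higher-order (e.g. 2MB, order-9) entries,
which is exactly the case the copy batch cannot yet handle natively.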