drm/xe/svm: Migrate folios when possible
author     Francois Dugast <francois.dugast@intel.com>
           Tue, 5 Aug 2025 13:59:07 +0000 (15:59 +0200)
committer  Francois Dugast <francois.dugast@intel.com>
           Wed, 6 Aug 2025 11:35:08 +0000 (13:35 +0200)
The DMA mapping can now correspond to a folio (order > 0), so move
the iterator by the number of pages in the folio in order to migrate
all pages at once.
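
As a minimal standalone illustration (hypothetical names, not the driver code), a loop walking per-page mappings can skip the tail pages of a higher-order folio by advancing its index by the folio's page count, much like the xe_svm.c hunk below does with NR_PAGES(order):

    /* Standalone sketch, not kernel code: names here are hypothetical. */
    #include <stddef.h>

    #define NR_PAGES(order) (1UL << (order))   /* pages in an order-N folio */

    struct pagemap_entry {
            unsigned long dev_addr;  /* device address the page is mapped at */
            unsigned int order;      /* folio order; 0 means a single page */
    };

    /* Visit each mapping once, skipping the tail pages of each folio. */
    static size_t count_migrations(const struct pagemap_entry *e, size_t npages)
    {
            size_t i, n = 0;

            for (i = 0; i < npages; i++) {
                    n++;                          /* one copy covers the whole folio */
                    if (e[i].order)
                            i += NR_PAGES(e[i].order) - 1;
            }
            return n;
    }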

This requires forcing contiguous memory for SVM BOs, which greatly
simplifies the code and enables 2MB device page support, a major
performance improvement. Negative effects such as extra eviction are
unlikely, as SVM BOs have a maximum size of 2MB.
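
Forcing contiguity is also what lets the xe_svm.c hunk below assert that each page's device address is a simple offset from the start of the run. A hedged sketch of that invariant (hypothetical helper, not one of the driver's functions):

    /* Illustrative only: holds only because the backing memory is contiguous. */
    #include <stdbool.h>
    #include <stddef.h>

    #define SKETCH_PAGE_SIZE 4096UL

    static bool range_is_contiguous(const unsigned long *dev_addr, size_t npages)
    {
            size_t i;

            for (i = 1; i < npages; i++)
                    /* Page i must sit exactly i pages past the first page. */
                    if (dev_addr[i] != dev_addr[0] + i * SKETCH_PAGE_SIZE)
                            return false;
            return true;
    }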

v2:
- Improve commit message (Matthew Brost)
- Fix increment, chunk, assert match (Matthew Brost)

Reviewed-by: Matthew Brost <matthew.brost@intel.com>
Link: https://lore.kernel.org/r/20250805140028.599361-7-francois.dugast@intel.com
Signed-off-by: Francois Dugast <francois.dugast@intel.com>
drivers/gpu/drm/xe/xe_bo.c
drivers/gpu/drm/xe/xe_svm.c

index 1a72561f987c80eeb95fdd0b1a3ec3b3dde1b333..6fea39842e1e628700bd38aa8484ef191b8b2776 100644 (file)
@@ -200,6 +200,8 @@ static bool force_contiguous(u32 bo_flags)
        else if (bo_flags & XE_BO_FLAG_PINNED &&
                 !(bo_flags & XE_BO_FLAG_PINNED_LATE_RESTORE))
                return true; /* needs vmap */
+       else if (bo_flags & XE_BO_FLAG_CPU_ADDR_MIRROR)
+               return true;
 
        /*
         * For eviction / restore on suspend / resume objects pinned in VRAM
index 1d097e76aabc7ecf82d98dfc7d370e9010ca6923..e35c6d4def20cc3a2c2ff999f1eb0068e2f9317c 100644 (file)
@@ -383,6 +383,14 @@ static int xe_svm_copy(struct page **pages,
                        }
 
                        match = vram_addr + PAGE_SIZE * (i - pos) == __vram_addr;
+                       /* Expected with contiguous memory */
+                       xe_assert(vr->xe, match);
+
+                       if (pagemap_addr[i].order) {
+                               i += NR_PAGES(pagemap_addr[i].order) - 1;
+                               chunk = (i - pos) == (XE_MIGRATE_CHUNK_SIZE / PAGE_SIZE);
+                               last = (i + 1) == npages;
+                       }
                }
 
                /*