drm/xe/migrate: Add helper function to program identity map
author     Akshata Jahagirdar <akshata.jahagirdar@intel.com>
           Wed, 17 Jul 2024 21:10:04 +0000 (21:10 +0000)
committer  Matt Roper <matthew.d.roper@intel.com>
           Thu, 18 Jul 2024 00:02:29 +0000 (17:02 -0700)
Add a helper function to program the identity map.

v2: Formatting nits

Signed-off-by: Akshata Jahagirdar <akshata.jahagirdar@intel.com>
Reviewed-by: Matthew Brost <matthew.brost@intel.com>
Signed-off-by: Matt Roper <matthew.d.roper@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/91dc05f05bd33076fb9a9f74f8495b48d2abff53.1721250309.git.akshata.jahagirdar@intel.com
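
For context, the sketch below (not part of the patch) shows what the identity map provides: once VRAM is identity-mapped at a 256GiB offset in the migrate VM, translating a VRAM device physical address into a GPU virtual address is plain arithmetic, as xe_migrate_vram_ofs() in the diff's context lines does. The standalone helper framing here is illustrative only.

/*
 * Illustrative sketch, not from the patch: with 256 level-2 (1GiB) PDEs
 * identity-mapping VRAM, the map starts at the 256GiB mark of the migrate
 * VM, so a device physical address converts to a GPU VA by a fixed offset.
 * Compare xe_migrate_vram_ofs() in the surrounding context of the diff.
 */
static u64 example_vram_gpu_va(struct xe_device *xe, u64 addr)
{
	/* xe_pt_shift(2) == 30, so 256ULL << 30 == a 256GiB offset */
	return addr + (256ULL << xe_pt_shift(2));
}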
drivers/gpu/drm/xe/xe_migrate.c

index 85eec95c9bc271131223c511e46a94daa81019cf..49ad5d8443cf2ca4c50206d8bf6f1da2a5669550 100644 (file)
@@ -130,6 +130,51 @@ static u64 xe_migrate_vram_ofs(struct xe_device *xe, u64 addr)
        return addr + (256ULL << xe_pt_shift(2));
 }
 
+static void xe_migrate_program_identity(struct xe_device *xe, struct xe_vm *vm, struct xe_bo *bo,
+                                       u64 map_ofs, u64 vram_offset, u16 pat_index, u64 pt_2m_ofs)
+{
+       u64 pos, ofs, flags;
+       u64 entry;
+       /* XXX: Unclear if this should be usable_size? */
+       u64 vram_limit = xe->mem.vram.actual_physical_size +
+               xe->mem.vram.dpa_base;
+       u32 level = 2;
+
+       ofs = map_ofs + XE_PAGE_SIZE * level + vram_offset * 8;
+       flags = vm->pt_ops->pte_encode_addr(xe, 0, pat_index, level,
+                                           true, 0);
+
+       xe_assert(xe, IS_ALIGNED(xe->mem.vram.usable_size, SZ_2M));
+
+       /*
+        * Use 1GB pages when possible; the last chunk always uses 2M
+        * pages, as mixing reserved memory (stolen, WOCPM) with a single
+        * mapping is not allowed on certain platforms.
+        */
+       for (pos = xe->mem.vram.dpa_base; pos < vram_limit;
+            pos += SZ_1G, ofs += 8) {
+               if (pos + SZ_1G >= vram_limit) {
+                       entry = vm->pt_ops->pde_encode_bo(bo, pt_2m_ofs,
+                                                         pat_index);
+                       xe_map_wr(xe, &bo->vmap, ofs, u64, entry);
+
+                       flags = vm->pt_ops->pte_encode_addr(xe, 0,
+                                                           pat_index,
+                                                           level - 1,
+                                                           true, 0);
+
+                       for (ofs = pt_2m_ofs; pos < vram_limit;
+                            pos += SZ_2M, ofs += 8)
+                               xe_map_wr(xe, &bo->vmap, ofs, u64, pos | flags);
+                       break;  /* Ensure the pos == vram_limit assert holds */
+               }
+
+               xe_map_wr(xe, &bo->vmap, ofs, u64, pos | flags);
+       }
+
+       xe_assert(xe, pos == vram_limit);
+}
+
 static int xe_migrate_prepare_vm(struct xe_tile *tile, struct xe_migrate *m,
                                 struct xe_vm *vm)
 {
@@ -253,47 +298,10 @@ static int xe_migrate_prepare_vm(struct xe_tile *tile, struct xe_migrate *m,
 
        /* Identity map the entire vram at 256GiB offset */
        if (IS_DGFX(xe)) {
-               u64 pos, ofs, flags;
-               /* XXX: Unclear if this should be usable_size? */
-               u64 vram_limit =  xe->mem.vram.actual_physical_size +
-                       xe->mem.vram.dpa_base;
-
-               level = 2;
-               ofs = map_ofs + XE_PAGE_SIZE * level + 256 * 8;
-               flags = vm->pt_ops->pte_encode_addr(xe, 0, pat_index, level,
-                                                   true, 0);
-
-               xe_assert(xe, IS_ALIGNED(xe->mem.vram.usable_size, SZ_2M));
-
-               /*
-                * Use 1GB pages when possible, last chunk always use 2M
-                * pages as mixing reserved memory (stolen, WOCPM) with a single
-                * mapping is not allowed on certain platforms.
-                */
-               for (pos = xe->mem.vram.dpa_base; pos < vram_limit;
-                    pos += SZ_1G, ofs += 8) {
-                       if (pos + SZ_1G >= vram_limit) {
-                               u64 pt31_ofs = bo->size - XE_PAGE_SIZE;
-
-                               entry = vm->pt_ops->pde_encode_bo(bo, pt31_ofs,
-                                                                 pat_index);
-                               xe_map_wr(xe, &bo->vmap, ofs, u64, entry);
-
-                               flags = vm->pt_ops->pte_encode_addr(xe, 0,
-                                                                   pat_index,
-                                                                   level - 1,
-                                                                   true, 0);
-
-                               for (ofs = pt31_ofs; pos < vram_limit;
-                                    pos += SZ_2M, ofs += 8)
-                                       xe_map_wr(xe, &bo->vmap, ofs, u64, pos | flags);
-                               break;  /* Ensure pos == vram_limit assert correct */
-                       }
-
-                       xe_map_wr(xe, &bo->vmap, ofs, u64, pos | flags);
-               }
+               u64 pt31_ofs = bo->size - XE_PAGE_SIZE;
 
-               xe_assert(xe, pos == vram_limit);
+               xe_migrate_program_identity(xe, vm, bo, map_ofs, 256, pat_index, pt31_ofs);
+               xe_assert(xe, (xe->mem.vram.actual_physical_size <= SZ_256G));
        }
 
        /*