drm/xe/madvise: Skip vma invalidation if mem attr are unchanged
Author:     Himal Prasad Ghimiray <himal.prasad.ghimiray@intel.com>
AuthorDate: Thu, 21 Aug 2025 17:31:00 +0000 (23:01 +0530)
Commit:     Himal Prasad Ghimiray <himal.prasad.ghimiray@intel.com>
CommitDate: Tue, 26 Aug 2025 05:55:36 +0000 (11:25 +0530)
If a VMA within the madvise input range already has the same memory
attribute as the one requested by the user, skip PTE zapping for that
VMA to avoid unnecessary invalidation.
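
In outline, every madvise helper now performs the same compare-and-skip
step on the cached attribute. A minimal, self-contained C sketch of the
pattern follows; the struct and names are illustrative stand-ins, not
the driver's types:

  #include <stdbool.h>
  #include <stdio.h>

  /* Illustrative stand-in for the driver's per-VMA attribute state. */
  struct vma {
          int pat_index;          /* currently applied memory attribute */
          bool skip_invalidation; /* set when madvise changes nothing */
  };

  /* Compare-and-skip: only dirty the VMA when the attribute changes. */
  static void madvise_pat(struct vma *v, int val)
  {
          if (v->pat_index == val) {
                  v->skip_invalidation = true;  /* PTE zap skipped later */
          } else {
                  v->skip_invalidation = false;
                  v->pat_index = val;
          }
  }

  int main(void)
  {
          struct vma v = { .pat_index = 3 };

          madvise_pat(&v, 3); /* same value requested */
          printf("unchanged: skip=%d\n", v.skip_invalidation);
          madvise_pat(&v, 5); /* new value requested */
          printf("changed:   skip=%d\n", v.skip_invalidation);
          return 0;
  }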

v2 (Matthew Brost)
- fix skip_invalidation for new attributes
- s/u32/bool
- Remove unnecessary assignment for kzalloc'ed memory

Suggested-by: Matthew Brost <matthew.brost@intel.com>
Reviewed-by: Matthew Brost <matthew.brost@intel.com>
Link: https://lore.kernel.org/r/20250821173104.3030148-17-himal.prasad.ghimiray@intel.com
Signed-off-by: Himal Prasad Ghimiray <himal.prasad.ghimiray@intel.com>
drivers/gpu/drm/xe/xe_vm_madvise.c
drivers/gpu/drm/xe/xe_vm_types.h

diff --git a/drivers/gpu/drm/xe/xe_vm_madvise.c b/drivers/gpu/drm/xe/xe_vm_madvise.c
index 212a03178d65a9e5ee6ca9e02229676d1d01d98a..7813bdedacaa491b8e7433e78b06b5abe71ad0a0 100644
--- a/drivers/gpu/drm/xe/xe_vm_madvise.c
+++ b/drivers/gpu/drm/xe/xe_vm_madvise.c
@@ -85,16 +85,20 @@ static void madvise_preferred_mem_loc(struct xe_device *xe, struct xe_vm *vm,
 
        for (i = 0; i < num_vmas; i++) {
                /*TODO: Extend attributes to bo based vmas */
-               if (!xe_vma_is_cpu_addr_mirror(vmas[i]))
-                       continue;
-
-               vmas[i]->attr.preferred_loc.devmem_fd = op->preferred_mem_loc.devmem_fd;
-
-               /* Till multi-device support is not added migration_policy
-                * is of no use and can be ignored.
-                */
-               vmas[i]->attr.preferred_loc.migration_policy =
+               if ((vmas[i]->attr.preferred_loc.devmem_fd == op->preferred_mem_loc.devmem_fd &&
+                    vmas[i]->attr.preferred_loc.migration_policy ==
+                    op->preferred_mem_loc.migration_policy) ||
+                   !xe_vma_is_cpu_addr_mirror(vmas[i])) {
+                       vmas[i]->skip_invalidation = true;
+               } else {
+                       vmas[i]->skip_invalidation = false;
+                       vmas[i]->attr.preferred_loc.devmem_fd = op->preferred_mem_loc.devmem_fd;
+                       /* Till multi-device support is not added migration_policy
+                        * is of no use and can be ignored.
+                        */
+                       vmas[i]->attr.preferred_loc.migration_policy =
                                                op->preferred_mem_loc.migration_policy;
+               }
        }
 }
 
@@ -109,17 +113,25 @@ static void madvise_atomic(struct xe_device *xe, struct xe_vm *vm,
        xe_assert(vm->xe, op->atomic.val <= DRM_XE_ATOMIC_CPU);
 
        for (i = 0; i < num_vmas; i++) {
-               if ((xe_vma_is_userptr(vmas[i]) &&
-                    !(op->atomic.val == DRM_XE_ATOMIC_DEVICE &&
-                      xe->info.has_device_atomics_on_smem)))
+               if (xe_vma_is_userptr(vmas[i]) &&
+                   !(op->atomic.val == DRM_XE_ATOMIC_DEVICE &&
+                     xe->info.has_device_atomics_on_smem)) {
+                       vmas[i]->skip_invalidation = true;
                        continue;
+               }
 
-               vmas[i]->attr.atomic_access = op->atomic.val;
+               if (vmas[i]->attr.atomic_access == op->atomic.val) {
+                       vmas[i]->skip_invalidation = true;
+               } else {
+                       vmas[i]->skip_invalidation = false;
+                       vmas[i]->attr.atomic_access = op->atomic.val;
+               }
 
                bo = xe_vma_bo(vmas[i]);
-               if (!bo)
+               if (!bo || bo->attr.atomic_access == op->atomic.val)
                        continue;
 
+               vmas[i]->skip_invalidation = false;
                xe_bo_assert_held(bo);
                bo->attr.atomic_access = op->atomic.val;
 
@@ -139,8 +151,14 @@ static void madvise_pat_index(struct xe_device *xe, struct xe_vm *vm,
 
        xe_assert(vm->xe, op->type == DRM_XE_MEM_RANGE_ATTR_PAT);
 
-       for (i = 0; i < num_vmas; i++)
-               vmas[i]->attr.pat_index = op->pat_index.val;
+       for (i = 0; i < num_vmas; i++) {
+               if (vmas[i]->attr.pat_index == op->pat_index.val) {
+                       vmas[i]->skip_invalidation = true;
+               } else {
+                       vmas[i]->skip_invalidation = false;
+                       vmas[i]->attr.pat_index = op->pat_index.val;
+               }
+       }
 }
 
 typedef void (*madvise_func)(struct xe_device *xe, struct xe_vm *vm,
@@ -157,7 +175,7 @@ static u8 xe_zap_ptes_in_madvise_range(struct xe_vm *vm, u64 start, u64 end)
 {
        struct drm_gpuva *gpuva;
        struct xe_tile *tile;
-       u8 id, tile_mask;
+       u8 id, tile_mask = 0;
 
        lockdep_assert_held_write(&vm->lock);
 
@@ -166,17 +184,20 @@ static u8 xe_zap_ptes_in_madvise_range(struct xe_vm *vm, u64 start, u64 end)
                                  false, MAX_SCHEDULE_TIMEOUT) <= 0)
                XE_WARN_ON(1);
 
-       tile_mask = xe_svm_ranges_zap_ptes_in_range(vm, start, end);
-
        drm_gpuvm_for_each_va_range(gpuva, &vm->gpuvm, start, end) {
                struct xe_vma *vma = gpuva_to_vma(gpuva);
 
-               if (xe_vma_is_cpu_addr_mirror(vma) || xe_vma_is_null(vma))
+               if (vma->skip_invalidation || xe_vma_is_null(vma))
                        continue;
 
-               for_each_tile(tile, vm->xe, id) {
-                       if (xe_pt_zap_ptes(tile, vma)) {
-                               tile_mask |= BIT(id);
+               if (xe_vma_is_cpu_addr_mirror(vma)) {
+                       tile_mask |= xe_svm_ranges_zap_ptes_in_range(vm,
+                                                                     xe_vma_start(vma),
+                                                                     xe_vma_end(vma));
+               } else {
+                       for_each_tile(tile, vm->xe, id) {
+                               if (xe_pt_zap_ptes(tile, vma)) {
+                                       tile_mask |= BIT(id);
 
                                /*
                                 * WRITE_ONCE pairs with READ_ONCE
@@ -184,6 +205,7 @@ static u8 xe_zap_ptes_in_madvise_range(struct xe_vm *vm, u64 start, u64 end)
                                 */
                                WRITE_ONCE(vma->tile_invalidated,
                                           vma->tile_invalidated | BIT(id));
+                               }
                        }
                }
        }
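
Aside: tile_mask is initialized to 0 above because the SVM zap no longer
seeds it once for the whole range; each non-skipped VMA ORs in its own
tiles, and VMAs with skip_invalidation set contribute nothing. A rough,
self-contained C sketch of that accumulation (BIT() and the per-tile zap
are modeled locally here, not the driver's helpers):

  #include <stdbool.h>
  #include <stdint.h>
  #include <stdio.h>

  #define BIT(n) (1u << (n))
  #define MAX_TILES 2

  struct vma {
          bool skip_invalidation;
          bool zap_needed[MAX_TILES]; /* models xe_pt_zap_ptes() per tile */
  };

  /* OR in one bit per tile that actually had PTEs zapped. */
  static uint8_t zap_range(const struct vma *vmas, int n)
  {
          uint8_t tile_mask = 0; /* must start empty: nothing zapped yet */
          int i, id;

          for (i = 0; i < n; i++) {
                  if (vmas[i].skip_invalidation)
                          continue; /* attributes unchanged, PTEs stay */
                  for (id = 0; id < MAX_TILES; id++)
                          if (vmas[i].zap_needed[id])
                                  tile_mask |= BIT(id);
          }
          return tile_mask;
  }

  int main(void)
  {
          const struct vma vmas[2] = {
                  { .skip_invalidation = true,  .zap_needed = { true, true } },
                  { .skip_invalidation = false, .zap_needed = { false, true } },
          };

          /* Prints 0x2: VMA 0 is skipped, VMA 1 touched only tile 1. */
          printf("tile_mask = 0x%x\n", zap_range(vmas, 2));
          return 0;
  }
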
diff --git a/drivers/gpu/drm/xe/xe_vm_types.h b/drivers/gpu/drm/xe/xe_vm_types.h
index 5704f379c2256ecd4cb3ce3ac14558788e208fea..35b6344158f5cfac5dd31422b60862f6ee8218fb 100644
--- a/drivers/gpu/drm/xe/xe_vm_types.h
+++ b/drivers/gpu/drm/xe/xe_vm_types.h
@@ -157,6 +157,12 @@ struct xe_vma {
        /** @tile_staged: bind is staged for this VMA */
        u8 tile_staged;
 
+       /**
+        * @skip_invalidation: Used in madvise to avoid invalidation
+        * if mem attributes don't change
+        */
+       bool skip_invalidation;
+
        /**
         * @ufence: The user fence that was provided with MAP.
         * Needs to be signalled before UNMAP can be processed.
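
As an aside on the WRITE_ONCE noted in the zap loop: the kernel idiom
guarantees tear-free, non-elided accesses that pair with lockless
READ_ONCE readers; the update itself is still serialized by the VM lock.
A loose C11 userspace analog using relaxed atomics (an approximation of
the kernel macros, not their implementation):

  #include <stdatomic.h>
  #include <stdint.h>
  #include <stdio.h>

  #define BIT(n) (1u << (n))

  /* Loose analog of WRITE_ONCE/READ_ONCE: relaxed atomics give
   * tear-free, non-elided accesses and nothing stronger. In the driver
   * the read-modify-write runs under the VM lock; the marking only
   * protects concurrent lockless readers. */
  static _Atomic uint8_t tile_invalidated;

  static void mark_tile_invalidated(unsigned int id)
  {
          uint8_t old = atomic_load_explicit(&tile_invalidated,
                                             memory_order_relaxed);

          atomic_store_explicit(&tile_invalidated, old | BIT(id),
                                memory_order_relaxed);
  }

  int main(void)
  {
          mark_tile_invalidated(0);
          mark_tile_invalidated(1);
          printf("tile_invalidated = 0x%x\n",
                 atomic_load_explicit(&tile_invalidated,
                                      memory_order_relaxed));
          return 0;
  }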