www.infradead.org Git - users/hch/misc.git/commitdiff
drm/xe/svm: Add xe_svm_ranges_zap_ptes_in_range() for PTE zapping
authorHimal Prasad Ghimiray <himal.prasad.ghimiray@intel.com>
Thu, 21 Aug 2025 17:30:51 +0000 (23:00 +0530)
committerHimal Prasad Ghimiray <himal.prasad.ghimiray@intel.com>
Tue, 26 Aug 2025 05:55:35 +0000 (11:25 +0530)
Introduce xe_svm_ranges_zap_ptes_in_range(), a function to zap page table
entries (PTEs) for all SVM ranges within a user-specified address range.

v2 (Matthew Brost)
- Lock should be taken even for tlb_invalidation

v3 (Matthew Brost)
- Update comment
- s/notifier->itree.start/drm_gpusvm_notifier_start
- s/notifier->itree.last + 1/drm_gpusvm_notifier_end
- use WRITE_ONCE

Cc: Matthew Brost <matthew.brost@intel.com>
Reviewed-by: Matthew Brost <matthew.brost@intel.com>
Link: https://lore.kernel.org/r/20250821173104.3030148-8-himal.prasad.ghimiray@intel.com
Signed-off-by: Himal Prasad Ghimiray <himal.prasad.ghimiray@intel.com>
drivers/gpu/drm/xe/xe_pt.c
drivers/gpu/drm/xe/xe_svm.c
drivers/gpu/drm/xe/xe_svm.h

index ba7a50bf3a2de18b5d6f55e601ef86c9ae33245f..bf50a821853e61cac3944153d8cffb13de7427d1 100644 (file)
@@ -950,7 +950,19 @@ bool xe_pt_zap_ptes_range(struct xe_tile *tile, struct xe_vm *vm,
        struct xe_pt *pt = vm->pt_root[tile->id];
        u8 pt_mask = (range->tile_present & ~range->tile_invalidated);
 
-       xe_svm_assert_in_notifier(vm);
+       /*
+        * Locking rules:
+        *
+        * - notifier_lock (write): full protection against page table changes
+        *   and MMU notifier invalidations.
+        *
+        * - notifier_lock (read) + vm_lock (write): combined protection against
+        *   invalidations and concurrent page table modifications. (e.g., madvise)
+        *
+        */
+       lockdep_assert(lockdep_is_held_type(&vm->svm.gpusvm.notifier_lock, 0) ||
+                      (lockdep_is_held_type(&vm->svm.gpusvm.notifier_lock, 1) &&
+                      lockdep_is_held_type(&vm->lock, 0)));
 
        if (!(pt_mask & BIT(tile->id)))
                return false;
index ce42100cb753e4ef16135faabfa5b2b9cbf69303..c2306000f15e1820d7aa028dea1b16f862abf5d6 100644 (file)
@@ -1031,6 +1031,56 @@ int xe_svm_range_get_pages(struct xe_vm *vm, struct xe_svm_range *range,
        return err;
 }
 
+/**
+ * xe_svm_ranges_zap_ptes_in_range - clear ptes of svm ranges in input range
+ * @vm: Pointer to the xe_vm structure
+ * @start: Start of the input range
+ * @end: End of the input range (exclusive, matching drm_gpusvm_notifier_end())
+ *
+ * This function removes the page table entries (PTEs) associated
+ * with the svm ranges within the given input start and end.
+ *
+ * The caller must hold the gpusvm notifier lock in read mode and vm->lock
+ * in write mode, as asserted below; PTEs are only zapped here, the caller
+ * is responsible for the subsequent TLB invalidation.
+ *
+ * Return: tile_mask of tiles whose PTEs were zapped and therefore need
+ * their GT TLBs invalidated.
+ */
+u8 xe_svm_ranges_zap_ptes_in_range(struct xe_vm *vm, u64 start, u64 end)
+{
+       struct drm_gpusvm_notifier *notifier;
+       struct xe_svm_range *range;
+       u64 adj_start, adj_end;
+       struct xe_tile *tile;
+       u8 tile_mask = 0;
+       u8 id;
+
+       /* notifier_lock (read) + vm->lock (write), see xe_pt_zap_ptes_range() */
+       lockdep_assert(lockdep_is_held_type(&vm->svm.gpusvm.notifier_lock, 1) &&
+                      lockdep_is_held_type(&vm->lock, 0));
+
+       drm_gpusvm_for_each_notifier(notifier, &vm->svm.gpusvm, start, end) {
+               struct drm_gpusvm_range *r = NULL;
+
+               /* Clamp the range walk to the overlap of notifier and input */
+               adj_start = max(start, drm_gpusvm_notifier_start(notifier));
+               adj_end = min(end, drm_gpusvm_notifier_end(notifier));
+               drm_gpusvm_for_each_range(r, notifier, adj_start, adj_end) {
+                       range = to_xe_range(r);
+                       for_each_tile(tile, vm->xe, id) {
+                               if (xe_pt_zap_ptes_range(tile, vm, range)) {
+                                       tile_mask |= BIT(id);
+                                       /*
+                                        * WRITE_ONCE pairs with READ_ONCE in
+                                        * xe_vm_has_valid_gpu_mapping().
+                                        * Must not fail after setting
+                                        * tile_invalidated and before
+                                        * TLB invalidation.
+                                        */
+                                       WRITE_ONCE(range->tile_invalidated,
+                                                  range->tile_invalidated | BIT(id));
+                               }
+                       }
+               }
+       }
+
+       return tile_mask;
+}
+
 #if IS_ENABLED(CONFIG_DRM_XE_PAGEMAP)
 
 static struct drm_pagemap *tile_local_pagemap(struct xe_tile *tile)
index 184b3f4f0b2a5da7172e6dabf6218608fd659f35..046a9c4e95c24a2c61b4055cf1cf83b8544370af 100644 (file)
@@ -92,6 +92,8 @@ u64 xe_svm_find_vma_start(struct xe_vm *vm, u64 addr, u64 end,  struct xe_vma *v
 
 void xe_svm_unmap_address_range(struct xe_vm *vm, u64 start, u64 end);
 
+u8 xe_svm_ranges_zap_ptes_in_range(struct xe_vm *vm, u64 start, u64 end);
+
 /**
  * xe_svm_range_has_dma_mapping() - SVM range has DMA mapping
  * @range: SVM range
@@ -310,6 +312,12 @@ void xe_svm_unmap_address_range(struct xe_vm *vm, u64 start, u64 end)
 {
 }
 
+/*
+ * Stub for builds without SVM support (presumably the !CONFIG_DRM_XE_GPUSVM
+ * section, guard not visible in this hunk — confirm): nothing to zap, so no
+ * tiles require TLB invalidation.
+ */
+static inline
+u8 xe_svm_ranges_zap_ptes_in_range(struct xe_vm *vm, u64 start, u64 end)
+{
+       return 0;
+}
+
 #define xe_svm_assert_in_notifier(...) do {} while (0)
 #define xe_svm_range_has_dma_mapping(...) false