drm/xe/bo: Update atomic_access attribute on madvise
author Himal Prasad Ghimiray <himal.prasad.ghimiray@intel.com>
Thu, 21 Aug 2025 17:30:59 +0000 (23:00 +0530)
committer Himal Prasad Ghimiray <himal.prasad.ghimiray@intel.com>
Tue, 26 Aug 2025 05:55:36 +0000 (11:25 +0530)
Update the bo atomic_access attribute based on user-provided input and
use it to determine whether the bo should be migrated to smem during a
CPU fault.

v2 (Matthew Brost)
- Avoid cpu unmapping if bo is already in smem
- Check atomics on smem too for ioctl
- Add comments

v3
- Avoid migration in prefetch

v4 (Matthew Brost)
- Make sanity check function bool
- Add assert for smem placement
- Fix doc

v5 (Matthew Brost)
- NACK atomic fault with DRM_XE_ATOMIC_CPU

Cc: Matthew Brost <matthew.brost@intel.com>
Reviewed-by: Matthew Brost <matthew.brost@intel.com>
Link: https://lore.kernel.org/r/20250821173104.3030148-16-himal.prasad.ghimiray@intel.com
Signed-off-by: Himal Prasad Ghimiray <himal.prasad.ghimiray@intel.com>
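
For context, a minimal userspace sketch of how a range's atomic attribute could be set through the madvise uAPI that this series exercises. Only the type and atomic.val fields (and the DRM_XE_MEM_RANGE_ATTR_ATOMIC / DRM_XE_ATOMIC_* values) appear in this diff; the ioctl macro name, the vm_id/start/range fields, and the header path are assumptions and may differ from the final uAPI.

#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <drm/xe_drm.h>  /* assumed to carry the madvise uAPI from this series */

/*
 * Illustrative only, not part of this patch: request CPU-capable atomics on
 * a VM range so the kernel places (or later migrates) the backing store
 * accordingly. DRM_IOCTL_XE_MADVISE and the vm_id/start/range fields are
 * assumptions based on the wider madvise series; only .type and .atomic.val
 * are visible in this diff.
 */
static int xe_set_atomic_global(int fd, uint32_t vm_id,
                                uint64_t start, uint64_t range)
{
        struct drm_xe_madvise op;

        memset(&op, 0, sizeof(op));
        op.vm_id = vm_id;
        op.start = start;
        op.range = range;
        op.type = DRM_XE_MEM_RANGE_ATTR_ATOMIC;
        op.atomic.val = DRM_XE_ATOMIC_GLOBAL;

        return ioctl(fd, DRM_IOCTL_XE_MADVISE, &op);
}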
drivers/gpu/drm/xe/xe_bo.c
drivers/gpu/drm/xe/xe_gt_pagefault.c
drivers/gpu/drm/xe/xe_vm.c
drivers/gpu/drm/xe/xe_vm_madvise.c

diff --git a/drivers/gpu/drm/xe/xe_bo.c b/drivers/gpu/drm/xe/xe_bo.c
index 0a6a32cfb771f427daa06dd98fbdf58fa79755b2..7d1ff642b02aa56639f96720239f34a6ae7db297 100644
@@ -1712,6 +1712,18 @@ static void xe_gem_object_close(struct drm_gem_object *obj,
        }
 }
 
+static bool should_migrate_to_smem(struct xe_bo *bo)
+{
+       /*
+        * NOTE: The following atomic checks are platform-specific. For example,
+        * if a device supports CXL atomics, these may not be necessary or
+        * may behave differently.
+        */
+
+       return bo->attr.atomic_access == DRM_XE_ATOMIC_GLOBAL ||
+              bo->attr.atomic_access == DRM_XE_ATOMIC_CPU;
+}
+
 static vm_fault_t xe_gem_fault(struct vm_fault *vmf)
 {
        struct ttm_buffer_object *tbo = vmf->vma->vm_private_data;
@@ -1720,7 +1732,7 @@ static vm_fault_t xe_gem_fault(struct vm_fault *vmf)
        struct xe_bo *bo = ttm_to_xe_bo(tbo);
        bool needs_rpm = bo->flags & XE_BO_FLAG_VRAM_MASK;
        vm_fault_t ret;
-       int idx;
+       int idx, r = 0;
 
        if (needs_rpm)
                xe_pm_runtime_get(xe);
@@ -1732,8 +1744,19 @@ static vm_fault_t xe_gem_fault(struct vm_fault *vmf)
        if (drm_dev_enter(ddev, &idx)) {
                trace_xe_bo_cpu_fault(bo);
 
-               ret = ttm_bo_vm_fault_reserved(vmf, vmf->vma->vm_page_prot,
-                                              TTM_BO_VM_NUM_PREFAULT);
+               if (should_migrate_to_smem(bo)) {
+                       xe_assert(xe, bo->flags & XE_BO_FLAG_SYSTEM);
+
+                       r = xe_bo_migrate(bo, XE_PL_TT);
+                       if (r == -EBUSY || r == -ERESTARTSYS || r == -EINTR)
+                               ret = VM_FAULT_NOPAGE;
+                       else if (r)
+                               ret = VM_FAULT_SIGBUS;
+               }
+               if (!ret)
+                       ret = ttm_bo_vm_fault_reserved(vmf,
+                                                      vmf->vma->vm_page_prot,
+                                                      TTM_BO_VM_NUM_PREFAULT);
                drm_dev_exit(idx);
 
                if (ret == VM_FAULT_RETRY &&
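
The error handling added above follows the usual TTM fault convention. As an illustration only (the helper name below is not part of the patch), the errno-to-vm_fault_t policy in xe_gem_fault() can be read as:

#include <linux/mm.h>      /* vm_fault_t, VM_FAULT_* */
#include <linux/errno.h>

/*
 * Illustrative restatement of the policy in xe_gem_fault() above:
 * transient results from xe_bo_migrate() turn into VM_FAULT_NOPAGE so
 * the CPU fault is simply retried; any other failure is fatal.
 */
static vm_fault_t migrate_err_to_fault(int r)
{
        if (r == -EBUSY || r == -ERESTARTSYS || r == -EINTR)
                return VM_FAULT_NOPAGE;  /* back off, fault will be retried */
        if (r)
                return VM_FAULT_SIGBUS;  /* unrecoverable migration failure */
        return 0;                        /* fall through to ttm_bo_vm_fault_reserved() */
}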
diff --git a/drivers/gpu/drm/xe/xe_gt_pagefault.c b/drivers/gpu/drm/xe/xe_gt_pagefault.c
index ab43dec527768936d8db593d6053205a393c668d..4ea30fbce9bdc82541134ae858b78f4b70be1a0a 100644
@@ -75,7 +75,7 @@ static bool vma_is_valid(struct xe_tile *tile, struct xe_vma *vma)
 }
 
 static int xe_pf_begin(struct drm_exec *exec, struct xe_vma *vma,
-                      bool atomic, struct xe_vram_region *vram)
+                      bool need_vram_move, struct xe_vram_region *vram)
 {
        struct xe_bo *bo = xe_vma_bo(vma);
        struct xe_vm *vm = xe_vma_vm(vma);
@@ -85,26 +85,13 @@ static int xe_pf_begin(struct drm_exec *exec, struct xe_vma *vma,
        if (err)
                return err;
 
-       if (atomic && vram) {
-               xe_assert(vm->xe, IS_DGFX(vm->xe));
+       if (!bo)
+               return 0;
 
-               if (xe_vma_is_userptr(vma)) {
-                       err = -EACCES;
-                       return err;
-               }
+       err = need_vram_move ? xe_bo_migrate(bo, vram->placement) :
+                              xe_bo_validate(bo, vm, true);
 
-               /* Migrate to VRAM, move should invalidate the VMA first */
-               err = xe_bo_migrate(bo, vram->placement);
-               if (err)
-                       return err;
-       } else if (bo) {
-               /* Create backing store if needed */
-               err = xe_bo_validate(bo, vm, true);
-               if (err)
-                       return err;
-       }
-
-       return 0;
+       return err;
 }
 
 static int handle_vma_pagefault(struct xe_gt *gt, struct xe_vma *vma,
@@ -115,10 +102,14 @@ static int handle_vma_pagefault(struct xe_gt *gt, struct xe_vma *vma,
        struct drm_exec exec;
        struct dma_fence *fence;
        ktime_t end = 0;
-       int err;
+       int err, needs_vram;
 
        lockdep_assert_held_write(&vm->lock);
 
+       needs_vram = xe_vma_need_vram_for_atomic(vm->xe, vma, atomic);
+       if (needs_vram < 0 || (needs_vram && xe_vma_is_userptr(vma)))
+               return needs_vram < 0 ? needs_vram : -EACCES;
+
        xe_gt_stats_incr(gt, XE_GT_STATS_ID_VMA_PAGEFAULT_COUNT, 1);
        xe_gt_stats_incr(gt, XE_GT_STATS_ID_VMA_PAGEFAULT_KB, xe_vma_size(vma) / 1024);
 
@@ -141,7 +132,7 @@ retry_userptr:
        /* Lock VM and BOs dma-resv */
        drm_exec_init(&exec, 0, 0);
        drm_exec_until_all_locked(&exec) {
-               err = xe_pf_begin(&exec, vma, atomic, tile->mem.vram);
+               err = xe_pf_begin(&exec, vma, needs_vram == 1, tile->mem.vram);
                drm_exec_retry_on_contention(&exec);
                if (xe_vm_validate_should_retry(&exec, err, &end))
                        err = -EAGAIN;
@@ -576,7 +567,7 @@ static int handle_acc(struct xe_gt *gt, struct acc *acc)
        /* Lock VM and BOs dma-resv */
        drm_exec_init(&exec, 0, 0);
        drm_exec_until_all_locked(&exec) {
-               ret = xe_pf_begin(&exec, vma, true, tile->mem.vram);
+               ret = xe_pf_begin(&exec, vma, IS_DGFX(vm->xe), tile->mem.vram);
                drm_exec_retry_on_contention(&exec);
                if (ret)
                        break;
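
For reference, the tri-state contract that handle_vma_pagefault() now relies on, written out as a caller-side sketch. The helper name is illustrative and not part of the patch; xe_vm.h is assumed to declare xe_vma_need_vram_for_atomic().

#include "xe_vm.h"  /* assumed to declare xe_vma_need_vram_for_atomic() */

/*
 * Illustrative helper, not part of the patch: < 0 propagates an error,
 * 0 means the fault can be served from the current placement, 1 means
 * the atomic mode demands VRAM, which userptr VMAs cannot satisfy.
 */
static int resolve_atomic_placement(struct xe_vm *vm, struct xe_vma *vma,
                                    bool atomic, bool *need_vram_move)
{
        int needs_vram = xe_vma_need_vram_for_atomic(vm->xe, vma, atomic);

        if (needs_vram < 0)
                return needs_vram;
        if (needs_vram && xe_vma_is_userptr(vma))
                return -EACCES;

        *need_vram_move = needs_vram == 1;
        return 0;
}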
diff --git a/drivers/gpu/drm/xe/xe_vm.c b/drivers/gpu/drm/xe/xe_vm.c
index c765c96b9bdeadc189bcf8e9b52c99e8e03b22c2..6574ae56a7306d25abaa47d92d66d722c443118f 100644
@@ -4242,15 +4242,18 @@ void xe_vm_snapshot_free(struct xe_vm_snapshot *snap)
  */
 int xe_vma_need_vram_for_atomic(struct xe_device *xe, struct xe_vma *vma, bool is_atomic)
 {
+       u32 atomic_access = xe_vma_bo(vma) ? xe_vma_bo(vma)->attr.atomic_access :
+                                            vma->attr.atomic_access;
+
        if (!IS_DGFX(xe) || !is_atomic)
-               return 0;
+               return false;
 
        /*
         * NOTE: The checks implemented here are platform-specific. For
         * instance, on a device supporting CXL atomics, these would ideally
         * work universally without additional handling.
         */
-       switch (vma->attr.atomic_access) {
+       switch (atomic_access) {
        case DRM_XE_ATOMIC_DEVICE:
                return !xe->info.has_device_atomics_on_smem;
 
diff --git a/drivers/gpu/drm/xe/xe_vm_madvise.c b/drivers/gpu/drm/xe/xe_vm_madvise.c
index 3bd52063f9c21785f32e1743316fd1ba16dee056..212a03178d65a9e5ee6ca9e02229676d1d01d98a 100644
@@ -102,6 +102,7 @@ static void madvise_atomic(struct xe_device *xe, struct xe_vm *vm,
                           struct xe_vma **vmas, int num_vmas,
                           struct drm_xe_madvise *op)
 {
+       struct xe_bo *bo;
        int i;
 
        xe_assert(vm->xe, op->type == DRM_XE_MEM_RANGE_ATTR_ATOMIC);
@@ -114,7 +115,19 @@ static void madvise_atomic(struct xe_device *xe, struct xe_vm *vm,
                        continue;
 
                vmas[i]->attr.atomic_access = op->atomic.val;
-       /*TODO: handle bo backed vmas */
+
+               bo = xe_vma_bo(vmas[i]);
+               if (!bo)
+                       continue;
+
+               xe_bo_assert_held(bo);
+               bo->attr.atomic_access = op->atomic.val;
+
+               /* Invalidate cpu page table, so bo can migrate to smem in next access */
+               if (xe_bo_is_vram(bo) &&
+                   (bo->attr.atomic_access == DRM_XE_ATOMIC_CPU ||
+                    bo->attr.atomic_access == DRM_XE_ATOMIC_GLOBAL))
+                       ttm_bo_unmap_virtual(&bo->ttm);
        }
 }
 
@@ -262,6 +275,41 @@ static bool madvise_args_are_sane(struct xe_device *xe, const struct drm_xe_madv
        return true;
 }
 
+static bool check_bo_args_are_sane(struct xe_vm *vm, struct xe_vma **vmas,
+                                  int num_vmas, u32 atomic_val)
+{
+       struct xe_device *xe = vm->xe;
+       struct xe_bo *bo;
+       int i;
+
+       for (i = 0; i < num_vmas; i++) {
+               bo = xe_vma_bo(vmas[i]);
+               if (!bo)
+                       continue;
+               /*
+                * NOTE: The following atomic checks are platform-specific. For example,
+                * if a device supports CXL atomics, these may not be necessary or
+                * may behave differently.
+                */
+               if (XE_IOCTL_DBG(xe, atomic_val == DRM_XE_ATOMIC_CPU &&
+                                !(bo->flags & XE_BO_FLAG_SYSTEM)))
+                       return false;
+
+               if (XE_IOCTL_DBG(xe, atomic_val == DRM_XE_ATOMIC_DEVICE &&
+                                !(bo->flags & XE_BO_FLAG_VRAM0) &&
+                                !(bo->flags & XE_BO_FLAG_VRAM1) &&
+                                !(bo->flags & XE_BO_FLAG_SYSTEM &&
+                                  xe->info.has_device_atomics_on_smem)))
+                       return false;
+
+               if (XE_IOCTL_DBG(xe, atomic_val == DRM_XE_ATOMIC_GLOBAL &&
+                                (!(bo->flags & XE_BO_FLAG_SYSTEM) ||
+                                 (!(bo->flags & XE_BO_FLAG_VRAM0) &&
+                                  !(bo->flags & XE_BO_FLAG_VRAM1)))))
+                       return false;
+       }
+       return true;
+}
 /**
  * xe_vm_madvise_ioctl - Handle MADVise ioctl for a VM
  * @dev: DRM device pointer
@@ -313,6 +361,15 @@ int xe_vm_madvise_ioctl(struct drm_device *dev, void *data, struct drm_file *fil
                goto unlock_vm;
 
        if (madvise_range.has_bo_vmas) {
+               if (args->type == DRM_XE_MEM_RANGE_ATTR_ATOMIC) {
+                       if (!check_bo_args_are_sane(vm, madvise_range.vmas,
+                                                   madvise_range.num_vmas,
+                                                   args->atomic.val)) {
+                               err = -EINVAL;
+                               goto unlock_vm;
+                       }
+               }
+
                drm_exec_init(&exec, DRM_EXEC_IGNORE_DUPLICATES | DRM_EXEC_INTERRUPTIBLE_WAIT, 0);
                drm_exec_until_all_locked(&exec) {
                        for (int i = 0; i < madvise_range.num_vmas; i++) {
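
Finally, the placement rules enforced by check_bo_args_are_sane() above can be summarized as below. This is an illustrative restatement, not code from the patch; the helper name and header choices are assumptions.

#include "xe_bo.h"            /* assumed: struct xe_bo and XE_BO_FLAG_* */
#include "xe_device_types.h"  /* assumed: struct xe_device */

/*
 * Illustrative restatement of the per-mode rules checked above:
 *  - DRM_XE_ATOMIC_CPU:    the bo must allow system-memory placement.
 *  - DRM_XE_ATOMIC_DEVICE: the bo must allow VRAM placement, or system
 *                          memory on platforms with device atomics on smem.
 *  - DRM_XE_ATOMIC_GLOBAL: the bo must allow both system and VRAM placement.
 */
static bool atomic_mode_allowed(struct xe_device *xe, struct xe_bo *bo, u32 val)
{
        bool has_smem = bo->flags & XE_BO_FLAG_SYSTEM;
        bool has_vram = bo->flags & (XE_BO_FLAG_VRAM0 | XE_BO_FLAG_VRAM1);

        switch (val) {
        case DRM_XE_ATOMIC_CPU:
                return has_smem;
        case DRM_XE_ATOMIC_DEVICE:
                return has_vram || (has_smem && xe->info.has_device_atomics_on_smem);
        case DRM_XE_ATOMIC_GLOBAL:
                return has_smem && has_vram;
        default:
                return true;  /* other values are not restricted by this check */
        }
}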