www.infradead.org Git - users/hch/misc.git/commitdiff
drm/xe/vma: Modify new_vma to accept struct xe_vma_mem_attr as parameter
author: Himal Prasad Ghimiray <himal.prasad.ghimiray@intel.com>
Thu, 21 Aug 2025 17:30:48 +0000 (23:00 +0530)
committer: Himal Prasad Ghimiray <himal.prasad.ghimiray@intel.com>
Tue, 26 Aug 2025 05:55:35 +0000 (11:25 +0530)
This change simplifies the logic by ensuring that remapped previous or
next VMAs are created with the same memory attributes as the original VMA.
By passing struct xe_vma_mem_attr as a parameter, we maintain consistency
in memory attributes.

-v2
 *dst = *src (Matthew Brost)

-v3 (Matthew Brost)
 Drop unnecessary helper
 pass attr ptr as input to new_vma and vma_create

Cc: Matthew Brost <matthew.brost@intel.com>
Reviewed-by: Matthew Brost <matthew.brost@intel.com>
Link: https://lore.kernel.org/r/20250821173104.3030148-5-himal.prasad.ghimiray@intel.com
Signed-off-by: Himal Prasad Ghimiray <himal.prasad.ghimiray@intel.com>
drivers/gpu/drm/xe/xe_vm.c

index bb831629d1499f9a12a5572bb486bb5c2b18548b..12b478e7ef0114d4071891d5fffea14c1867c1fc 100644 (file)
@@ -1168,7 +1168,8 @@ static struct xe_vma *xe_vma_create(struct xe_vm *vm,
                                    struct xe_bo *bo,
                                    u64 bo_offset_or_userptr,
                                    u64 start, u64 end,
-                                   u16 pat_index, unsigned int flags)
+                                   struct xe_vma_mem_attr *attr,
+                                   unsigned int flags)
 {
        struct xe_vma *vma;
        struct xe_tile *tile;
@@ -1223,7 +1224,7 @@ static struct xe_vma *xe_vma_create(struct xe_vm *vm,
        if (vm->xe->info.has_atomic_enable_pte_bit)
                vma->gpuva.flags |= XE_VMA_ATOMIC_PTE_BIT;
 
-       vma->attr.pat_index = pat_index;
+       vma->attr = *attr;
 
        if (bo) {
                struct drm_gpuvm_bo *vm_bo;
@@ -2475,7 +2476,7 @@ unwind_prefetch_ops:
 ALLOW_ERROR_INJECTION(vm_bind_ioctl_ops_create, ERRNO);
 
 static struct xe_vma *new_vma(struct xe_vm *vm, struct drm_gpuva_op_map *op,
-                             u16 pat_index, unsigned int flags)
+                             struct xe_vma_mem_attr *attr, unsigned int flags)
 {
        struct xe_bo *bo = op->gem.obj ? gem_to_xe_bo(op->gem.obj) : NULL;
        struct drm_exec exec;
@@ -2504,7 +2505,7 @@ static struct xe_vma *new_vma(struct xe_vm *vm, struct drm_gpuva_op_map *op,
        }
        vma = xe_vma_create(vm, bo, op->gem.offset,
                            op->va.addr, op->va.addr +
-                           op->va.range - 1, pat_index, flags);
+                           op->va.range - 1, attr, flags);
        if (IS_ERR(vma))
                goto err_unlock;
 
@@ -2647,6 +2648,15 @@ static int vm_bind_ioctl_ops_parse(struct xe_vm *vm, struct drm_gpuva_ops *ops,
                switch (op->base.op) {
                case DRM_GPUVA_OP_MAP:
                {
+                       struct xe_vma_mem_attr default_attr = {
+                               .preferred_loc = {
+                                       .devmem_fd = DRM_XE_PREFERRED_LOC_DEFAULT_DEVICE,
+                                       .migration_policy = DRM_XE_MIGRATE_ALL_PAGES,
+                               },
+                               .atomic_access = DRM_XE_ATOMIC_UNDEFINED,
+                               .pat_index = op->map.pat_index,
+                       };
+
                        flags |= op->map.read_only ?
                                VMA_CREATE_FLAG_READ_ONLY : 0;
                        flags |= op->map.is_null ?
@@ -2656,7 +2666,7 @@ static int vm_bind_ioctl_ops_parse(struct xe_vm *vm, struct drm_gpuva_ops *ops,
                        flags |= op->map.is_cpu_addr_mirror ?
                                VMA_CREATE_FLAG_IS_SYSTEM_ALLOCATOR : 0;
 
-                       vma = new_vma(vm, &op->base.map, op->map.pat_index,
+                       vma = new_vma(vm, &op->base.map, &default_attr,
                                      flags);
                        if (IS_ERR(vma))
                                return PTR_ERR(vma);
@@ -2704,7 +2714,7 @@ static int vm_bind_ioctl_ops_parse(struct xe_vm *vm, struct drm_gpuva_ops *ops,
 
                        if (op->base.remap.prev) {
                                vma = new_vma(vm, op->base.remap.prev,
-                                             old->attr.pat_index, flags);
+                                             &old->attr, flags);
                                if (IS_ERR(vma))
                                        return PTR_ERR(vma);
 
@@ -2734,7 +2744,7 @@ static int vm_bind_ioctl_ops_parse(struct xe_vm *vm, struct drm_gpuva_ops *ops,
 
                        if (op->base.remap.next) {
                                vma = new_vma(vm, op->base.remap.next,
-                                             old->attr.pat_index, flags);
+                                             &old->attr, flags);
                                if (IS_ERR(vma))
                                        return PTR_ERR(vma);