drm/xe: add interface to request physical alignment for buffer objects
author Juha-Pekka Heikkila <juhapekka.heikkila@gmail.com>
Wed, 9 Oct 2024 15:19:46 +0000 (18:19 +0300)
committer Mika Kahola <mika.kahola@intel.com>
Mon, 14 Oct 2024 14:33:39 +0000 (17:33 +0300)
Add xe_bo_create_pin_map_at_aligned(), which augments
xe_bo_create_pin_map_at() with an alignment parameter, allowing callers
to pass the required alignment when it differs from the default.

Signed-off-by: Juha-Pekka Heikkila <juhapekka.heikkila@gmail.com>
Reviewed-by: Jonathan Cavitt <jonathan.cavitt@intel.com>
Signed-off-by: Mika Kahola <mika.kahola@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20241009151947.2240099-2-juhapekka.heikkila@gmail.com
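
For illustration only, a hypothetical caller of the new helper might look like the sketch below. Only xe_bo_create_pin_map_at_aligned() and its parameter order come from this patch; the size, flag choice and the use of ~0ULL as "no fixed offset" are assumptions based on the existing xe_bo_create_pin_map_at() conventions.

/* Hypothetical usage sketch: pin and map a kernel BO with 2 MiB alignment. */
static struct xe_bo *example_bo_alloc_aligned(struct xe_device *xe,
					      struct xe_tile *tile)
{
	/* SZ_2M size/alignment and the flags are illustrative only. */
	return xe_bo_create_pin_map_at_aligned(xe, tile, NULL /* vm */,
					       SZ_2M, ~0ULL /* offset */,
					       ttm_bo_type_kernel,
					       XE_BO_FLAG_SYSTEM |
					       XE_BO_FLAG_GGTT,
					       SZ_2M /* alignment */);
}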
drivers/gpu/drm/xe/compat-i915-headers/gem/i915_gem_stolen.h
drivers/gpu/drm/xe/xe_bo.c
drivers/gpu/drm/xe/xe_bo.h
drivers/gpu/drm/xe/xe_bo_types.h
drivers/gpu/drm/xe/xe_ggtt.c

diff --git a/drivers/gpu/drm/xe/compat-i915-headers/gem/i915_gem_stolen.h b/drivers/gpu/drm/xe/compat-i915-headers/gem/i915_gem_stolen.h
index cb6c7598824be38a8d5f17c1677a2f95a88db43c..9c4cf050059ac2cc97cb49ad4df2b88562f011b7 100644 (file)
@@ -29,7 +29,7 @@ static inline int i915_gem_stolen_insert_node_in_range(struct xe_device *xe,
 
        bo = xe_bo_create_locked_range(xe, xe_device_get_root_tile(xe),
                                       NULL, size, start, end,
-                                      ttm_bo_type_kernel, flags);
+                                      ttm_bo_type_kernel, flags, 0);
        if (IS_ERR(bo)) {
                err = PTR_ERR(bo);
                bo = NULL;
diff --git a/drivers/gpu/drm/xe/xe_bo.c b/drivers/gpu/drm/xe/xe_bo.c
index 5e8f60a8d4316091bc76b19dfb21bc42e5dbf8f0..d5d30a0ff1e782cc3ee9d78b8f6261e674856852 100644 (file)
@@ -1454,7 +1454,8 @@ static struct xe_bo *
 __xe_bo_create_locked(struct xe_device *xe,
                      struct xe_tile *tile, struct xe_vm *vm,
                      size_t size, u64 start, u64 end,
-                     u16 cpu_caching, enum ttm_bo_type type, u32 flags)
+                     u16 cpu_caching, enum ttm_bo_type type, u32 flags,
+                     u64 alignment)
 {
        struct xe_bo *bo = NULL;
        int err;
@@ -1483,6 +1484,8 @@ __xe_bo_create_locked(struct xe_device *xe,
        if (IS_ERR(bo))
                return bo;
 
+       bo->min_align = alignment;
+
        /*
         * Note that instead of taking a reference no the drm_gpuvm_resv_bo(),
         * to ensure the shared resv doesn't disappear under the bo, the bo
@@ -1523,16 +1526,18 @@ struct xe_bo *
 xe_bo_create_locked_range(struct xe_device *xe,
                          struct xe_tile *tile, struct xe_vm *vm,
                          size_t size, u64 start, u64 end,
-                         enum ttm_bo_type type, u32 flags)
+                         enum ttm_bo_type type, u32 flags, u64 alignment)
 {
-       return __xe_bo_create_locked(xe, tile, vm, size, start, end, 0, type, flags);
+       return __xe_bo_create_locked(xe, tile, vm, size, start, end, 0, type,
+                                    flags, alignment);
 }
 
 struct xe_bo *xe_bo_create_locked(struct xe_device *xe, struct xe_tile *tile,
                                  struct xe_vm *vm, size_t size,
                                  enum ttm_bo_type type, u32 flags)
 {
-       return __xe_bo_create_locked(xe, tile, vm, size, 0, ~0ULL, 0, type, flags);
+       return __xe_bo_create_locked(xe, tile, vm, size, 0, ~0ULL, 0, type,
+                                    flags, 0);
 }
 
 struct xe_bo *xe_bo_create_user(struct xe_device *xe, struct xe_tile *tile,
@@ -1542,7 +1547,7 @@ struct xe_bo *xe_bo_create_user(struct xe_device *xe, struct xe_tile *tile,
 {
        struct xe_bo *bo = __xe_bo_create_locked(xe, tile, vm, size, 0, ~0ULL,
                                                 cpu_caching, ttm_bo_type_device,
-                                                flags | XE_BO_FLAG_USER);
+                                                flags | XE_BO_FLAG_USER, 0);
        if (!IS_ERR(bo))
                xe_bo_unlock_vm_held(bo);
 
@@ -1565,6 +1570,17 @@ struct xe_bo *xe_bo_create_pin_map_at(struct xe_device *xe, struct xe_tile *tile
                                      struct xe_vm *vm,
                                      size_t size, u64 offset,
                                      enum ttm_bo_type type, u32 flags)
+{
+       return xe_bo_create_pin_map_at_aligned(xe, tile, vm, size, offset,
+                                              type, flags, 0);
+}
+
+struct xe_bo *xe_bo_create_pin_map_at_aligned(struct xe_device *xe,
+                                             struct xe_tile *tile,
+                                             struct xe_vm *vm,
+                                             size_t size, u64 offset,
+                                             enum ttm_bo_type type, u32 flags,
+                                             u64 alignment)
 {
        struct xe_bo *bo;
        int err;
@@ -1576,7 +1592,8 @@ struct xe_bo *xe_bo_create_pin_map_at(struct xe_device *xe, struct xe_tile *tile
                flags |= XE_BO_FLAG_GGTT;
 
        bo = xe_bo_create_locked_range(xe, tile, vm, size, start, end, type,
-                                      flags | XE_BO_FLAG_NEEDS_CPU_ACCESS);
+                                      flags | XE_BO_FLAG_NEEDS_CPU_ACCESS,
+                                      alignment);
        if (IS_ERR(bo))
                return bo;
 
diff --git a/drivers/gpu/drm/xe/xe_bo.h b/drivers/gpu/drm/xe/xe_bo.h
index 6e4be52306dfc9f85e61b0c4f767e05a80bbde93..41624159a2918a41eff62e4c2e1bf9137e61878a 100644 (file)
@@ -77,7 +77,7 @@ struct xe_bo *
 xe_bo_create_locked_range(struct xe_device *xe,
                          struct xe_tile *tile, struct xe_vm *vm,
                          size_t size, u64 start, u64 end,
-                         enum ttm_bo_type type, u32 flags);
+                         enum ttm_bo_type type, u32 flags, u64 alignment);
 struct xe_bo *xe_bo_create_locked(struct xe_device *xe, struct xe_tile *tile,
                                  struct xe_vm *vm, size_t size,
                                  enum ttm_bo_type type, u32 flags);
@@ -94,6 +94,12 @@ struct xe_bo *xe_bo_create_pin_map(struct xe_device *xe, struct xe_tile *tile,
 struct xe_bo *xe_bo_create_pin_map_at(struct xe_device *xe, struct xe_tile *tile,
                                      struct xe_vm *vm, size_t size, u64 offset,
                                      enum ttm_bo_type type, u32 flags);
+struct xe_bo *xe_bo_create_pin_map_at_aligned(struct xe_device *xe,
+                                             struct xe_tile *tile,
+                                             struct xe_vm *vm,
+                                             size_t size, u64 offset,
+                                             enum ttm_bo_type type, u32 flags,
+                                             u64 alignment);
 struct xe_bo *xe_bo_create_from_data(struct xe_device *xe, struct xe_tile *tile,
                                     const void *data, size_t size,
                                     enum ttm_bo_type type, u32 flags);
diff --git a/drivers/gpu/drm/xe/xe_bo_types.h b/drivers/gpu/drm/xe/xe_bo_types.h
index 2ed558ac2264a69cc093496d2bd66f812b62bb91..35372c46edfa5be1324d332b0cb8b64238191cdf 100644 (file)
@@ -76,6 +76,11 @@ struct xe_bo {
 
        /** @vram_userfault_link: Link into @mem_access.vram_userfault.list */
                struct list_head vram_userfault_link;
+
+       /** @min_align: minimum alignment needed for this BO if different
+        * from default
+        */
+       u64 min_align;
 };
 
 #define intel_bo_to_drm_bo(bo) (&(bo)->ttm.base)
diff --git a/drivers/gpu/drm/xe/xe_ggtt.c b/drivers/gpu/drm/xe/xe_ggtt.c
index 47bfd9d2635d0bcd6f9115e9cd715ed7caf8033c..1b31782269871cd929b4f98b1b4310a7a40acfe3 100644 (file)
@@ -603,7 +603,7 @@ static int __xe_ggtt_insert_bo_at(struct xe_ggtt *ggtt, struct xe_bo *bo,
                                  u64 start, u64 end)
 {
        int err;
-       u64 alignment = XE_PAGE_SIZE;
+       u64 alignment = bo->min_align > 0 ? bo->min_align : XE_PAGE_SIZE;
 
        if (xe_bo_is_vram(bo) && ggtt->flags & XE_GGTT_FLAGS_64K)
                alignment = SZ_64K;
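
For reference, the alignment selection in __xe_ggtt_insert_bo_at() after this hunk can be read as the standalone sketch below. The helper name and the boolean inputs standing in for xe_bo_is_vram() and the XE_GGTT_FLAGS_64K check are illustrative, not part of the patch.

/*
 * Sketch of the GGTT node alignment selection after this patch:
 * honour bo->min_align when set, otherwise fall back to the default
 * page size; VRAM placements on a 64K-granule GGTT still use 64 KiB,
 * exactly as the hunk above reads.
 */
static u64 example_ggtt_alignment(u64 min_align, bool is_vram, bool ggtt_64k)
{
	u64 alignment = min_align > 0 ? min_align : XE_PAGE_SIZE;

	if (is_vram && ggtt_64k)
		alignment = SZ_64K;

	return alignment;
}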