www.infradead.org Git - users/hch/misc.git/commitdiff
drm/xe: improve dma-resv handling for backup object
author Matthew Auld <matthew.auld@intel.com>
Fri, 29 Aug 2025 16:47:16 +0000 (17:47 +0100)
committer Matthew Auld <matthew.auld@intel.com>
Fri, 5 Sep 2025 10:53:00 +0000 (11:53 +0100)
Since the dma-resv is shared we don't need to reserve a fence slot and
add a fence twice, plus there is no need to loop through the dependencies.

Signed-off-by: Matthew Auld <matthew.auld@intel.com>
Cc: Thomas Hellström <thomas.hellstrom@linux.intel.com>
Cc: Matthew Brost <matthew.brost@intel.com>
Reviewed-by: Jonathan Cavitt <jonathan.cavitt@intel.com>
Reviewed-by: Thomas Hellström <thomas.hellstrom@linux.intel.com>
Link: https://lore.kernel.org/r/20250829164715.720735-2-matthew.auld@intel.com
drivers/gpu/drm/xe/xe_bo.c
drivers/gpu/drm/xe/xe_migrate.c

index 4faf15d5fa6df55acd909dcb275dc4c3346c5d46..49911247f6cbdf3b47c4cb581bd3c9789890bd5f 100644 (file)
@@ -1260,14 +1260,11 @@ int xe_bo_evict_pinned(struct xe_bo *bo)
                else
                        migrate = mem_type_to_migrate(xe, bo->ttm.resource->mem_type);
 
+               xe_assert(xe, bo->ttm.base.resv == backup->ttm.base.resv);
                ret = dma_resv_reserve_fences(bo->ttm.base.resv, 1);
                if (ret)
                        goto out_backup;
 
-               ret = dma_resv_reserve_fences(backup->ttm.base.resv, 1);
-               if (ret)
-                       goto out_backup;
-
                fence = xe_migrate_copy(migrate, bo, backup, bo->ttm.resource,
                                        backup->ttm.resource, false);
                if (IS_ERR(fence)) {
@@ -1277,8 +1274,6 @@ int xe_bo_evict_pinned(struct xe_bo *bo)
 
                dma_resv_add_fence(bo->ttm.base.resv, fence,
                                   DMA_RESV_USAGE_KERNEL);
-               dma_resv_add_fence(backup->ttm.base.resv, fence,
-                                  DMA_RESV_USAGE_KERNEL);
                dma_fence_put(fence);
        } else {
                ret = xe_bo_vmap(backup);
@@ -1356,10 +1351,6 @@ int xe_bo_restore_pinned(struct xe_bo *bo)
                if (ret)
                        goto out_unlock_bo;
 
-               ret = dma_resv_reserve_fences(backup->ttm.base.resv, 1);
-               if (ret)
-                       goto out_unlock_bo;
-
                fence = xe_migrate_copy(migrate, backup, bo,
                                        backup->ttm.resource, bo->ttm.resource,
                                        false);
@@ -1370,8 +1361,6 @@ int xe_bo_restore_pinned(struct xe_bo *bo)
 
                dma_resv_add_fence(bo->ttm.base.resv, fence,
                                   DMA_RESV_USAGE_KERNEL);
-               dma_resv_add_fence(backup->ttm.base.resv, fence,
-                                  DMA_RESV_USAGE_KERNEL);
                dma_fence_put(fence);
        } else {
                ret = xe_bo_vmap(backup);
index 9643442ef101cf6f43bc639701dbd3da4540dda9..861d9d0633d198f0b2a6da63fedc9c38b9599605 100644 (file)
@@ -908,7 +908,7 @@ struct dma_fence *xe_migrate_copy(struct xe_migrate *m,
                if (!fence) {
                        err = xe_sched_job_add_deps(job, src_bo->ttm.base.resv,
                                                    DMA_RESV_USAGE_BOOKKEEP);
-                       if (!err && src_bo != dst_bo)
+                       if (!err && src_bo->ttm.base.resv != dst_bo->ttm.base.resv)
                                err = xe_sched_job_add_deps(job, dst_bo->ttm.base.resv,
                                                            DMA_RESV_USAGE_BOOKKEEP);
                        if (err)