www.infradead.org Git - users/jedix/linux-maple.git/commitdiff
drm/xe/sched_job: Promote xe_sched_job_add_deps()
authorFrancois Dugast <francois.dugast@intel.com>
Fri, 14 Jun 2024 09:44:33 +0000 (11:44 +0200)
committerRodrigo Vivi <rodrigo.vivi@intel.com>
Fri, 14 Jun 2024 20:51:43 +0000 (16:51 -0400)
Move it out of the xe_migrate compilation unit so it can be re-used in
other places.

Cc: Matthew Brost <matthew.brost@intel.com>
Signed-off-by: Francois Dugast <francois.dugast@intel.com>
Reviewed-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20240614094433.775866-1-francois.dugast@intel.com
Signed-off-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
drivers/gpu/drm/xe/xe_exec.c
drivers/gpu/drm/xe/xe_migrate.c
drivers/gpu/drm/xe/xe_sched_job.c
drivers/gpu/drm/xe/xe_sched_job.h

index 4cf6c6ab48664d6831e5b9c182cd301c13301ba7..2d72cdec3a0b394f3b65dabf1c818fe6dd2857cc 100644 (file)
@@ -259,9 +259,9 @@ retry:
 
        /* Wait behind rebinds */
        if (!xe_vm_in_lr_mode(vm)) {
-               err = drm_sched_job_add_resv_dependencies(&job->drm,
-                                                         xe_vm_resv(vm),
-                                                         DMA_RESV_USAGE_KERNEL);
+               err = xe_sched_job_add_deps(job,
+                                           xe_vm_resv(vm),
+                                           DMA_RESV_USAGE_KERNEL);
                if (err)
                        goto err_put_job;
        }
index ddd50c3f7208004d423f37c87d36a01b24738b79..05f933787860fcecaeb4e7abbea211aa6d3d29e2 100644 (file)
@@ -647,12 +647,6 @@ static void emit_copy(struct xe_gt *gt, struct xe_bb *bb,
        bb->cs[bb->len++] = upper_32_bits(src_ofs);
 }
 
-static int job_add_deps(struct xe_sched_job *job, struct dma_resv *resv,
-                       enum dma_resv_usage usage)
-{
-       return drm_sched_job_add_resv_dependencies(&job->drm, resv, usage);
-}
-
 static u64 xe_migrate_batch_base(struct xe_migrate *m, bool usm)
 {
        return usm ? m->usm_batch_base_ofs : m->batch_base_ofs;
@@ -849,11 +843,11 @@ struct dma_fence *xe_migrate_copy(struct xe_migrate *m,
 
                xe_sched_job_add_migrate_flush(job, flush_flags);
                if (!fence) {
-                       err = job_add_deps(job, src_bo->ttm.base.resv,
-                                          DMA_RESV_USAGE_BOOKKEEP);
+                       err = xe_sched_job_add_deps(job, src_bo->ttm.base.resv,
+                                                   DMA_RESV_USAGE_BOOKKEEP);
                        if (!err && src_bo != dst_bo)
-                               err = job_add_deps(job, dst_bo->ttm.base.resv,
-                                                  DMA_RESV_USAGE_BOOKKEEP);
+                               err = xe_sched_job_add_deps(job, dst_bo->ttm.base.resv,
+                                                           DMA_RESV_USAGE_BOOKKEEP);
                        if (err)
                                goto err_job;
                }
@@ -1091,8 +1085,8 @@ struct dma_fence *xe_migrate_clear(struct xe_migrate *m,
                         * fences, which are always tracked as
                         * DMA_RESV_USAGE_KERNEL.
                         */
-                       err = job_add_deps(job, bo->ttm.base.resv,
-                                          DMA_RESV_USAGE_KERNEL);
+                       err = xe_sched_job_add_deps(job, bo->ttm.base.resv,
+                                                   DMA_RESV_USAGE_KERNEL);
                        if (err)
                                goto err_job;
                }
@@ -1417,8 +1411,8 @@ xe_migrate_update_pgtables(struct xe_migrate *m,
 
        /* Wait on BO move */
        if (bo) {
-               err = job_add_deps(job, bo->ttm.base.resv,
-                                  DMA_RESV_USAGE_KERNEL);
+               err = xe_sched_job_add_deps(job, bo->ttm.base.resv,
+                                           DMA_RESV_USAGE_KERNEL);
                if (err)
                        goto err_job;
        }
@@ -1428,8 +1422,8 @@ xe_migrate_update_pgtables(struct xe_migrate *m,
         * trigger preempts before moving forward
         */
        if (first_munmap_rebind) {
-               err = job_add_deps(job, xe_vm_resv(vm),
-                                  DMA_RESV_USAGE_BOOKKEEP);
+               err = xe_sched_job_add_deps(job, xe_vm_resv(vm),
+                                           DMA_RESV_USAGE_BOOKKEEP);
                if (err)
                        goto err_job;
        }
index 5c013904877a6bb21bf9b02be4ea8d06e3e1acf4..44d534e362cd39ebc338810c6f6a7318bd1cf75b 100644 (file)
@@ -363,3 +363,9 @@ xe_sched_job_snapshot_print(struct xe_sched_job_snapshot *snapshot,
        for (i = 0; i < snapshot->batch_addr_len; i++)
                drm_printf(p, "batch_addr[%u]: 0x%016llx\n", i, snapshot->batch_addr[i]);
 }
+
+int xe_sched_job_add_deps(struct xe_sched_job *job, struct dma_resv *resv,
+                         enum dma_resv_usage usage)
+{
+       return drm_sched_job_add_resv_dependencies(&job->drm, resv, usage);
+}
index f362e28455dbfe92d02c747a8c8609111ba30960..3dc72c5c1f139256651e647176cc14cc43f66e73 100644 (file)
@@ -90,4 +90,7 @@ struct xe_sched_job_snapshot *xe_sched_job_snapshot_capture(struct xe_sched_job
 void xe_sched_job_snapshot_free(struct xe_sched_job_snapshot *snapshot);
 void xe_sched_job_snapshot_print(struct xe_sched_job_snapshot *snapshot, struct drm_printer *p);
 
+int xe_sched_job_add_deps(struct xe_sched_job *job, struct dma_resv *resv,
+                         enum dma_resv_usage usage);
+
 #endif