 	bb->cs[bb->len++] = upper_32_bits(src_ofs);
 }
 
-static int job_add_deps(struct xe_sched_job *job, struct dma_resv *resv,
-			enum dma_resv_usage usage)
-{
-	return drm_sched_job_add_resv_dependencies(&job->drm, resv, usage);
-}
-
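
The helper removed above was a one-line shim; the actual work is done by the
DRM scheduler. For reference, the underlying API (from
drivers/gpu/drm/scheduler/sched_main.c) is:

int drm_sched_job_add_resv_dependencies(struct drm_sched_job *job,
					struct dma_resv *resv,
					enum dma_resv_usage usage);

It adds every fence in @resv whose usage class is covered by @usage as a
dependency of @job, so the scheduler will not run the job until those fences
have signaled.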
 static u64 xe_migrate_batch_base(struct xe_migrate *m, bool usm)
 {
 	return usm ? m->usm_batch_base_ofs : m->batch_base_ofs;
 		xe_sched_job_add_migrate_flush(job, flush_flags);
 		if (!fence) {
-			err = job_add_deps(job, src_bo->ttm.base.resv,
-					   DMA_RESV_USAGE_BOOKKEEP);
+			err = xe_sched_job_add_deps(job, src_bo->ttm.base.resv,
+						    DMA_RESV_USAGE_BOOKKEEP);
 			if (!err && src_bo != dst_bo)
-				err = job_add_deps(job, dst_bo->ttm.base.resv,
-						   DMA_RESV_USAGE_BOOKKEEP);
+				err = xe_sched_job_add_deps(job, dst_bo->ttm.base.resv,
+							    DMA_RESV_USAGE_BOOKKEEP);
 			if (err)
 				goto err_job;
 		}
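
A note on the usage argument at these call sites: waiting with
DMA_RESV_USAGE_BOOKKEEP picks up all fences in the reservation object, while
DMA_RESV_USAGE_KERNEL, used in the hunks below, selects only kernel-internal
fences such as BO moves. The classes come from enum dma_resv_usage in
include/linux/dma-resv.h, abridged here:

enum dma_resv_usage {
	DMA_RESV_USAGE_KERNEL,		/* kernel memory management, e.g. moves */
	DMA_RESV_USAGE_WRITE,		/* implicit-sync writes */
	DMA_RESV_USAGE_READ,		/* implicit-sync reads */
	DMA_RESV_USAGE_BOOKKEEP,	/* everything else, e.g. preempt fences */
};

Iterating a resv with usage U returns every fence whose class is at or below
U in this ordering, so BOOKKEEP is the broadest query and KERNEL the
narrowest.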
 			 * fences, which are always tracked as
 			 * DMA_RESV_USAGE_KERNEL.
 			 */
-			err = job_add_deps(job, bo->ttm.base.resv,
-					   DMA_RESV_USAGE_KERNEL);
+			err = xe_sched_job_add_deps(job, bo->ttm.base.resv,
+						    DMA_RESV_USAGE_KERNEL);
 			if (err)
 				goto err_job;
 		}
 	/* Wait on BO move */
 	if (bo) {
-		err = job_add_deps(job, bo->ttm.base.resv,
-				   DMA_RESV_USAGE_KERNEL);
+		err = xe_sched_job_add_deps(job, bo->ttm.base.resv,
+					    DMA_RESV_USAGE_KERNEL);
 		if (err)
 			goto err_job;
 	}
 	 * trigger preempts before moving forward
 	 */
 	if (first_munmap_rebind) {
-		err = job_add_deps(job, xe_vm_resv(vm),
-				   DMA_RESV_USAGE_BOOKKEEP);
+		err = xe_sched_job_add_deps(job, xe_vm_resv(vm),
+					    DMA_RESV_USAGE_BOOKKEEP);
 		if (err)
 			goto err_job;
 	}
 	for (i = 0; i < snapshot->batch_addr_len; i++)
 		drm_printf(p, "batch_addr[%u]: 0x%016llx\n", i, snapshot->batch_addr[i]);
 }
+
+int xe_sched_job_add_deps(struct xe_sched_job *job, struct dma_resv *resv,
+			  enum dma_resv_usage usage)
+{
+	return drm_sched_job_add_resv_dependencies(&job->drm, resv, usage);
+}
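
Making the helper non-static implies a matching declaration in the header,
which is not part of this excerpt. A sketch of what would go in
drivers/gpu/drm/xe/xe_sched_job.h, assuming struct dma_resv and
enum dma_resv_usage are already visible there:

int xe_sched_job_add_deps(struct xe_sched_job *job, struct dma_resv *resv,
			  enum dma_resv_usage usage);

With the declaration in place, any code that already deals in xe_sched_job
can add reservation-object dependencies without touching the drm_sched_job
embedded inside it.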