if (r)
goto out;
} else {
- drm_sched_start(&ring->sched, false);
+ drm_sched_start(&ring->sched);
}
}
if (!amdgpu_ring_sched_ready(ring))
continue;
- drm_sched_start(&ring->sched, true);
+ drm_sched_start(&ring->sched);
}
if (!drm_drv_uses_atomic_modeset(adev_to_drm(tmp_adev)) && !job_signaled)
if (!amdgpu_ring_sched_ready(ring))
continue;
- drm_sched_start(&ring->sched, true);
+ drm_sched_start(&ring->sched);
}
amdgpu_device_unset_mp1_state(adev);
drm_sched_resubmit_jobs(&gpu->sched);
- drm_sched_start(&gpu->sched, true);
+ drm_sched_start(&gpu->sched);
return DRM_GPU_SCHED_STAT_NOMINAL;
out_no_timeout:
/* restart scheduler after GPU is usable again */
- drm_sched_start(&gpu->sched, true);
+ drm_sched_start(&gpu->sched);
return DRM_GPU_SCHED_STAT_NOMINAL;
}
}
}
- drm_sched_start(&queue->scheduler, true);
+ drm_sched_start(&queue->scheduler);
}
/**
}
mutex_unlock(&pvr_dev->queues.lock);
- drm_sched_start(sched, true);
+ drm_sched_start(sched);
return DRM_GPU_SCHED_STAT_NOMINAL;
}
lima_pm_idle(ldev);
drm_sched_resubmit_jobs(&pipe->base);
- drm_sched_start(&pipe->base, true);
+ drm_sched_start(&pipe->base);
return DRM_GPU_SCHED_STAT_NOMINAL;
}
else
NV_PRINTK(warn, job->cli, "Generic job timeout.\n");
- drm_sched_start(sched, true);
+ drm_sched_start(sched);
return stat;
}
/* Restart the schedulers */
for (i = 0; i < NUM_JOB_SLOTS; i++)
- drm_sched_start(&pfdev->js->queue[i].sched, true);
+ drm_sched_start(&pfdev->js->queue[i].sched);
/* Re-enable job interrupts now that everything has been restarted. */
job_write(pfdev, JOB_INT_MASK,
static void panthor_vm_start(struct panthor_vm *vm)
{
- drm_sched_start(&vm->sched, true);
+ drm_sched_start(&vm->sched);
}
/**
list_for_each_entry(job, &queue->scheduler.pending_list, base.list)
job->base.s_fence->parent = dma_fence_get(job->done_fence);
- drm_sched_start(&queue->scheduler, true);
+ drm_sched_start(&queue->scheduler);
}
static void panthor_group_stop(struct panthor_group *group)
* drm_sched_start - recover jobs after a reset
*
* @sched: scheduler instance
- * @full_recovery: proceed with complete sched restart
*
*/
-void drm_sched_start(struct drm_gpu_scheduler *sched, bool full_recovery)
+void drm_sched_start(struct drm_gpu_scheduler *sched)
{
struct drm_sched_job *s_job, *tmp;
- int r;
/*
* Locking the list is not required here as the sched thread is parked
atomic_add(s_job->credits, &sched->credit_count);
- if (!full_recovery)
+ if (!fence) {
+ drm_sched_job_done(s_job, -ECANCELED);
continue;
+ }
- if (fence) {
- r = dma_fence_add_callback(fence, &s_job->cb,
- drm_sched_job_done_cb);
- if (r == -ENOENT)
- drm_sched_job_done(s_job, fence->error);
- else if (r)
- DRM_DEV_ERROR(sched->dev, "fence add callback failed (%d)\n",
- r);
- } else
- drm_sched_job_done(s_job, -ECANCELED);
+ if (dma_fence_add_callback(fence, &s_job->cb,
+ drm_sched_job_done_cb))
+ drm_sched_job_done(s_job, fence->error);
}
- if (full_recovery)
- drm_sched_start_timeout_unlocked(sched);
-
+ drm_sched_start_timeout_unlocked(sched);
drm_sched_wqueue_start(sched);
}
EXPORT_SYMBOL(drm_sched_start);
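
For context, a minimal sketch of what a driver's ->timedout_job handler looks like against the new single-argument drm_sched_start(); my_gpu, my_gpu_from_sched() and my_gpu_reset() are hypothetical placeholders for illustration only, not code from this patch.

#include <drm/gpu_scheduler.h>

/* Hypothetical driver state; stands in for etnaviv_gpu, lima_sched_pipe, etc. */
struct my_gpu;
struct my_gpu *my_gpu_from_sched(struct drm_gpu_scheduler *sched);
void my_gpu_reset(struct my_gpu *gpu);

static enum drm_gpu_sched_stat
my_timedout_job(struct drm_sched_job *sched_job)
{
	struct drm_gpu_scheduler *sched = sched_job->sched;
	struct my_gpu *gpu = my_gpu_from_sched(sched);

	/* Park the scheduler and detach the bad job from the hardware. */
	drm_sched_stop(sched, sched_job);

	/* Mark the offending entity so repeated hangs can be banned. */
	drm_sched_increase_karma(sched_job);

	/* Driver-specific hardware reset (placeholder). */
	my_gpu_reset(gpu);

	/*
	 * Restart without the old full_recovery flag: pending jobs whose
	 * parent fence is missing are completed with -ECANCELED, the
	 * timeout is re-armed and the submission workqueue is unblocked.
	 */
	drm_sched_start(sched);

	return DRM_GPU_SCHED_STAT_NOMINAL;
}
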
/* Unblock schedulers and restart their jobs. */
for (q = 0; q < V3D_MAX_QUEUES; q++) {
- drm_sched_start(&v3d->queue[q].sched, true);
+ drm_sched_start(&v3d->queue[q].sched);
}
mutex_unlock(&v3d->reset_lock);
void drm_sched_wqueue_stop(struct drm_gpu_scheduler *sched);
void drm_sched_wqueue_start(struct drm_gpu_scheduler *sched);
void drm_sched_stop(struct drm_gpu_scheduler *sched, struct drm_sched_job *bad);
-void drm_sched_start(struct drm_gpu_scheduler *sched, bool full_recovery);
+void drm_sched_start(struct drm_gpu_scheduler *sched);
void drm_sched_resubmit_jobs(struct drm_gpu_scheduler *sched);
void drm_sched_increase_karma(struct drm_sched_job *bad);
void drm_sched_reset_karma(struct drm_sched_job *bad);