job->base.sched->name, atomic_read(&ring->fence_drv.last_seq),
                  ring->fence_drv.sync_seq);
 
-       amdgpu_device_gpu_recover(job->adev, job, false);
+       amdgpu_device_gpu_recover(ring->adev, job, false);
 }
 
 int amdgpu_job_alloc(struct amdgpu_device *adev, unsigned num_ibs,
        if (!*job)
                return -ENOMEM;
 
-       (*job)->adev = adev;
+       /*
+        * Point base.sched at the first ring's scheduler so that a job
+        * always has a way to reach adev (via to_amdgpu_ring()), even
+        * before it has actually been scheduled on a ring.
+        */
+       (*job)->base.sched = &adev->rings[0]->sched;
        (*job)->vm = vm;
        (*job)->ibs = (void *)&(*job)[1];
        (*job)->num_ibs = num_ibs;
 
 void amdgpu_job_free_resources(struct amdgpu_job *job)
 {
+       struct amdgpu_ring *ring = to_amdgpu_ring(job->base.sched);
        struct dma_fence *f;
        unsigned i;
 
        f = job->base.s_fence ? &job->base.s_fence->finished : job->fence;
 
        for (i = 0; i < job->num_ibs; ++i)
-               amdgpu_ib_free(job->adev, &job->ibs[i], f);
+               amdgpu_ib_free(ring->adev, &job->ibs[i], f);
 }
 
 static void amdgpu_job_free_cb(struct drm_sched_job *s_job)
 
        if (fence && explicit) {
                if (drm_sched_dependency_optimized(fence, s_entity)) {
-                       r = amdgpu_sync_fence(job->adev, &job->sched_sync, fence, false);
+                       r = amdgpu_sync_fence(ring->adev, &job->sched_sync,
+                                             fence, false);
                        if (r)
                                DRM_ERROR("Error adding fence to sync (%d)\n", r);
                }
 {
        struct amdgpu_ring *ring = to_amdgpu_ring(sched_job->sched);
        struct dma_fence *fence = NULL, *finished;
-       struct amdgpu_device *adev;
        struct amdgpu_job *job;
        int r;
 
        }
        job = to_amdgpu_job(sched_job);
        finished = &job->base.s_fence->finished;
-       adev = job->adev;
 
        BUG_ON(amdgpu_sync_peek_fence(&job->sync, NULL));
 
        trace_amdgpu_sched_run_job(job);
 
-       if (job->vram_lost_counter != atomic_read(&adev->vram_lost_counter))
+       if (job->vram_lost_counter != atomic_read(&ring->adev->vram_lost_counter))
                dma_fence_set_error(finished, -ECANCELED);/* skip IB as well if VRAM lost */
 
        if (finished->error < 0) {