Finally getting rid of the exclusive lock.
Signed-off-by: Christian König <christian.koenig@amd.com>
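---

For reference, the exclusive_lock being removed serialized normal work
against GPU reset: command submission, GEM allocation and page flips
took the read side, while amdgpu_gpu_reset() took the write side. A
minimal userspace sketch of that reader/writer pattern, with POSIX
rwlocks standing in for the kernel's struct rw_semaphore (the function
names are illustrative only):

    #include <pthread.h>
    #include <stdio.h>

    static pthread_rwlock_t exclusive_lock = PTHREAD_RWLOCK_INITIALIZER;

    /* Reader side: e.g. command submission or a GEM ioctl. */
    static void submit_work(void)
    {
            pthread_rwlock_rdlock(&exclusive_lock);
            puts("submitting; a concurrent reset is blocked");
            pthread_rwlock_unlock(&exclusive_lock);
    }

    /* Writer side: the GPU reset path. */
    static void gpu_reset(void)
    {
            pthread_rwlock_wrlock(&exclusive_lock);
            puts("resetting; all submissions are blocked");
            pthread_rwlock_unlock(&exclusive_lock);
    }

    int main(void)
    {
            submit_work();
            gpu_reset();
            return 0;
    }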
        struct device                   *dev;
        struct drm_device               *ddev;
        struct pci_dev                  *pdev;
-       struct rw_semaphore             exclusive_lock;
 
        /* ASIC */
        enum amd_asic_type              asic_type;
 
        bool reserved_buffers = false;
        int i, r;
 
-       down_read(&adev->exclusive_lock);
-       if (!adev->accel_working) {
-               up_read(&adev->exclusive_lock);
+       if (!adev->accel_working)
                return -EBUSY;
-       }
 
        parser = amdgpu_cs_parser_create(adev, filp, NULL, NULL, 0);
        if (!parser)
                return -ENOMEM;
        r = amdgpu_cs_parser_init(parser, data);
        if (r) {
                DRM_ERROR("Failed to initialize parser !\n");
-               kfree(parser);
-               up_read(&adev->exclusive_lock);
+               amdgpu_cs_parser_fini(parser, r, false);
                r = amdgpu_cs_handle_lockup(adev, r);
                return r;
        }
 
                mutex_unlock(&job->job_lock);
                amdgpu_cs_parser_fini_late(parser);
-               up_read(&adev->exclusive_lock);
                return 0;
        }
 
        cs->out.handle = parser->ibs[parser->num_ibs - 1].sequence;
 out:
        amdgpu_cs_parser_fini(parser, r, reserved_buffers);
-       up_read(&adev->exclusive_lock);
        r = amdgpu_cs_handle_lockup(adev, r);
        return r;
 }
 
        mutex_init(&adev->gfx.gpu_clock_mutex);
        mutex_init(&adev->srbm_mutex);
        mutex_init(&adev->grbm_idx_mutex);
-       init_rwsem(&adev->exclusive_lock);
        mutex_init(&adev->mn_lock);
        hash_init(adev->mn_hash);
 
        int i, r;
        int resched;
 
-       down_write(&adev->exclusive_lock);
-
        atomic_inc(&adev->gpu_reset_counter);
 
        /* block TTM */
                dev_info(adev->dev, "GPU reset failed\n");
        }
 
-       up_write(&adev->exclusive_lock);
        return r;
 }
 
 
        fence = to_amdgpu_fence(*f);
        if (fence) {
                r = fence_wait(&fence->base, false);
-               if (r == -EDEADLK) {
-                       up_read(&adev->exclusive_lock);
+               if (r == -EDEADLK)
                        r = amdgpu_gpu_reset(adev);
-                       down_read(&adev->exclusive_lock);
-               }
        } else
                r = fence_wait(*f, false);
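
Note the simplification here: a fence wait that returns -EDEADLK now
calls amdgpu_gpu_reset() directly instead of dropping and re-taking the
read lock around the reset. A standalone sketch of that control flow
(the *_stub functions are illustrative stand-ins, not driver API):

    #include <errno.h>
    #include <stdio.h>

    static int fence_wait_stub(void) { return -EDEADLK; } /* simulated lockup */
    static int gpu_reset_stub(void)  { puts("GPU reset"); return 0; }

    int main(void)
    {
            int r = fence_wait_stub();
            if (r == -EDEADLK)      /* waiter saw a lockup: reset inline */
                    r = gpu_reset_stub();
            return r;
    }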
 
        unsigned long flags;
        unsigned i;
 
-       down_read(&adev->exclusive_lock);
        amdgpu_flip_wait_fence(adev, &work->excl);
        for (i = 0; i < work->shared_count; ++i)
                amdgpu_flip_wait_fence(adev, &work->shared[i]);
        amdgpuCrtc->pflip_status = AMDGPU_FLIP_SUBMITTED;
 
        spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
-       up_read(&adev->exclusive_lock);
 }
 
 /*
 
                                lockup_work.work);
        ring = fence_drv->ring;
 
-       if (!down_read_trylock(&ring->adev->exclusive_lock)) {
-               /* just reschedule the check if a reset is going on */
-               amdgpu_fence_schedule_check(ring);
-               return;
-       }
-
-       if (amdgpu_fence_activity(ring)) {
+       if (amdgpu_fence_activity(ring))
                wake_up_all(&ring->fence_drv.fence_queue);
-       }
-       up_read(&ring->adev->exclusive_lock);
 }
 
 /**
 {
        struct amdgpu_fence *fence = to_amdgpu_fence(f);
        struct amdgpu_ring *ring = fence->ring;
-       struct amdgpu_device *adev = ring->adev;
 
        if (atomic64_read(&ring->fence_drv.last_seq) >= fence->seq)
                return true;
 
-       if (down_read_trylock(&adev->exclusive_lock)) {
-               amdgpu_fence_process(ring);
-               up_read(&adev->exclusive_lock);
+       amdgpu_fence_process(ring);
+
+       if (atomic64_read(&ring->fence_drv.last_seq) >= fence->seq)
+               return true;
 
-               if (atomic64_read(&ring->fence_drv.last_seq) >= fence->seq)
-                       return true;
-       }
        return false;
 }
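
With the trylock gone, the signaled check above polls unconditionally:
read the last completed sequence number, process fences once, then
re-check. A self-contained sketch of that check/process/re-check shape,
with C11 atomics standing in for the kernel's atomic64_t (all names are
illustrative):

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdint.h>

    static _Atomic uint64_t last_seq;       /* stand-in for fence_drv.last_seq */

    /* Stand-in for amdgpu_fence_process(): fold in hardware progress. */
    static void process_fences(void)
    {
            atomic_fetch_add(&last_seq, 1); /* pretend one fence signaled */
    }

    static bool seq_signaled(uint64_t seq)
    {
            if (atomic_load(&last_seq) >= seq)      /* fast path */
                    return true;
            process_fences();                       /* poll once, lock-free */
            return atomic_load(&last_seq) >= seq;   /* re-check after polling */
    }

    int main(void)
    {
            return seq_signaled(1) ? 0 : 1;
    }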
 
 
        bool kernel = false;
        int r;
 
-       down_read(&adev->exclusive_lock);
        /* create a gem object to contain this object in */
        if (args->in.domains & (AMDGPU_GEM_DOMAIN_GDS |
            AMDGPU_GEM_DOMAIN_GWS | AMDGPU_GEM_DOMAIN_OA)) {
 
        memset(args, 0, sizeof(*args));
        args->out.handle = handle;
-       up_read(&adev->exclusive_lock);
        return 0;
 
 error_unlock:
-       up_read(&adev->exclusive_lock);
        r = amdgpu_gem_handle_lockup(adev, r);
        return r;
 }
                return -EACCES;
        }
 
-       down_read(&adev->exclusive_lock);
-
        /* create a gem object to contain this object in */
        r = amdgpu_gem_object_create(adev, args->size, 0,
                                     AMDGPU_GEM_DOMAIN_CPU, 0,
                goto handle_lockup;
 
        args->handle = handle;
-       up_read(&adev->exclusive_lock);
        return 0;
 
 release_object:
        drm_gem_object_unreference_unlocked(gobj);
 
 handle_lockup:
-       up_read(&adev->exclusive_lock);
        r = amdgpu_gem_handle_lockup(adev, r);
 
        return r;