--- a/drivers/gpu/drm/radeon/radeon_cs.c
+++ b/drivers/gpu/drm/radeon/radeon_cs.c
@@ ... @@ int radeon_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
        struct radeon_cs_parser parser;
        int r;
 
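+       /* any GPU work done for userspace takes the reset lock for read:
+        * ioctls may run concurrently with one another, but never while
+        * radeon_gpu_reset() holds the lock for write */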
+       down_read(&rdev->exclusive_lock);
        if (!rdev->accel_working) {
+               up_read(&rdev->exclusive_lock);
                return -EBUSY;
        }
        /* initialize parser */
@@ ... @@ int radeon_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
        if (r) {
                DRM_ERROR("Failed to initialize parser !\n");
                radeon_cs_parser_fini(&parser, r);
+               up_read(&rdev->exclusive_lock);
                r = radeon_cs_handle_lockup(rdev, r);
                return r;
        }
@@ ... @@ int radeon_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
                if (r != -ERESTARTSYS)
                        DRM_ERROR("Failed to parse relocation %d!\n", r);
                radeon_cs_parser_fini(&parser, r);
+               up_read(&rdev->exclusive_lock);
                r = radeon_cs_handle_lockup(rdev, r);
                return r;
        }
@@ ... @@ int radeon_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
        }
 out:
        radeon_cs_parser_fini(&parser, r);
+       up_read(&rdev->exclusive_lock);
        r = radeon_cs_handle_lockup(rdev, r);
        return r;
 }
 
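Note the ordering on every exit path above: up_read() comes before radeon_cs_handle_lockup(). That helper turns -EDEADLK into a radeon_gpu_reset() call, and the reset path takes this same semaphore for write, so calling it with the read side still held would deadlock.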
--- a/drivers/gpu/drm/radeon/radeon_device.c
+++ b/drivers/gpu/drm/radeon/radeon_device.c
@@ ... @@ int radeon_device_init(struct radeon_device *rdev,
        mutex_init(&rdev->gem.mutex);
        mutex_init(&rdev->pm.mutex);
        init_rwsem(&rdev->pm.mclk_lock);
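+       /* read side: CS and GEM ioctls; write side: radeon_gpu_reset() */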
+       init_rwsem(&rdev->exclusive_lock);
        init_waitqueue_head(&rdev->irq.vblank_queue);
        init_waitqueue_head(&rdev->irq.idle_queue);
        r = radeon_gem_init(rdev);
@@ ... @@ int radeon_gpu_reset(struct radeon_device *rdev)
        int r;
        int resched;
 
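+       /* take the reset lock for write: this waits for every ioctl that
+        * holds it for read and blocks new ones until the reset is done */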
+       down_write(&rdev->exclusive_lock);
        radeon_save_bios_scratch_regs(rdev);
        /* block TTM */
        resched = ttm_bo_lock_delayed_workqueue(&rdev->mman.bdev);
@@ ... @@ int radeon_gpu_reset(struct radeon_device *rdev)
                dev_info(rdev->dev, "GPU reset failed\n");
        }
 
+       up_write(&rdev->exclusive_lock);
        return r;
 }
 
 
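Taken together, the hunks implement one classic rwsem scheme: every ioctl brackets its GPU work with down_read()/up_read() on rdev->exclusive_lock, and radeon_gpu_reset() brackets the reset with down_write()/up_write(), so a reset first drains all in-flight ioctls and stalls new ones until it completes. A minimal standalone sketch of that pattern (only exclusive_lock and the rwsem calls come from the patch; the two functions are illustrative stand-ins):

#include <linux/rwsem.h>

static DECLARE_RWSEM(exclusive_lock);   /* stands in for rdev->exclusive_lock */

static int any_gpu_ioctl(void)
{
        down_read(&exclusive_lock);     /* shared: readers run concurrently */
        /* ... submit commands, create BOs, ... */
        up_read(&exclusive_lock);       /* must pair with every exit path */
        return 0;
}

static int gpu_reset(void)
{
        down_write(&exclusive_lock);    /* exclusive: waits out all readers */
        /* ... reset the GPU; no ioctl can enter here ... */
        up_write(&exclusive_lock);
        return 0;
}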
--- a/drivers/gpu/drm/radeon/radeon_gem.c
+++ b/drivers/gpu/drm/radeon/radeon_gem.c
@@ ... @@ int radeon_gem_create_ioctl(struct drm_device *dev, void *data,
        uint32_t handle;
        int r;
 
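+       /* hold off any GPU reset while the BO is created */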
+       down_read(&rdev->exclusive_lock);
        /* create a gem object to contain this object in */
        args->size = roundup(args->size, PAGE_SIZE);
        r = radeon_gem_object_create(rdev, args->size, args->alignment,
                                        args->initial_domain, false,
                                        false, &gobj);
        if (r) {
+               up_read(&rdev->exclusive_lock);
                r = radeon_gem_handle_lockup(rdev, r);
                return r;
        }
@@ ... @@ int radeon_gem_create_ioctl(struct drm_device *dev, void *data,
        /* drop reference from allocate - handle holds it now */
        drm_gem_object_unreference_unlocked(gobj);
        if (r) {
+               up_read(&rdev->exclusive_lock);
                r = radeon_gem_handle_lockup(rdev, r);
                return r;
        }
        args->handle = handle;
+       up_read(&rdev->exclusive_lock);
        return 0;
 }
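Each early return that follows the down_read() above gains a matching up_read(). An equivalent shape funnels every exit through a single unlock, sketched below with hypothetical first_step()/second_step() helpers; the patch instead keeps the explicit per-return unlocks, matching the surrounding radeon style:

static int first_step(void);            /* hypothetical */
static int second_step(void);           /* hypothetical */

static int some_ioctl(struct radeon_device *rdev)
{
        int r;

        down_read(&rdev->exclusive_lock);
        r = first_step();
        if (r)
                goto out;
        r = second_step();
out:
        up_read(&rdev->exclusive_lock);
        return r;
}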
 
@@ ... @@ int radeon_gem_set_domain_ioctl(struct drm_device *dev, void *data,
 {
        /* transition the BO to a domain -
         * just validate the BO into a certain domain */
+       struct radeon_device *rdev = dev->dev_private;
        struct drm_radeon_gem_set_domain *args = data;
        struct drm_gem_object *gobj;
        struct radeon_bo *robj;
@@ ... @@ int radeon_gem_set_domain_ioctl(struct drm_device *dev, void *data,
 
        /* for now if someone requests domain CPU -
         * just make sure the buffer is finished with */
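+       /* hold off any GPU reset across the BO lookup and wait */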
+       down_read(&rdev->exclusive_lock);
 
        /* just do a BO wait for now */
        gobj = drm_gem_object_lookup(dev, filp, args->handle);
        if (gobj == NULL) {
+               up_read(&rdev->exclusive_lock);
                return -ENOENT;
        }
        robj = gem_to_radeon_bo(gobj);
@@ ... @@ int radeon_gem_set_domain_ioctl(struct drm_device *dev, void *data,
        r = radeon_gem_set_domain(gobj, args->read_domains, args->write_domain);
 
        drm_gem_object_unreference_unlocked(gobj);
+       up_read(&rdev->exclusive_lock);
        r = radeon_gem_handle_lockup(robj->rdev, r);
        return r;
 }
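A final design note: an rw_semaphore fits this job better than a plain mutex because the read side leaves independent ioctls fully concurrent in the common case, while the rare GPU reset pays the whole cost by draining them with a single down_write().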