struct work_struct hotplug_work;
        int num_crtc; /* number of crtcs */
        struct mutex dc_hw_i2c_mutex; /* display controller hw i2c mutex */
+       struct mutex vram_mutex; /* protect vram bos during memory reclock */
 
        /* audio stuff */
        struct timer_list       audio_timer;
 
                spin_lock_init(&rdev->ih.lock);
        mutex_init(&rdev->gem.mutex);
        mutex_init(&rdev->pm.mutex);
+       mutex_init(&rdev->vram_mutex);
        rwlock_init(&rdev->fence_drv.lock);
        INIT_LIST_HEAD(&rdev->gem.objects);
        init_waitqueue_head(&rdev->irq.vblank_queue);
 
 
        radeon_ttm_placement_from_domain(bo, domain);
        /* Kernel allocation are uninterruptible */
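+       /* serialize bo creation against a memory reclock */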
+       mutex_lock(&rdev->vram_mutex);
        r = ttm_bo_init(&rdev->mman.bdev, &bo->tbo, size, type,
                        &bo->placement, 0, 0, !kernel, NULL, size,
                        &radeon_ttm_bo_destroy);
+       mutex_unlock(&rdev->vram_mutex);
        if (unlikely(r != 0)) {
                if (r != -ERESTARTSYS)
                        dev_err(rdev->dev,
+       struct radeon_device *rdev;
+
        if ((*bo) == NULL)
                return;
+       rdev = (*bo)->rdev;
        tbo = &((*bo)->tbo);
+       mutex_lock(&rdev->vram_mutex);
        ttm_bo_unref(&tbo);
+       mutex_unlock(&rdev->vram_mutex);
        if (tbo == NULL)
                *bo = NULL;
 }
 
 static void radeon_pm_idle_work_handler(struct work_struct *work);
 static int radeon_debugfs_pm_init(struct radeon_device *rdev);
 
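+/*
+ * Unmap the CPU mappings of all BOs currently resident in VRAM so that
+ * any access during a reclock faults into radeon_ttm_fault() and blocks
+ * on vram_mutex.
+ */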
+static void radeon_unmap_vram_bos(struct radeon_device *rdev)
+{
+       struct radeon_bo *bo, *n;
+
+       if (list_empty(&rdev->gem.objects))
+               return;
+
+       list_for_each_entry_safe(bo, n, &rdev->gem.objects, list) {
+               if (bo->tbo.mem.mem_type == TTM_PL_VRAM)
+                       ttm_bo_unmap_virtual(&bo->tbo);
+       }
+
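+       /* kernel BOs are not on the GEM list, unmap them explicitly */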
+       if (rdev->gart.table.vram.robj)
+               ttm_bo_unmap_virtual(&rdev->gart.table.vram.robj->tbo);
+
+       if (rdev->stollen_vga_memory)
+               ttm_bo_unmap_virtual(&rdev->stollen_vga_memory->tbo);
+
+       if (rdev->r600_blit.shader_obj)
+               ttm_bo_unmap_virtual(&rdev->r600_blit.shader_obj->tbo);
+}
+
 static void radeon_pm_set_clocks(struct radeon_device *rdev, int static_switch)
 {
        int i;
        rdev->irq.gui_idle = false;
        radeon_irq_set(rdev);
 
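+       /*
+        * Hold vram_mutex and drop CPU mappings so nothing can touch VRAM
+        * while the memory clock is being reprogrammed.
+        */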
+       mutex_lock(&rdev->vram_mutex);
+
+       radeon_unmap_vram_bos(rdev);
+
        if (!static_switch) {
                for (i = 0; i < rdev->num_crtc; i++) {
                        if (rdev->pm.active_crtcs & (1 << i)) {
                        }
                }
        }
+
+       mutex_unlock(&rdev->vram_mutex);
        
        /* update display watermarks based on new power state */
        radeon_update_bandwidth_info(rdev);
 
 static int radeon_ttm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 {
        struct ttm_buffer_object *bo;
+       struct radeon_device *rdev;
        int r;
 
        bo = (struct ttm_buffer_object *)vma->vm_private_data;
        if (bo == NULL) {
                return VM_FAULT_NOPAGE;
        }
+       rdev = radeon_get_rdev(bo->bdev);
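+       /* stall the fault while a reclock holds vram_mutex */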
+       mutex_lock(&rdev->vram_mutex);
        r = ttm_vm_ops->fault(vma, vmf);
+       mutex_unlock(&rdev->vram_mutex);
        return r;
 }