        drm_framebuffer_cleanup(&ifb->base);
        if (ifb->obj) {
-               drm_gem_object_handle_unreference_unlocked(ifb->obj);
-               drm_gem_object_unreference_unlocked(ifb->obj);
+               drm_gem_object_unreference(ifb->obj);
+               ifb->obj = NULL;
        }
 -
 -      return 0;
  }
  
  int intel_fbdev_init(struct drm_device *dev)
 
   */
  void r600_cp_stop(struct radeon_device *rdev)
  {
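+       /* no CP means no blit copy for bo moves: stay inside visible VRAM */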
+       rdev->mc.active_vram_size = rdev->mc.visible_vram_size;
        WREG32(R_0086D8_CP_ME_CNTL, S_0086D8_CP_ME_HALT(1));
 +      WREG32(SCRATCH_UMSK, 0);
  }
  
  int r600_init_microcode(struct radeon_device *rdev)
 
        memcpy(ptr + rdev->r600_blit.ps_offset, r6xx_ps, r6xx_ps_size * 4);
        radeon_bo_kunmap(rdev->r600_blit.shader_obj);
        radeon_bo_unreserve(rdev->r600_blit.shader_obj);
 +
 +done:
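 +      /* pin the blit shader bo in VRAM and record its GPU address */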
 +      r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false);
 +      if (unlikely(r != 0))
 +              return r;
 +      r = radeon_bo_pin(rdev->r600_blit.shader_obj, RADEON_GEM_DOMAIN_VRAM,
 +                        &rdev->r600_blit.shader_gpu_addr);
 +      radeon_bo_unreserve(rdev->r600_blit.shader_obj);
 +      if (r) {
 +              dev_err(rdev->dev, "(%d) pin blit object failed\n", r);
 +              return r;
 +      }
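+       /* blit shaders are in place: allow use of the full VRAM again */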
+       rdev->mc.active_vram_size = rdev->mc.real_vram_size;
        return 0;
  }
  
 
   */
  void r700_cp_stop(struct radeon_device *rdev)
  {
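+       /* as in r600_cp_stop(): limit VRAM to the visible aperture while halted */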
+       rdev->mc.active_vram_size = rdev->mc.visible_vram_size;
        WREG32(CP_ME_CNTL, (CP_ME_HALT | CP_PFP_HALT));
 +      WREG32(SCRATCH_UMSK, 0);
  }
  
  static int rv770_cp_load_microcode(struct radeon_device *rdev)
 
        return ret;
  }
  
 -              drm_mm_put_block(bo->mem.mm_node);
 -              bo->mem.mm_node = NULL;
+ /**
+  * Call with bo::reserved held and with the lru lock held.
+  * Will release GPU memory type usage on destruction.
+  * This is the place to put in driver specific hooks.
+  * Will release the bo::reserved lock and the
+  * lru lock on exit.
+  */
+ 
+ static void ttm_bo_cleanup_memtype_use(struct ttm_buffer_object *bo)
+ {
+       struct ttm_bo_global *glob = bo->glob;
+ 
+       if (bo->ttm) {
+ 
+               /**
+                * Release the lru_lock, since we don't want to have
+                * an atomic requirement on ttm_tt[unbind|destroy].
+                */
+ 
+               spin_unlock(&glob->lru_lock);
+               ttm_tt_unbind(bo->ttm);
+               ttm_tt_destroy(bo->ttm);
+               bo->ttm = NULL;
+               spin_lock(&glob->lru_lock);
+       }
+ 
+       if (bo->mem.mm_node) {
++              ttm_bo_mem_put(bo, &bo->mem);
+       }
+ 
+       atomic_set(&bo->reserved, 0);
+       wake_up_all(&bo->event_queue);
+       spin_unlock(&glob->lru_lock);
+ }
+ 
+ 
  /**
   * If bo idle, remove from delayed- and lru lists, and unref.
   * If not idle, and already on delayed list, do nothing.