dev_priv->cfb_size = size;
  
 +      intel_disable_fbc(dev);
+       dev_priv->compressed_fb = compressed_fb;
+ 
        if (IS_GM45(dev)) {
 -              g4x_disable_fbc(dev);
                I915_WRITE(DPFC_CB_BASE, compressed_fb->start);
        } else {
 -              i8xx_disable_fbc(dev);
                I915_WRITE(FBC_CFB_BASE, cfb_base);
                I915_WRITE(FBC_LL_BASE, ll_base);
+               dev_priv->compressed_llb = compressed_llb;
        }
  
        DRM_DEBUG("FBC base 0x%08lx, ll base 0x%08lx, size %dM\n", cfb_base,
 
  
        enum no_fbc_reason no_fbc_reason;
  
+       struct drm_mm_node *compressed_fb;
+       struct drm_mm_node *compressed_llb;
++
 +      /* fbdev interface registered on this device */
 +      struct intel_fbdev *fbdev;
  } drm_i915_private_t;
  
  /** driver private structure attached to each drm_gem_object */
  
  #define HAS_PCH_SPLIT(dev) (IS_IRONLAKE(dev) ||       \
                            IS_GEN6(dev))
+ #define HAS_PIPE_CONTROL(dev) (IS_IRONLAKE(dev) || IS_GEN6(dev))
  
 +#define INTEL_PCH_TYPE(dev) (((struct drm_i915_private *)(dev)->dev_private)->pch_type)
 +#define HAS_PCH_CPT(dev) (INTEL_PCH_TYPE(dev) == PCH_CPT)
 +
  #define PRIMARY_RINGBUFFER_SIZE         (128*1024)
  
  #endif
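(Illustrative only, not part of the hunk: a hedged sketch of how the new HAS_PIPE_CONTROL() predicate is consumed during GEM ring setup. The real i915_gem_init_ringbuffer() does considerably more; this only shows the predicate guarding the pipe-control setup added below.)

static int i915_gem_init_ringbuffer(struct drm_device *dev)
{
	int ret = 0;

	/* 965+ parts get a dedicated PIPE_CONTROL scratch/seqno page. */
	if (HAS_PIPE_CONTROL(dev))
		ret = i915_gem_init_pipe_control(dev);

	return ret;
}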
 
        return 0;
  }
  
 -      obj = drm_gem_object_alloc(dev, 4096);
+ /*
+  * 965+ support PIPE_CONTROL commands, which provide finer grained control
+  * over cache flushing.
+  */
+ static int
+ i915_gem_init_pipe_control(struct drm_device *dev)
+ {
+       drm_i915_private_t *dev_priv = dev->dev_private;
+       struct drm_gem_object *obj;
+       struct drm_i915_gem_object *obj_priv;
+       int ret;
+ 
++      obj = i915_gem_alloc_object(dev, 4096);
+       if (obj == NULL) {
+               DRM_ERROR("Failed to allocate seqno page\n");
+               ret = -ENOMEM;
+               goto err;
+       }
+       obj_priv = to_intel_bo(obj);
+       obj_priv->agp_type = AGP_USER_CACHED_MEMORY;
+ 
+       ret = i915_gem_object_pin(obj, 4096);
+       if (ret)
+               goto err_unref;
+ 
+       dev_priv->seqno_gfx_addr = obj_priv->gtt_offset;
+       dev_priv->seqno_page = kmap(obj_priv->pages[0]);
+       if (dev_priv->seqno_page == NULL) {
+               ret = -ENOMEM;
+               goto err_unpin;
+       }
+ 
+       dev_priv->seqno_obj = obj;
+       memset(dev_priv->seqno_page, 0, PAGE_SIZE);
+ 
+       return 0;
+ 
+ err_unpin:
+       i915_gem_object_unpin(obj);
+ err_unref:
+       drm_gem_object_unreference(obj);
+ err:
+       return ret;
+ }
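(For context, not part of the hunk: the page mapped above is the target of PIPE_CONTROL qword writes when a request's seqno is emitted. A rough sketch of that use, assuming the GFX_OP_PIPE_CONTROL / PIPE_CONTROL_* encodings and the BEGIN_LP_RING()/OUT_RING() helpers from this kernel's i915 headers; the real request-emission path adds workaround flushes around this.)

static void emit_seqno_via_pipe_control(struct drm_device *dev, u32 seqno)
{
	drm_i915_private_t *dev_priv = dev->dev_private;

	BEGIN_LP_RING(4);
	/* Flush caches and write the seqno as a qword into the pinned page. */
	OUT_RING(GFX_OP_PIPE_CONTROL | PIPE_CONTROL_QW_WRITE |
		 PIPE_CONTROL_WC_FLUSH | PIPE_CONTROL_TC_FLUSH);
	OUT_RING(dev_priv->seqno_gfx_addr | PIPE_CONTROL_GLOBAL_GTT);
	OUT_RING(seqno);
	OUT_RING(0);
	ADVANCE_LP_RING();
}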
+ 
  static int
  i915_gem_init_hws(struct drm_device *dev)
  {
        if (!I915_NEED_GFX_HWS(dev))
                return 0;
  
 -      obj = drm_gem_object_alloc(dev, 4096);
 +      obj = i915_gem_alloc_object(dev, 4096);
        if (obj == NULL) {
                DRM_ERROR("Failed to allocate status page\n");
-               return -ENOMEM;
+               ret = -ENOMEM;
+               goto err;
        }
        obj_priv = to_intel_bo(obj);
        obj_priv->agp_type = AGP_USER_CACHED_MEMORY;
 
  {
        uint32_t gb_tile_config, tmp;
  
-       /* FIXME: rv380 one pipes ? */
 -      r100_hdp_reset(rdev);
        if ((rdev->family == CHIP_R300 && rdev->pdev->device != 0x4144) ||
-           (rdev->family == CHIP_R350)) {
+           (rdev->family == CHIP_R350 && rdev->pdev->device != 0x4148)) {
                /* r300,r350 */
                rdev->num_gb_pipes = 2;
        } else {