 #define WATCH_INACTIVE 0
 #define WATCH_PWRITE   0
 
+#define I915_GEM_PHYS_CURSOR_0 1
+#define I915_GEM_PHYS_CURSOR_1 2
+#define I915_GEM_PHYS_OVERLAY_REGS 3
+#define I915_MAX_PHYS_OBJECT (I915_GEM_PHYS_OVERLAY_REGS)
+
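+/*
+ * One physically contiguous allocation that can back a GEM object for
+ * hardware that requires physical addresses (cursor planes, overlay regs).
+ */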
+struct drm_i915_gem_phys_object {
+       int id;
+       struct page **page_list;
+       drm_dma_handle_t *handle;
+       struct drm_gem_object *cur_obj;
+};
+
 typedef struct _drm_i915_ring_buffer {
        int tail_mask;
        unsigned long Size;
                uint32_t bit_6_swizzle_x;
                /** Bit 6 swizzling required for Y tiling */
                uint32_t bit_6_swizzle_y;
+
+               /* storage for physical objects */
+               struct drm_i915_gem_phys_object *phys_objs[I915_MAX_PHYS_OBJECT];
        } mm;
 } drm_i915_private_t;
 
        /** User space pin count and filp owning the pin */
        uint32_t user_pin_count;
        struct drm_file *pin_filp;
+
+       /** for physically allocated objects */
+       struct drm_i915_gem_phys_object *phys_obj;
 };
 
 /**
 int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf);
 int i915_gem_object_set_to_gtt_domain(struct drm_gem_object *obj,
                                      int write);
+int i915_gem_attach_phys_object(struct drm_device *dev,
+                               struct drm_gem_object *obj, int id);
+void i915_gem_detach_phys_object(struct drm_device *dev,
+                                struct drm_gem_object *obj);
+void i915_gem_free_all_phys_object(struct drm_device *dev);
 
 /* i915_gem_tiling.c */
 void i915_gem_detect_bit_6_swizzle(struct drm_device *dev);
 
 static void i915_gem_object_get_fence_reg(struct drm_gem_object *obj);
 static void i915_gem_clear_fence_reg(struct drm_gem_object *obj);
 static int i915_gem_evict_something(struct drm_device *dev);
+static int i915_gem_phys_pwrite(struct drm_device *dev, struct drm_gem_object *obj,
+                               struct drm_i915_gem_pwrite *args,
+                               struct drm_file *file_priv);
 
 int i915_gem_do_init(struct drm_device *dev, unsigned long start,
                     unsigned long end)
         * pread/pwrite currently are reading and writing from the CPU
         * perspective, requiring manual detiling by the client.
         */
-       if (obj_priv->tiling_mode == I915_TILING_NONE &&
-           dev->gtt_total != 0)
+       if (obj_priv->phys_obj)
+               ret = i915_gem_phys_pwrite(dev, obj, args, file_priv);
+       else if (obj_priv->tiling_mode == I915_TILING_NONE &&
+                dev->gtt_total != 0)
                ret = i915_gem_gtt_pwrite(dev, obj, args, file_priv);
        else
                ret = i915_gem_shmem_pwrite(dev, obj, args, file_priv);
        while (obj_priv->pin_count > 0)
                i915_gem_object_unpin(obj);
 
+       if (obj_priv->phys_obj)
+               i915_gem_detach_phys_object(dev, obj);
+
        i915_gem_object_unbind(obj);
 
        list = &obj->map_list;
 
        i915_gem_detect_bit_6_swizzle(dev);
 }
+
+/*
+ * Create a physically contiguous memory object for the given id and size,
+ * e.g. for the hardware cursor or the overlay registers.
+ */
+int i915_gem_init_phys_object(struct drm_device *dev,
+                             int id, int size)
+{
+       drm_i915_private_t *dev_priv = dev->dev_private;
+       struct drm_i915_gem_phys_object *phys_obj;
+       int ret;
+
+       if (dev_priv->mm.phys_objs[id - 1] || !size)
+               return 0;
+
+       phys_obj = drm_calloc(1, sizeof(struct drm_i915_gem_phys_object), DRM_MEM_DRIVER);
+       if (!phys_obj)
+               return -ENOMEM;
+
+       phys_obj->id = id;
+
+       phys_obj->handle = drm_pci_alloc(dev, size, 0, 0xffffffff);
+       if (!phys_obj->handle) {
+               ret = -ENOMEM;
+               goto kfree_obj;
+       }
+#ifdef CONFIG_X86
+       set_memory_wc((unsigned long)phys_obj->handle->vaddr, phys_obj->handle->size / PAGE_SIZE);
+#endif
+
+       dev_priv->mm.phys_objs[id - 1] = phys_obj;
+
+       return 0;
+kfree_obj:
+       drm_free(phys_obj, sizeof(struct drm_i915_gem_phys_object), DRM_MEM_DRIVER);
+       return ret;
+}
+
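+/*
+ * Free the phys object with the given id, detaching any GEM object that is
+ * still bound to it before releasing the contiguous allocation.
+ */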
+void i915_gem_free_phys_object(struct drm_device *dev, int id)
+{
+       drm_i915_private_t *dev_priv = dev->dev_private;
+       struct drm_i915_gem_phys_object *phys_obj;
+
+       if (!dev_priv->mm.phys_objs[id - 1])
+               return;
+
+       phys_obj = dev_priv->mm.phys_objs[id - 1];
+       phys_obj = dev_priv->mm.phys_objs[id - 1];
+       if (phys_obj->cur_obj)
+               i915_gem_detach_phys_object(dev, phys_obj->cur_obj);
+
+#ifdef CONFIG_X86
+       set_memory_wb((unsigned long)phys_obj->handle->vaddr, phys_obj->handle->size / PAGE_SIZE);
+#endif
+       drm_pci_free(dev, phys_obj->handle);
+       drm_free(phys_obj, sizeof(struct drm_i915_gem_phys_object), DRM_MEM_DRIVER);
+       dev_priv->mm.phys_objs[id - 1] = NULL;
+}
+
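+/* Tear down all allocated phys objects. */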
+void i915_gem_free_all_phys_object(struct drm_device *dev)
+{
+       int i;
+
+       for (i = 0; i < I915_MAX_PHYS_OBJECT; i++)
+               i915_gem_free_phys_object(dev, i + 1);
+}
+
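+/*
+ * Detach a GEM object from its phys object: copy the contents back out of
+ * the contiguous buffer into the object's shmem pages and drop the binding.
+ * No-op if the object has no phys backing.
+ */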
+void i915_gem_detach_phys_object(struct drm_device *dev,
+                                struct drm_gem_object *obj)
+{
+       struct drm_i915_gem_object *obj_priv;
+       int i;
+       int ret;
+       int page_count;
+
+       obj_priv = obj->driver_private;
+       if (!obj_priv->phys_obj)
+               return;
+
+       ret = i915_gem_object_get_page_list(obj);
+       if (ret)
+               goto out;
+
+       page_count = obj->size / PAGE_SIZE;
+
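+       /* copy the contents back out into the object's shmem pages */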
+       for (i = 0; i < page_count; i++) {
+               char *dst = kmap_atomic(obj_priv->page_list[i], KM_USER0);
+               char *src = obj_priv->phys_obj->handle->vaddr + (i * PAGE_SIZE);
+
+               memcpy(dst, src, PAGE_SIZE);
+               kunmap_atomic(dst, KM_USER0);
+       }
+       drm_clflush_pages(obj_priv->page_list, page_count);
+       drm_agp_chipset_flush(dev);
+out:
+       obj_priv->phys_obj->cur_obj = NULL;
+       obj_priv->phys_obj = NULL;
+}
+
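+/*
+ * Bind a GEM object to the phys object with the given id, allocating the
+ * contiguous backing store on first use and copying the object's current
+ * contents into it.  Any previous phys binding is detached first.
+ */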
+int
+i915_gem_attach_phys_object(struct drm_device *dev,
+                           struct drm_gem_object *obj, int id)
+{
+       drm_i915_private_t *dev_priv = dev->dev_private;
+       struct drm_i915_gem_object *obj_priv;
+       int ret = 0;
+       int page_count;
+       int i;
+
+       if (id < 1 || id > I915_MAX_PHYS_OBJECT)
+               return -EINVAL;
+
+       obj_priv = obj->driver_private;
+
+       if (obj_priv->phys_obj) {
+               if (obj_priv->phys_obj->id == id)
+                       return 0;
+               i915_gem_detach_phys_object(dev, obj);
+       }
+
+       /* allocate the backing phys object on first use */
+       if (!dev_priv->mm.phys_objs[id - 1]) {
+               ret = i915_gem_init_phys_object(dev, id,
+                                               obj->size);
+               if (ret) {
+                       DRM_ERROR("failed to init phys object %d size: %zu\n",
+                                 id, obj->size);
+                       goto out;
+               }
+       }
+
+       /* bind to the object */
+       obj_priv->phys_obj = dev_priv->mm.phys_objs[id - 1];
+       obj_priv->phys_obj->cur_obj = obj;
+
+       ret = i915_gem_object_get_page_list(obj);
+       if (ret) {
+               DRM_ERROR("failed to get page list\n");
+               goto out;
+       }
+
+       page_count = obj->size / PAGE_SIZE;
+
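+       /* copy the object's current contents into the contiguous buffer */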
+       for (i = 0; i < page_count; i++) {
+               char *src = kmap_atomic(obj_priv->page_list[i], KM_USER0);
+               char *dst = obj_priv->phys_obj->handle->vaddr + (i * PAGE_SIZE);
+
+               memcpy(dst, src, PAGE_SIZE);
+               kunmap_atomic(src, KM_USER0);
+       }
+
+       return 0;
+out:
+       return ret;
+}
+
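+/*
+ * pwrite fast path for phys-backed objects: copy the user data straight
+ * into the contiguous backing store and flush the chipset caches.
+ */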
+static int
+i915_gem_phys_pwrite(struct drm_device *dev, struct drm_gem_object *obj,
+                    struct drm_i915_gem_pwrite *args,
+                    struct drm_file *file_priv)
+{
+       struct drm_i915_gem_object *obj_priv = obj->driver_private;
+       void *obj_addr;
+       int ret;
+       char __user *user_data;
+
+       user_data = (char __user *) (uintptr_t) args->data_ptr;
+       obj_addr = obj_priv->phys_obj->handle->vaddr + args->offset;
+
+       DRM_DEBUG("obj_addr %p, %lld\n", obj_addr, args->size);
+       ret = copy_from_user(obj_addr, user_data, args->size);
+       if (ret)
+               return -EFAULT;
+
+       drm_agp_chipset_flush(dev);
+       return 0;
+}
 
                return -ENOMEM;
        }
 
-       if (dev_priv->cursor_needs_physical) {
-               addr = dev->agp->base + obj_priv->gtt_offset;
-       } else {
+       /* we only need to pin inside the GTT if the cursor is non-physical */
+       if (!dev_priv->cursor_needs_physical) {
+               ret = i915_gem_object_pin(bo, PAGE_SIZE);
+               if (ret) {
+                       DRM_ERROR("failed to pin cursor bo\n");
+                       drm_gem_object_unreference(bo);
+                       return ret;
+               }
                addr = obj_priv->gtt_offset;
-       }
-
-       ret = i915_gem_object_pin(bo, PAGE_SIZE);
-       if (ret) {
-               DRM_ERROR("failed to pin cursor bo\n");
-               drm_gem_object_unreference(bo);
-               return ret;
+       } else {
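+               /* cursor regs want a physical address, so use a phys object */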
+               ret = i915_gem_attach_phys_object(dev, bo, (pipe == 0) ?
+                                                 I915_GEM_PHYS_CURSOR_0 :
+                                                 I915_GEM_PHYS_CURSOR_1);
+               if (ret) {
+                       DRM_ERROR("failed to attach phys object\n");
+                       drm_gem_object_unreference(bo);
+                       return ret;
+               }
+               addr = obj_priv->phys_obj->handle->busaddr;
        }
 
        temp = 0;
        I915_WRITE(base, addr);
 
        if (intel_crtc->cursor_bo) {
-               i915_gem_object_unpin(intel_crtc->cursor_bo);
+               if (dev_priv->cursor_needs_physical) {
+                       if (intel_crtc->cursor_bo != bo)
+                               i915_gem_detach_phys_object(dev, intel_crtc->cursor_bo);
+               } else {
+                       i915_gem_object_unpin(intel_crtc->cursor_bo);
+               }
                drm_gem_object_unreference(intel_crtc->cursor_bo);
        }