return 0;
 }
 
-static void i915_gem_object_detach_phys(struct drm_i915_gem_object *obj)
+static int
+i915_gem_object_get_pages_phys(struct drm_i915_gem_object *obj)
 {
-       drm_dma_handle_t *phys = obj->phys_handle;
+       struct address_space *mapping = file_inode(obj->base.filp)->i_mapping;
+       char *vaddr = obj->phys_handle->vaddr;
+       struct sg_table *st;
+       struct scatterlist *sg;
+       int i;
 
-       if (!phys)
-               return;
+       if (WARN_ON(i915_gem_object_needs_bit17_swizzle(obj)))
+               return -EINVAL;
+
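+       /* Copy the existing shmem backing store into the contiguous phys
+        * allocation, flushing each page out of the CPU caches as we go.
+        */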
+       for (i = 0; i < obj->base.size / PAGE_SIZE; i++) {
+               struct page *page;
+               char *src;
+
+               page = shmem_read_mapping_page(mapping, i);
+               if (IS_ERR(page))
+                       return PTR_ERR(page);
+
+               src = kmap_atomic(page);
+               memcpy(vaddr, src, PAGE_SIZE);
+               drm_clflush_virt_range(vaddr, PAGE_SIZE);
+               kunmap_atomic(src);
+
+               page_cache_release(page);
+               vaddr += PAGE_SIZE;
+       }
+
+       i915_gem_chipset_flush(obj->base.dev);
+
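+       /* Wrap the contiguous allocation in a single-entry sg_table so the
+        * rest of the GEM code can treat it like any other page array.
+        */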
+       st = kmalloc(sizeof(*st), GFP_KERNEL);
+       if (st == NULL)
+               return -ENOMEM;
+
+       if (sg_alloc_table(st, 1, GFP_KERNEL)) {
+               kfree(st);
+               return -ENOMEM;
+       }
+
+       sg = st->sgl;
+       sg->offset = 0;
+       sg->length = obj->base.size;
 
-       if (obj->madv == I915_MADV_WILLNEED) {
+       sg_dma_address(sg) = obj->phys_handle->busaddr;
+       sg_dma_len(sg) = obj->base.size;
+
+       obj->pages = st;
+       obj->has_dma_mapping = true;
+       return 0;
+}
+
+static void
+i915_gem_object_put_pages_phys(struct drm_i915_gem_object *obj)
+{
+       int ret;
+
+       BUG_ON(obj->madv == __I915_MADV_PURGED);
+
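+       /* Flush any outstanding GTT writes and move the object to the CPU
+        * domain so the readback below sees coherent data.
+        */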
+       ret = i915_gem_object_set_to_cpu_domain(obj, true);
+       if (ret) {
+               /* In the event of a disaster, abandon all caches and
+                * hope for the best.
+                */
+               WARN_ON(ret != -EIO);
+               obj->base.read_domains = obj->base.write_domain = I915_GEM_DOMAIN_CPU;
+       }
+
+       if (obj->madv == I915_MADV_DONTNEED)
+               obj->dirty = 0;
+
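+       /* If the object was written through the phys mapping, copy the
+        * contents back into the shmem pages before they are released.
+        */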
+       if (obj->dirty) {
                struct address_space *mapping = file_inode(obj->base.filp)->i_mapping;
-               char *vaddr = phys->vaddr;
+               char *vaddr = obj->phys_handle->vaddr;
                int i;
 
                for (i = 0; i < obj->base.size / PAGE_SIZE; i++) {
-                       struct page *page = shmem_read_mapping_page(mapping, i);
-                       if (!IS_ERR(page)) {
-                               char *dst = kmap_atomic(page);
-                               memcpy(dst, vaddr, PAGE_SIZE);
-                               drm_clflush_virt_range(dst, PAGE_SIZE);
-                               kunmap_atomic(dst);
-
-                               set_page_dirty(page);
+                       struct page *page;
+                       char *dst;
+
+                       page = shmem_read_mapping_page(mapping, i);
+                       if (IS_ERR(page))
+                               continue;
+
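+                       /* Flush stale cachelines so the copy below reads
+                        * what was last written through the phys mapping.
+                        */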
+                       dst = kmap_atomic(page);
+                       drm_clflush_virt_range(vaddr, PAGE_SIZE);
+                       memcpy(dst, vaddr, PAGE_SIZE);
+                       kunmap_atomic(dst);
+
+                       set_page_dirty(page);
+                       if (obj->madv == I915_MADV_WILLNEED)
                                mark_page_accessed(page);
-                               page_cache_release(page);
-                       }
+                       page_cache_release(page);
                        vaddr += PAGE_SIZE;
                }
-               i915_gem_chipset_flush(obj->base.dev);
+               obj->dirty = 0;
        }
 
-#ifdef CONFIG_X86
-       set_memory_wb((unsigned long)phys->vaddr, phys->size / PAGE_SIZE);
-#endif
-       drm_pci_free(obj->base.dev, phys);
-       obj->phys_handle = NULL;
+       sg_free_table(obj->pages);
+       kfree(obj->pages);
+
+       obj->has_dma_mapping = false;
+}
+
+static void
+i915_gem_object_release_phys(struct drm_i915_gem_object *obj)
+{
+       drm_pci_free(obj->base.dev, obj->phys_handle);
+}
+
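+/* Backing storage ops used while the object is attached to a contiguous
+ * allocation from drm_pci_alloc().
+ */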
+static const struct drm_i915_gem_object_ops i915_gem_phys_ops = {
+       .get_pages = i915_gem_object_get_pages_phys,
+       .put_pages = i915_gem_object_put_pages_phys,
+       .release = i915_gem_object_release_phys,
+};
+
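+/* Unbind every VMA and drop the current backing pages, holding a temporary
+ * reference so the object itself cannot disappear while we do so.
+ */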
+static int
+drop_pages(struct drm_i915_gem_object *obj)
+{
+       struct i915_vma *vma, *next;
+       int ret;
+
+       drm_gem_object_reference(&obj->base);
+       list_for_each_entry_safe(vma, next, &obj->vma_list, vma_link)
+               if (i915_vma_unbind(vma))
+                       break;
+
+       ret = i915_gem_object_put_pages(obj);
+       drm_gem_object_unreference(&obj->base);
+
+       return ret;
 }
 
 int
 i915_gem_object_attach_phys(struct drm_i915_gem_object *obj,
                            int align)
 {
        drm_dma_handle_t *phys;
-       struct address_space *mapping;
-       char *vaddr;
-       int i;
+       int ret;
 
        if (obj->phys_handle) {
                if ((unsigned long)obj->phys_handle->vaddr & (align -1))
        if (obj->base.filp == NULL)
                return -EINVAL;
 
+       ret = drop_pages(obj);
+       if (ret)
+               return ret;
+
        /* create a new object */
        phys = drm_pci_alloc(obj->base.dev, obj->base.size, align);
        if (!phys)
                return -ENOMEM;
 
-       vaddr = phys->vaddr;
-#ifdef CONFIG_X86
-       set_memory_wc((unsigned long)vaddr, phys->size / PAGE_SIZE);
-#endif
-       mapping = file_inode(obj->base.filp)->i_mapping;
-       for (i = 0; i < obj->base.size / PAGE_SIZE; i++) {
-               struct page *page;
-               char *src;
-
-               page = shmem_read_mapping_page(mapping, i);
-               if (IS_ERR(page)) {
-#ifdef CONFIG_X86
-                       set_memory_wb((unsigned long)phys->vaddr, phys->size / PAGE_SIZE);
-#endif
-                       drm_pci_free(obj->base.dev, phys);
-                       return PTR_ERR(page);
-               }
-
-               src = kmap_atomic(page);
-               memcpy(vaddr, src, PAGE_SIZE);
-               kunmap_atomic(src);
-
-               mark_page_accessed(page);
-               page_cache_release(page);
-
-               vaddr += PAGE_SIZE;
-       }
-
        obj->phys_handle = phys;
-       return 0;
+       obj->ops = &i915_gem_phys_ops;
+
+       return i915_gem_object_get_pages(obj);
 }
 
 static int
        struct drm_device *dev = obj->base.dev;
        void *vaddr = obj->phys_handle->vaddr + args->offset;
        char __user *user_data = to_user_ptr(args->data_ptr);
+       int ret;
+
+       /* We manually control the domain here and pretend that it
+        * remains coherent i.e. in the GTT domain, like shmem_pwrite.
+        */
+       ret = i915_gem_object_wait_rendering(obj, false);
+       if (ret)
+               return ret;
 
        if (__copy_from_user_inatomic_nocache(vaddr, user_data, args->size)) {
                unsigned long unwritten;
                        return -EFAULT;
        }
 
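+       /* Push the written range out of the CPU caches so it is visible to
+        * subsequent reads through the GTT.
+        */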
+       drm_clflush_virt_range(vaddr, args->size);
        i915_gem_chipset_flush(dev);
        return 0;
 }
         * pread/pwrite currently are reading and writing from the CPU
         * perspective, requiring manual detiling by the client.
         */
-       if (obj->phys_handle) {
-               ret = i915_gem_phys_pwrite(obj, args, file);
-               goto out;
-       }
-
        if (obj->tiling_mode == I915_TILING_NONE &&
            obj->base.write_domain != I915_GEM_DOMAIN_CPU &&
            cpu_write_needs_clflush(obj)) {
                 * textures). Fallback to the shmem path in that case. */
        }
 
-       if (ret == -EFAULT || ret == -ENOSPC)
-               ret = i915_gem_shmem_pwrite(dev, obj, args, file);
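+       /* Slow CPU fallback: write through the permanent phys mapping for
+        * phys objects, otherwise through the shmem backing pages.
+        */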
+       if (ret == -EFAULT || ret == -ENOSPC) {
+               if (obj->phys_handle)
+                       ret = i915_gem_phys_pwrite(obj, args, file);
+               else
+                       ret = i915_gem_shmem_pwrite(dev, obj, args, file);
+       }
 
 out:
        drm_gem_object_unreference(&obj->base);
         * Stolen memory is always coherent with the GPU as it is explicitly
         * marked as wc by the system, or the system is cache-coherent.
         */
-       if (obj->stolen)
+       if (obj->stolen || obj->phys_handle)
                return false;
 
        /* If the GPU is snooping the contents of the CPU cache,
                }
        }
 
-       i915_gem_object_detach_phys(obj);
-
        /* Stolen objects don't hold a ref, but do hold pin count. Fix that up
         * before progressing. */
        if (obj->stolen)