return 0;
 }
 
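+/* True when the object is tiled on a platform whose swizzling includes
+ * bit 17 of the physical address; CPU reads and writes must then swizzle
+ * to match what the GPU sees.
+ */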
+static int i915_gem_object_needs_bit17_swizzle(struct drm_gem_object *obj)
+{
+       drm_i915_private_t *dev_priv = obj->dev->dev_private;
+       struct drm_i915_gem_object *obj_priv = obj->driver_private;
+
+       return dev_priv->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_9_10_17 &&
+               obj_priv->tiling_mode != I915_TILING_NONE;
+}
+
 static inline int
 slow_shmem_copy(struct page *dst_page,
                int dst_offset,
        return 0;
 }
 
+static inline int
+slow_shmem_bit17_copy(struct page *gpu_page,
+                     int gpu_offset,
+                     struct page *cpu_page,
+                     int cpu_offset,
+                     int length,
+                     int is_read)
+{
+       char *gpu_vaddr, *cpu_vaddr;
+
+       /* Use the unswizzled path if this page isn't affected. */
+       if ((page_to_phys(gpu_page) & (1 << 17)) == 0) {
+               if (is_read)
+                       return slow_shmem_copy(cpu_page, cpu_offset,
+                                              gpu_page, gpu_offset, length);
+               else
+                       return slow_shmem_copy(gpu_page, gpu_offset,
+                                              cpu_page, cpu_offset, length);
+       }
+
+       gpu_vaddr = kmap_atomic(gpu_page, KM_USER0);
+       if (gpu_vaddr == NULL)
+               return -ENOMEM;
+
+       cpu_vaddr = kmap_atomic(cpu_page, KM_USER1);
+       if (cpu_vaddr == NULL) {
+               kunmap_atomic(gpu_vaddr, KM_USER0);
+               return -ENOMEM;
+       }
+
+       /* Copy the data, XORing A6 with A17; A17 is known to be 1 here,
+        * since the unaffected-page case was handled above.  The caller
+        * already knows to XOR with the other bits (A9 for Y, A9 and A10
+        * for X).
+        */
+       while (length > 0) {
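+               /* Copy at most up to the next 64-byte boundary, so each
+                * chunk stays within a single swizzle unit; flipping A6
+                * is just XORing the offset with 64.
+                */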
+               int cacheline_end = ALIGN(gpu_offset + 1, 64);
+               int this_length = min(cacheline_end - gpu_offset, length);
+               int swizzled_gpu_offset = gpu_offset ^ 64;
+
+               if (is_read) {
+                       memcpy(cpu_vaddr + cpu_offset,
+                              gpu_vaddr + swizzled_gpu_offset,
+                              this_length);
+               } else {
+                       memcpy(gpu_vaddr + swizzled_gpu_offset,
+                              cpu_vaddr + cpu_offset,
+                              this_length);
+               }
+               cpu_offset += this_length;
+               gpu_offset += this_length;
+               length -= this_length;
+       }
+
+       kunmap_atomic(cpu_vaddr, KM_USER1);
+       kunmap_atomic(gpu_vaddr, KM_USER0);
+
+       return 0;
+}
+
 /**
  * This is the fast shmem pread path, which attempts to copy_to_user directly
  * from the backing pages of the object to the user's address space.  On a
        int page_length;
        int ret;
        uint64_t data_ptr = args->data_ptr;
+       int do_bit17_swizzling;
 
        remain = args->size;
 
                goto fail_put_user_pages;
        }
 
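+       /* Decide up front whether this copy must swizzle for bit 17. */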
+       do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);
+
        mutex_lock(&dev->struct_mutex);
 
        ret = i915_gem_object_get_pages(obj);
                if ((data_page_offset + page_length) > PAGE_SIZE)
                        page_length = PAGE_SIZE - data_page_offset;
 
-               ret = slow_shmem_copy(user_pages[data_page_index],
-                                     data_page_offset,
-                                     obj_priv->pages[shmem_page_index],
-                                     shmem_page_offset,
-                                     page_length);
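+               /* The swizzled copy takes the GPU (shmem) page first and
+                * keys its swizzle off that page's physical address.
+                */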
+               if (do_bit17_swizzling) {
+                       ret = slow_shmem_bit17_copy(obj_priv->pages[shmem_page_index],
+                                                   shmem_page_offset,
+                                                   user_pages[data_page_index],
+                                                   data_page_offset,
+                                                   page_length,
+                                                   1);
+               } else {
+                       ret = slow_shmem_copy(user_pages[data_page_index],
+                                             data_page_offset,
+                                             obj_priv->pages[shmem_page_index],
+                                             shmem_page_offset,
+                                             page_length);
+               }
                if (ret)
                        goto fail_put_pages;
 
                return -EINVAL;
        }
 
-       ret = i915_gem_shmem_pread_fast(dev, obj, args, file_priv);
-       if (ret != 0)
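+       /* Bit 17 swizzling requires the slow path, which can swizzle as
+        * it copies; the fast path copies pages straight through.
+        */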
+       if (i915_gem_object_needs_bit17_swizzle(obj)) {
                ret = i915_gem_shmem_pread_slow(dev, obj, args, file_priv);
+       } else {
+               ret = i915_gem_shmem_pread_fast(dev, obj, args, file_priv);
+               if (ret != 0)
+                       ret = i915_gem_shmem_pread_slow(dev, obj, args,
+                                                       file_priv);
+       }
 
        drm_gem_object_unreference(obj);
 
        int page_length;
        int ret;
        uint64_t data_ptr = args->data_ptr;
+       int do_bit17_swizzling;
 
        remain = args->size;
 
                goto fail_put_user_pages;
        }
 
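+       /* Check once whether writes must swizzle for bit 17. */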
+       do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);
+
        mutex_lock(&dev->struct_mutex);
 
        ret = i915_gem_object_get_pages(obj);
                if ((data_page_offset + page_length) > PAGE_SIZE)
                        page_length = PAGE_SIZE - data_page_offset;
 
-               ret = slow_shmem_copy(obj_priv->pages[shmem_page_index],
-                                     shmem_page_offset,
-                                     user_pages[data_page_index],
-                                     data_page_offset,
-                                     page_length);
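+               /* As in the pread slow path, the shmem page goes first so
+                * the copy can swizzle on its physical address.
+                */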
+               if (do_bit17_swizzling) {
+                       ret = slow_shmem_bit17_copy(obj_priv->pages[shmem_page_index],
+                                                   shmem_page_offset,
+                                                   user_pages[data_page_index],
+                                                   data_page_offset,
+                                                   page_length,
+                                                   0);
+               } else {
+                       ret = slow_shmem_copy(obj_priv->pages[shmem_page_index],
+                                             shmem_page_offset,
+                                             user_pages[data_page_index],
+                                             data_page_offset,
+                                             page_length);
+               }
                if (ret)
                        goto fail_put_pages;
 
                        ret = i915_gem_gtt_pwrite_slow(dev, obj, args,
                                                       file_priv);
                }
+       } else if (i915_gem_object_needs_bit17_swizzle(obj)) {
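+               /* The slow path swizzles as it copies; the fast path
+                * cannot, so don't bother trying it.
+                */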
+               ret = i915_gem_shmem_pwrite_slow(dev, obj, args, file_priv);
        } else {
                ret = i915_gem_shmem_pwrite_fast(dev, obj, args, file_priv);
                if (ret == -EFAULT) {
        if (--obj_priv->pages_refcount != 0)
                return;
 
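+       /* Pages may come back at different physical addresses, so record
+        * each page's bit 17 before letting them go.
+        */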
+       if (obj_priv->tiling_mode != I915_TILING_NONE)
+               i915_gem_object_save_bit_17_swizzle(obj);
+
        for (i = 0; i < page_count; i++)
                if (obj_priv->pages[i] != NULL) {
                        if (obj_priv->dirty)
                }
                obj_priv->pages[i] = page;
        }
+
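+       /* Fix up any page that returned with a different bit 17 than it
+        * had when its contents were last swizzled.
+        */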
+       if (obj_priv->tiling_mode != I915_TILING_NONE)
+               i915_gem_object_do_bit_17_swizzle(obj);
+
        return 0;
 }
 
        i915_gem_free_mmap_offset(obj);
 
        drm_free(obj_priv->page_cpu_valid, 1, DRM_MEM_DRIVER);
+       kfree(obj_priv->bit_17);
        drm_free(obj->driver_private, 1, DRM_MEM_DRIVER);
 }
 
 
  *
  */
 
+#include <linux/string.h>
+#include <linux/bitops.h>
 #include "drmP.h"
 #include "drm.h"
 #include "i915_drm.h"
                                swizzle_y = I915_BIT_6_SWIZZLE_9_11;
                        } else {
                                /* Bit 17 swizzling by the CPU in addition. */
-                               swizzle_x = I915_BIT_6_SWIZZLE_UNKNOWN;
-                               swizzle_y = I915_BIT_6_SWIZZLE_UNKNOWN;
+                               swizzle_x = I915_BIT_6_SWIZZLE_9_10_17;
+                               swizzle_y = I915_BIT_6_SWIZZLE_9_17;
                        }
                        break;
                }
                        args->swizzle_mode = dev_priv->mm.bit_6_swizzle_x;
                else
                        args->swizzle_mode = dev_priv->mm.bit_6_swizzle_y;
+
+               /* Hide bit 17 swizzling from the user.  This prevents old Mesa
+                * from aborting the application on sw fallbacks to bit 17,
+                * and we use the pread/pwrite bit17 paths to swizzle for it.
+                * If there was a user that was relying on the swizzle
+                * information for drm_intel_bo_map()ed reads/writes this would
+                * break it, but we don't have any of those.
+                */
+               if (args->swizzle_mode == I915_BIT_6_SWIZZLE_9_17)
+                       args->swizzle_mode = I915_BIT_6_SWIZZLE_9;
+               if (args->swizzle_mode == I915_BIT_6_SWIZZLE_9_10_17)
+                       args->swizzle_mode = I915_BIT_6_SWIZZLE_9_10;
+
                /* If we can't handle the swizzling, make it untiled. */
                if (args->swizzle_mode == I915_BIT_6_SWIZZLE_UNKNOWN) {
                        args->tiling_mode = I915_TILING_NONE;
                DRM_ERROR("unknown tiling mode\n");
        }
 
+       /* Hide bit 17 from the user -- see comment in i915_gem_set_tiling */
+       if (args->swizzle_mode == I915_BIT_6_SWIZZLE_9_17)
+               args->swizzle_mode = I915_BIT_6_SWIZZLE_9;
+       if (args->swizzle_mode == I915_BIT_6_SWIZZLE_9_10_17)
+               args->swizzle_mode = I915_BIT_6_SWIZZLE_9_10;
+
        drm_gem_object_unreference(obj);
        mutex_unlock(&dev->struct_mutex);
 
        return 0;
 }
+
+/**
+ * Swap the two 64-byte halves of each 128-byte block in this page, to
+ * account for the page having a new bit 17 in its physical address and
+ * therefore being interpreted differently by the GPU.
+ */
+static int
+i915_gem_swizzle_page(struct page *page)
+{
+       char *vaddr;
+       int i;
+       char temp[64];
+
+       vaddr = kmap(page);
+       if (vaddr == NULL)
+               return -ENOMEM;
+
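+       /* Exchange the two 64-byte halves of every 128-byte block. */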
+       for (i = 0; i < PAGE_SIZE; i += 128) {
+               memcpy(temp, &vaddr[i], 64);
+               memcpy(&vaddr[i], &vaddr[i + 64], 64);
+               memcpy(&vaddr[i + 64], temp, 64);
+       }
+
+       kunmap(page);
+
+       return 0;
+}
+
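+/* Restore the object's pages to the layout the GPU expects, after they
+ * may have been faulted back in at new physical addresses.
+ */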
+void
+i915_gem_object_do_bit_17_swizzle(struct drm_gem_object *obj)
+{
+       struct drm_device *dev = obj->dev;
+       drm_i915_private_t *dev_priv = dev->dev_private;
+       struct drm_i915_gem_object *obj_priv = obj->driver_private;
+       int page_count = obj->size >> PAGE_SHIFT;
+       int i;
+
+       if (dev_priv->mm.bit_6_swizzle_x != I915_BIT_6_SWIZZLE_9_10_17)
+               return;
+
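+       /* No saved state means the pages were never released while
+        * swizzled, so there is nothing to correct.
+        */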
+       if (obj_priv->bit_17 == NULL)
+               return;
+
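+       /* Reswizzle any page whose current bit 17 differs from the
+        * recorded value.
+        */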
+       for (i = 0; i < page_count; i++) {
+               char new_bit_17 = page_to_phys(obj_priv->pages[i]) >> 17;
+               if ((new_bit_17 & 0x1) !=
+                   (test_bit(i, obj_priv->bit_17) != 0)) {
+                       int ret = i915_gem_swizzle_page(obj_priv->pages[i]);
+                       if (ret != 0) {
+                               DRM_ERROR("Failed to swizzle page\n");
+                               return;
+                       }
+                       set_page_dirty(obj_priv->pages[i]);
+               }
+       }
+}
+
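+/* Record each page's bit 17 so that do_bit_17_swizzle() can detect
+ * changes when the pages are next reacquired.
+ */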
+void
+i915_gem_object_save_bit_17_swizzle(struct drm_gem_object *obj)
+{
+       struct drm_device *dev = obj->dev;
+       drm_i915_private_t *dev_priv = dev->dev_private;
+       struct drm_i915_gem_object *obj_priv = obj->driver_private;
+       int page_count = obj->size >> PAGE_SHIFT;
+       int i;
+
+       if (dev_priv->mm.bit_6_swizzle_x != I915_BIT_6_SWIZZLE_9_10_17)
+               return;
+
+       if (obj_priv->bit_17 == NULL) {
+               obj_priv->bit_17 = kmalloc(BITS_TO_LONGS(page_count) *
+                                          sizeof(long), GFP_KERNEL);
+               if (obj_priv->bit_17 == NULL) {
+                       DRM_ERROR("Failed to allocate memory for bit 17 "
+                                 "record\n");
+                       return;
+               }
+       }
+
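+       /* Snapshot bit 17 of each page's current physical address. */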
+       for (i = 0; i < page_count; i++) {
+               if (page_to_phys(obj_priv->pages[i]) & (1 << 17))
+                       __set_bit(i, obj_priv->bit_17);
+               else
+                       __clear_bit(i, obj_priv->bit_17);
+       }
+}