}
 
 int evergreen_copy_blit(struct radeon_device *rdev,
-                       uint64_t src_offset, uint64_t dst_offset,
-                       unsigned num_pages, struct radeon_fence *fence)
+                       uint64_t src_offset,
+                       uint64_t dst_offset,
+                       unsigned num_gpu_pages,
+                       struct radeon_fence *fence)
 {
        int r;
 
        mutex_lock(&rdev->r600_blit.mutex);
        rdev->r600_blit.vb_ib = NULL;
-       r = evergreen_blit_prepare_copy(rdev, num_pages * RADEON_GPU_PAGE_SIZE);
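+       /* num_gpu_pages counts 4 KiB GPU pages; the product below is the copy size in bytes */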
+       r = evergreen_blit_prepare_copy(rdev, num_gpu_pages * RADEON_GPU_PAGE_SIZE);
        if (r) {
                if (rdev->r600_blit.vb_ib)
                        radeon_ib_free(rdev, &rdev->r600_blit.vb_ib);
                mutex_unlock(&rdev->r600_blit.mutex);
                return r;
        }
-       evergreen_kms_blit_copy(rdev, src_offset, dst_offset, num_pages * RADEON_GPU_PAGE_SIZE);
+       evergreen_kms_blit_copy(rdev, src_offset, dst_offset, num_gpu_pages * RADEON_GPU_PAGE_SIZE);
        evergreen_blit_done_copy(rdev, fence);
        mutex_unlock(&rdev->r600_blit.mutex);
        return 0;
 }
 
 int r100_copy_blit(struct radeon_device *rdev,
                   uint64_t src_offset,
                   uint64_t dst_offset,
-                  unsigned num_pages,
+                  unsigned num_gpu_pages,
                   struct radeon_fence *fence)
 {
        uint32_t cur_pages;
-       uint32_t stride_bytes = PAGE_SIZE;
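+       /* one GPU page (RADEON_GPU_PAGE_SIZE bytes) per blit row, independent of the CPU PAGE_SIZE */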
+       uint32_t stride_bytes = RADEON_GPU_PAGE_SIZE;
        uint32_t pitch;
        uint32_t stride_pixels;
        unsigned ndw;
        int num_loops;
        int r = 0;
 
        /* radeon pitch is /64 */
        pitch = stride_bytes / 64;
        stride_pixels = stride_bytes / 4;
-       num_loops = DIV_ROUND_UP(num_pages, 8191);
+       num_loops = DIV_ROUND_UP(num_gpu_pages, 8191);
 
        /* Ask for enough room for blit + flush + fence */
        ndw = 64 + (10 * num_loops);
        r = radeon_ring_lock(rdev, ndw);
        if (r) {
                DRM_ERROR("radeon: moving bo (%d) asking for %u dw.\n", r, ndw);
                return -EINVAL;
        }
-       while (num_pages > 0) {
-               cur_pages = num_pages;
+       while (num_gpu_pages > 0) {
+               cur_pages = num_gpu_pages;
                if (cur_pages > 8191) {
                        cur_pages = 8191;
                }
-               num_pages -= cur_pages;
+               num_gpu_pages -= cur_pages;
 
                /* pages are in Y direction - height
                   page width in X direction - width */
 
 int r200_copy_dma(struct radeon_device *rdev,
                  uint64_t src_offset,
                  uint64_t dst_offset,
-                 unsigned num_pages,
+                 unsigned num_gpu_pages,
                  struct radeon_fence *fence)
 {
        uint32_t size;
        uint32_t cur_size;
        int i, num_loops;
        int r = 0;
 
        /* radeon pitch is /64 */
-       size = num_pages << PAGE_SHIFT;
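+       /* the shift converts GPU pages to bytes: RADEON_GPU_PAGE_SHIFT == log2(RADEON_GPU_PAGE_SIZE) */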
+       size = num_gpu_pages << RADEON_GPU_PAGE_SHIFT;
        num_loops = DIV_ROUND_UP(size, 0x1FFFFF);
        r = radeon_ring_lock(rdev, num_loops * 4 + 64);
        if (r) {
 
 }
 
 int r600_copy_blit(struct radeon_device *rdev,
-                  uint64_t src_offset, uint64_t dst_offset,
-                  unsigned num_pages, struct radeon_fence *fence)
+                  uint64_t src_offset,
+                  uint64_t dst_offset,
+                  unsigned num_gpu_pages,
+                  struct radeon_fence *fence)
 {
        int r;
 
        mutex_lock(&rdev->r600_blit.mutex);
        rdev->r600_blit.vb_ib = NULL;
-       r = r600_blit_prepare_copy(rdev, num_pages * RADEON_GPU_PAGE_SIZE);
+       r = r600_blit_prepare_copy(rdev, num_gpu_pages * RADEON_GPU_PAGE_SIZE);
        if (r) {
                if (rdev->r600_blit.vb_ib)
                        radeon_ib_free(rdev, &rdev->r600_blit.vb_ib);
                mutex_unlock(&rdev->r600_blit.mutex);
                return r;
        }
-       r600_kms_blit_copy(rdev, src_offset, dst_offset, num_pages * RADEON_GPU_PAGE_SIZE);
+       r600_kms_blit_copy(rdev, src_offset, dst_offset, num_gpu_pages * RADEON_GPU_PAGE_SIZE);
        r600_blit_done_copy(rdev, fence);
        mutex_unlock(&rdev->r600_blit.mutex);
        return 0;
 }
 
 
 #define RADEON_GPU_PAGE_SIZE 4096
 #define RADEON_GPU_PAGE_MASK (RADEON_GPU_PAGE_SIZE - 1)
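+/* log2(RADEON_GPU_PAGE_SIZE); the GPU page size is fixed at 4 KiB even when PAGE_SIZE is larger */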
+#define RADEON_GPU_PAGE_SHIFT 12
 
 struct radeon_gart {
        dma_addr_t                      table_addr;
        int (*copy_blit)(struct radeon_device *rdev,
                         uint64_t src_offset,
                         uint64_t dst_offset,
-                        unsigned num_pages,
+                        unsigned num_gpu_pages,
                         struct radeon_fence *fence);
        int (*copy_dma)(struct radeon_device *rdev,
                        uint64_t src_offset,
                        uint64_t dst_offset,
-                       unsigned num_pages,
+                       unsigned num_gpu_pages,
                        struct radeon_fence *fence);
        int (*copy)(struct radeon_device *rdev,
                    uint64_t src_offset,
                    uint64_t dst_offset,
-                   unsigned num_pages,
+                   unsigned num_gpu_pages,
                    struct radeon_fence *fence);
        uint32_t (*get_engine_clock)(struct radeon_device *rdev);
        void (*set_engine_clock)(struct radeon_device *rdev, uint32_t eng_clock);
 
 int r100_copy_blit(struct radeon_device *rdev,
                   uint64_t src_offset,
                   uint64_t dst_offset,
-                  unsigned num_pages,
+                  unsigned num_gpu_pages,
                   struct radeon_fence *fence);
 int r100_set_surface_reg(struct radeon_device *rdev, int reg,
                         uint32_t tiling_flags, uint32_t pitch,
 extern int r200_copy_dma(struct radeon_device *rdev,
                         uint64_t src_offset,
                         uint64_t dst_offset,
-                        unsigned num_pages,
+                        unsigned num_gpu_pages,
                         struct radeon_fence *fence);
 void r200_set_safe_registers(struct radeon_device *rdev);
 
 int r600_ring_test(struct radeon_device *rdev);
 int r600_copy_blit(struct radeon_device *rdev,
                   uint64_t src_offset, uint64_t dst_offset,
-                  unsigned num_pages, struct radeon_fence *fence);
+                  unsigned num_gpu_pages, struct radeon_fence *fence);
 void r600_hpd_init(struct radeon_device *rdev);
 void r600_hpd_fini(struct radeon_device *rdev);
 bool r600_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd);
 void evergreen_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib);
 int evergreen_copy_blit(struct radeon_device *rdev,
                        uint64_t src_offset, uint64_t dst_offset,
-                       unsigned num_pages, struct radeon_fence *fence);
+                       unsigned num_gpu_pages, struct radeon_fence *fence);
 void evergreen_hpd_init(struct radeon_device *rdev);
 void evergreen_hpd_fini(struct radeon_device *rdev);
 bool evergreen_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd);
 
                DRM_ERROR("Trying to move memory with CP turned off.\n");
                return -EINVAL;
        }
-       r = radeon_copy(rdev, old_start, new_start, new_mem->num_pages, fence);
+
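+       /* TTM's num_pages is in CPU (PAGE_SIZE) pages while the copy hooks now
+        * take GPU pages, so scale the count; this relies on PAGE_SIZE being a
+        * whole multiple of RADEON_GPU_PAGE_SIZE, which BUILD_BUG_ON enforces.
+        */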
+       BUILD_BUG_ON((PAGE_SIZE % RADEON_GPU_PAGE_SIZE) != 0);
+
+       r = radeon_copy(rdev, old_start, new_start,
+                       new_mem->num_pages * (PAGE_SIZE / RADEON_GPU_PAGE_SIZE), /* GPU pages */
+                       fence);
        /* FIXME: handle copy error */
        r = ttm_bo_move_accel_cleanup(bo, (void *)fence, NULL,
                                      evict, no_wait_reserve, no_wait_gpu, new_mem);