int __must_check i915_gem_object_get_pages(struct drm_i915_gem_object *obj);
 static inline struct page *i915_gem_object_get_page(struct drm_i915_gem_object *obj, int n)
 {
-       struct scatterlist *sg = obj->pages->sgl;
-       int nents = obj->pages->nents;
-       while (nents > SG_MAX_SINGLE_ALLOC) {
-               if (n < SG_MAX_SINGLE_ALLOC - 1)
-                       break;
-
-               sg = sg_chain_ptr(sg + SG_MAX_SINGLE_ALLOC - 1);
-               n -= SG_MAX_SINGLE_ALLOC - 1;
-               nents -= SG_MAX_SINGLE_ALLOC - 1;
-       }
-       return sg_page(sg+n);
+       struct sg_page_iter sg_iter;
+
+       for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, n)
+               return sg_iter.page;
+
+       return NULL;
 }
 static inline void i915_gem_object_pin_pages(struct drm_i915_gem_object *obj)
 {
 
        int obj_do_bit17_swizzling, page_do_bit17_swizzling;
        int prefaulted = 0;
        int needs_clflush = 0;
-       struct scatterlist *sg;
-       int i;
+       struct sg_page_iter sg_iter;
 
        user_data = to_user_ptr(args->data_ptr);
        remain = args->size;
 
        offset = args->offset;
 
-       for_each_sg(obj->pages->sgl, sg, obj->pages->nents, i) {
-               struct page *page;
-
-               if (i < offset >> PAGE_SHIFT)
-                       continue;
+       for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents,
+                        offset >> PAGE_SHIFT) {
+               struct page *page = sg_iter.page;
 
                if (remain <= 0)
                        break;
                if ((shmem_page_offset + page_length) > PAGE_SIZE)
                        page_length = PAGE_SIZE - shmem_page_offset;
 
-               page = sg_page(sg);
                page_do_bit17_swizzling = obj_do_bit17_swizzling &&
                        (page_to_phys(page) & (1 << 17)) != 0;
 
        int hit_slowpath = 0;
        int needs_clflush_after = 0;
        int needs_clflush_before = 0;
-       int i;
-       struct scatterlist *sg;
+       struct sg_page_iter sg_iter;
 
        user_data = to_user_ptr(args->data_ptr);
        remain = args->size;
        offset = args->offset;
        obj->dirty = 1;
 
-       for_each_sg(obj->pages->sgl, sg, obj->pages->nents, i) {
-               struct page *page;
+       for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents,
+                        offset >> PAGE_SHIFT) {
+               struct page *page = sg_iter.page;
                int partial_cacheline_write;
 
-               if (i < offset >> PAGE_SHIFT)
-                       continue;
-
                if (remain <= 0)
                        break;
 
                        ((shmem_page_offset | page_length)
                                & (boot_cpu_data.x86_clflush_size - 1));
 
-               page = sg_page(sg);
                page_do_bit17_swizzling = obj_do_bit17_swizzling &&
                        (page_to_phys(page) & (1 << 17)) != 0;
 
 
        src = obj->pages->sgl;
        dst = st->sgl;
        for (i = 0; i < obj->pages->nents; i++) {
-               sg_set_page(dst, sg_page(src), PAGE_SIZE, 0);
+               sg_set_page(dst, sg_page(src), src->length, 0);
                dst = sg_next(dst);
                src = sg_next(src);
        }
 {
        struct drm_i915_gem_object *obj = dma_buf->priv;
        struct drm_device *dev = obj->base.dev;
-       struct scatterlist *sg;
+       struct sg_page_iter sg_iter;
        struct page **pages;
        int ret, i;
 
 
        ret = -ENOMEM;
 
-       pages = drm_malloc_ab(obj->pages->nents, sizeof(struct page *));
+       pages = drm_malloc_ab(obj->base.size >> PAGE_SHIFT, sizeof(*pages));
        if (pages == NULL)
                goto error;
 
-       for_each_sg(obj->pages->sgl, sg, obj->pages->nents, i)
-               pages[i] = sg_page(sg);
+       i = 0;
+       for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, 0)
+               pages[i++] = sg_iter.page;
 
-       obj->dma_buf_vmapping = vmap(pages, obj->pages->nents, 0, PAGE_KERNEL);
+       obj->dma_buf_vmapping = vmap(pages, i, 0, PAGE_KERNEL);
        drm_free_large(pages);
 
        if (!obj->dma_buf_vmapping)
 
 void
 i915_gem_object_do_bit_17_swizzle(struct drm_i915_gem_object *obj)
 {
-       struct scatterlist *sg;
-       int page_count = obj->base.size >> PAGE_SHIFT;
+       struct sg_page_iter sg_iter;
        int i;
 
        if (obj->bit_17 == NULL)
                return;
 
-       for_each_sg(obj->pages->sgl, sg, page_count, i) {
-               struct page *page = sg_page(sg);
+       i = 0;
+       for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, 0) {
+               struct page *page = sg_iter.page;
                char new_bit_17 = page_to_phys(page) >> 17;
                if ((new_bit_17 & 0x1) !=
                    (test_bit(i, obj->bit_17) != 0)) {
                        i915_gem_swizzle_page(page);
                        set_page_dirty(page);
                }
+               i++;
        }
 }
 
 void
 i915_gem_object_save_bit_17_swizzle(struct drm_i915_gem_object *obj)
 {
-       struct scatterlist *sg;
+       struct sg_page_iter sg_iter;
        int page_count = obj->base.size >> PAGE_SHIFT;
        int i;
 
                }
        }
 
-       for_each_sg(obj->pages->sgl, sg, page_count, i) {
-               struct page *page = sg_page(sg);
-               if (page_to_phys(page) & (1 << 17))
+       i = 0;
+       for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, 0) {
+               if (page_to_phys(sg_iter.page) & (1 << 17))
                        __set_bit(i, obj->bit_17);
                else
                        __clear_bit(i, obj->bit_17);
+               i++;
        }
 }