        if (!new_table)
                return ERR_PTR(-ENOMEM);
 
-       ret = sg_alloc_table(new_table, table->nents, GFP_KERNEL);
+       ret = sg_alloc_table(new_table, table->orig_nents, GFP_KERNEL);
        if (ret) {
                kfree(new_table);
                return ERR_PTR(-ENOMEM);
        }
 
        new_sg = new_table->sgl;
-       for_each_sg(table->sgl, sg, table->nents, i) {
+       for_each_sgtable_sg(table, sg, i) {
                memcpy(new_sg, sg, sizeof(*sg));
                new_sg->dma_address = 0;
                new_sg = sg_next(new_sg);
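
Note on the counters: sg_alloc_table() and for_each_sgtable_sg() work on the CPU
view of the table, so the duplicate must be sized from orig_nents; nents is
rewritten by dma_map_sgtable() to the (possibly smaller) number of coalesced DMA
segments. A minimal sketch of how the two fields diverge (helper name and
dev_info() message are illustrative only, not part of the patch):

#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>

/* Illustrative only: show orig_nents vs. nents after mapping. */
static int sgt_counts_demo(struct device *dev, struct sg_table *sgt)
{
        int ret = dma_map_sgtable(dev, sgt, DMA_BIDIRECTIONAL, 0);

        if (ret)
                return ret;
        /* orig_nents: entries built by sg_alloc_table() (CPU view);
         * nents: DMA segments after mapping, an IOMMU may merge entries. */
        dev_info(dev, "CPU entries: %u, DMA segments: %u\n",
                 sgt->orig_nents, sgt->nents);
        dma_unmap_sgtable(dev, sgt, DMA_BIDIRECTIONAL, 0);
        return 0;
}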
 {
        struct ion_dma_buf_attachment *a = attachment->priv;
        struct sg_table *table;
+       int ret;
 
        table = a->table;
 
-       if (!dma_map_sg(attachment->dev, table->sgl, table->nents,
-                       direction))
-               return ERR_PTR(-ENOMEM);
+       ret = dma_map_sgtable(attachment->dev, table, direction, 0);
+       if (ret)
+               return ERR_PTR(ret);
 
        return table;
 }
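
dma_map_sgtable() returns 0 or a negative errno (rather than dma_map_sg()'s
"0 means failure" convention), so the error can be propagated directly via
ERR_PTR(ret); on success it stores the mapped segment count in table->nents.
An importer walking the mapped table should then use the DMA-side iterator.
A minimal consumer sketch (function and variable names are illustrative):

#include <linux/dma-mapping.h>
#include <linux/printk.h>
#include <linux/scatterlist.h>

/* Illustrative only: walk the DMA-mapped view of a table. */
static void sgt_dma_walk_demo(struct sg_table *sgt)
{
        struct scatterlist *sg;
        int i;

        /* for_each_sgtable_dma_sg() visits the sgt->nents mapped segments;
         * for_each_sgtable_sg() would visit the orig_nents CPU entries. */
        for_each_sgtable_dma_sg(sgt, sg, i) {
                dma_addr_t addr = sg_dma_address(sg);

                pr_info("segment %d: addr %pad, len %u\n",
                        i, &addr, sg_dma_len(sg));
        }
}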
                              struct sg_table *table,
                              enum dma_data_direction direction)
 {
-       dma_unmap_sg(attachment->dev, table->sgl, table->nents, direction);
+       dma_unmap_sgtable(attachment->dev, table, direction, 0);
 }
 
 static int ion_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma)
        }
 
        mutex_lock(&buffer->lock);
-       list_for_each_entry(a, &buffer->attachments, list) {
-               dma_sync_sg_for_cpu(a->dev, a->table->sgl, a->table->nents,
-                                   direction);
-       }
+       list_for_each_entry(a, &buffer->attachments, list)
+               dma_sync_sgtable_for_cpu(a->dev, a->table, direction);
 
 unlock:
        mutex_unlock(&buffer->lock);
        }
 
        mutex_lock(&buffer->lock);
-       list_for_each_entry(a, &buffer->attachments, list) {
-               dma_sync_sg_for_device(a->dev, a->table->sgl, a->table->nents,
-                                      direction);
-       }
+       list_for_each_entry(a, &buffer->attachments, list)
+               dma_sync_sgtable_for_device(a->dev, a->table, direction);
        mutex_unlock(&buffer->lock);
 
        return 0;
 
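dma_sync_sgtable_for_cpu()/_for_device() likewise take the whole sg_table and
internally pass the entry count the DMA API expects, so the per-attachment sync
no longer needs to pick between nents and orig_nents. A minimal sketch of the
usual CPU-access bracket around such a buffer (names are illustrative):

#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>

/* Illustrative only: bracket CPU access to a DMA-mapped table. */
static void sgt_cpu_access_demo(struct device *dev, struct sg_table *sgt,
                                enum dma_data_direction dir,
                                void (*touch)(struct sg_table *))
{
        /* The sgtable helpers supply the right entry count themselves,
         * so callers pass only the table and the mapping direction. */
        dma_sync_sgtable_for_cpu(dev, sgt, dir);
        touch(sgt);
        dma_sync_sgtable_for_device(dev, sgt, dir);
}
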
 void *ion_heap_map_kernel(struct ion_heap *heap,
                          struct ion_buffer *buffer)
 {
-       struct scatterlist *sg;
-       int i, j;
+       struct sg_page_iter piter;
        void *vaddr;
        pgprot_t pgprot;
        struct sg_table *table = buffer->sg_table;
        else
                pgprot = pgprot_writecombine(PAGE_KERNEL);
 
-       for_each_sg(table->sgl, sg, table->nents, i) {
-               int npages_this_entry = PAGE_ALIGN(sg->length) / PAGE_SIZE;
-               struct page *page = sg_page(sg);
-
-               BUG_ON(i >= npages);
-               for (j = 0; j < npages_this_entry; j++)
-                       *(tmp++) = page++;
+       for_each_sgtable_page(table, &piter, 0) {
+               BUG_ON(tmp - pages >= npages);
+               *tmp++ = sg_page_iter_page(&piter);
        }
+
        vaddr = vmap(pages, npages, VM_MAP, pgprot);
        vfree(pages);
 
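With for_each_sgtable_page() the kernel-mapping path no longer needs per-entry
length arithmetic: the iterator hands out individual struct page pointers even
for multi-page entries. A condensed sketch of the whole flow, since the hunk
above is fragmentary (names are illustrative, error handling trimmed):

#include <linux/err.h>
#include <linux/mm.h>
#include <linux/scatterlist.h>
#include <linux/vmalloc.h>

/* Illustrative only: vmap() every CPU page backing an sg_table. */
static void *sgt_vmap_demo(struct sg_table *sgt, unsigned long npages,
                           pgprot_t pgprot)
{
        struct page **pages = vmalloc(sizeof(*pages) * npages);
        struct page **tmp = pages;
        struct sg_page_iter piter;
        void *vaddr;

        if (!pages)
                return ERR_PTR(-ENOMEM);

        /* The iterator splits each (possibly multi-page) entry into
         * single pages, replacing the old nested sg->length loop. */
        for_each_sgtable_page(sgt, &piter, 0)
                *tmp++ = sg_page_iter_page(&piter);

        vaddr = vmap(pages, npages, VM_MAP, pgprot);
        vfree(pages);

        return vaddr ? vaddr : ERR_PTR(-ENOMEM);
}
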
 int ion_heap_map_user(struct ion_heap *heap, struct ion_buffer *buffer,
                      struct vm_area_struct *vma)
 {
+       struct sg_page_iter piter;
        struct sg_table *table = buffer->sg_table;
        unsigned long addr = vma->vm_start;
-       unsigned long offset = vma->vm_pgoff * PAGE_SIZE;
-       struct scatterlist *sg;
-       int i;
        int ret;
 
-       for_each_sg(table->sgl, sg, table->nents, i) {
-               struct page *page = sg_page(sg);
-               unsigned long remainder = vma->vm_end - addr;
-               unsigned long len = sg->length;
+       for_each_sgtable_page(table, &piter, vma->vm_pgoff) {
+               struct page *page = sg_page_iter_page(&piter);
 
-               if (offset >= sg->length) {
-                       offset -= sg->length;
-                       continue;
-               } else if (offset) {
-                       page += offset / PAGE_SIZE;
-                       len = sg->length - offset;
-                       offset = 0;
-               }
-               len = min(len, remainder);
-               ret = remap_pfn_range(vma, addr, page_to_pfn(page), len,
+               ret = remap_pfn_range(vma, addr, page_to_pfn(page), PAGE_SIZE,
                                      vma->vm_page_prot);
                if (ret)
                        return ret;
-               addr += len;
+               addr += PAGE_SIZE;
                if (addr >= vma->vm_end)
                        return 0;
        }
        return 0;
 }
 
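Passing vma->vm_pgoff as the page iterator's starting offset replaces the old
manual "skip offset bytes across entries" bookkeeping, at the cost of calling
remap_pfn_range() once per page instead of once per contiguous run. A minimal
sketch of that pattern (names are illustrative):

#include <linux/mm.h>
#include <linux/scatterlist.h>

/* Illustrative only: mmap an sg_table's pages one PAGE_SIZE at a time. */
static int sgt_mmap_demo(struct sg_table *sgt, struct vm_area_struct *vma)
{
        unsigned long addr = vma->vm_start;
        struct sg_page_iter piter;
        int ret;

        /* Starting at vma->vm_pgoff skips straight to the first page the
         * caller asked for, across entry boundaries. */
        for_each_sgtable_page(sgt, &piter, vma->vm_pgoff) {
                ret = remap_pfn_range(vma, addr,
                                      page_to_pfn(sg_page_iter_page(&piter)),
                                      PAGE_SIZE, vma->vm_page_prot);
                if (ret)
                        return ret;
                addr += PAGE_SIZE;
                if (addr >= vma->vm_end)
                        break;
        }
        return 0;
}
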
-static int ion_heap_sglist_zero(struct scatterlist *sgl, unsigned int nents,
-                               pgprot_t pgprot)
+static int ion_heap_sglist_zero(struct sg_table *sgt, pgprot_t pgprot)
 {
        int p = 0;
        int ret = 0;
        struct sg_page_iter piter;
        struct page *pages[32];
 
-       for_each_sg_page(sgl, &piter, nents, 0) {
+       for_each_sgtable_page(sgt, &piter, 0) {
                pages[p++] = sg_page_iter_page(&piter);
                if (p == ARRAY_SIZE(pages)) {
                        ret = ion_heap_clear_pages(pages, p, pgprot);
        else
                pgprot = pgprot_writecombine(PAGE_KERNEL);
 
-       return ion_heap_sglist_zero(table->sgl, table->nents, pgprot);
+       return ion_heap_sglist_zero(table, pgprot);
 }
 
 void ion_heap_freelist_add(struct ion_heap *heap, struct ion_buffer *buffer)