unsigned count);
        /* write pte one entry at a time with addr mapping */
        void (*write_pte)(struct amdgpu_ib *ib,
-                         uint64_t pe,
+                         const dma_addr_t *pages_addr, uint64_t pe,
                          uint64_t addr, unsigned count,
                          uint32_t incr, uint32_t flags);
        /* for linear pte/pde updates without addr mapping */
 void amdgpu_vm_flush(struct amdgpu_ring *ring,
                     struct amdgpu_vm *vm,
                     struct fence *updates);
-uint64_t amdgpu_vm_map_gart(struct amdgpu_device *adev, uint64_t addr);
+uint64_t amdgpu_vm_map_gart(const dma_addr_t *pages_addr, uint64_t addr);
 int amdgpu_vm_update_page_directory(struct amdgpu_device *adev,
                                    struct amdgpu_vm *vm);
 int amdgpu_vm_clear_freed(struct amdgpu_device *adev,
 #define amdgpu_gart_flush_gpu_tlb(adev, vmid) (adev)->gart.gart_funcs->flush_gpu_tlb((adev), (vmid))
 #define amdgpu_gart_set_pte_pde(adev, pt, idx, addr, flags) (adev)->gart.gart_funcs->set_pte_pde((adev), (pt), (idx), (addr), (flags))
 #define amdgpu_vm_copy_pte(adev, ib, pe, src, count) ((adev)->vm_manager.vm_pte_funcs->copy_pte((ib), (pe), (src), (count)))
-#define amdgpu_vm_write_pte(adev, ib, pe, addr, count, incr, flags) ((adev)->vm_manager.vm_pte_funcs->write_pte((ib), (pe), (addr), (count), (incr), (flags)))
+#define amdgpu_vm_write_pte(adev, ib, pa, pe, addr, count, incr, flags) ((adev)->vm_manager.vm_pte_funcs->write_pte((ib), (pa), (pe), (addr), (count), (incr), (flags)))
 #define amdgpu_vm_set_pte_pde(adev, ib, pe, addr, count, incr, flags) ((adev)->vm_manager.vm_pte_funcs->set_pte_pde((ib), (pe), (addr), (count), (incr), (flags)))
 #define amdgpu_vm_pad_ib(adev, ib) ((adev)->vm_manager.vm_pte_funcs->pad_ib((ib)))
 #define amdgpu_ring_parse_cs(r, p, ib) ((r)->funcs->parse_cs((p), (ib)))
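
For reference, the write_pte hook that the amdgpu_vm_write_pte macro dispatches to is bound per ASIC through struct amdgpu_vm_pte_funcs. A minimal sketch of the CIK binding with the new signature; the member set mirrors cik_sdma.c of this era and may differ slightly between kernel versions:

static const struct amdgpu_vm_pte_funcs cik_sdma_vm_pte_funcs = {
	.copy_pte = cik_sdma_vm_copy_pte,
	.write_pte = cik_sdma_vm_write_pte,	/* gains the pages_addr parameter */
	.set_pte_pde = cik_sdma_vm_set_pte_pde,
	.pad_ib = cik_sdma_vm_pad_ib,
};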
 
                uint64_t src = adev->gart.table_addr + (addr >> 12) * 8;
                amdgpu_vm_copy_pte(adev, ib, pe, src, count);
 
-       } else if ((flags & AMDGPU_PTE_SYSTEM) || (count < 3)) {
-               amdgpu_vm_write_pte(adev, ib, pe, addr,
-                                     count, incr, flags);
+       } else if (flags & AMDGPU_PTE_SYSTEM) {
+               dma_addr_t *pages_addr = adev->gart.pages_addr;
+               amdgpu_vm_write_pte(adev, ib, pages_addr, pe, addr,
+                                   count, incr, flags);
+
+       } else if (count < 3) {
+               amdgpu_vm_write_pte(adev, ib, NULL, pe, addr,
+                                   count, incr, flags);
 
        } else {
                amdgpu_vm_set_pte_pde(adev, ib, pe, addr,
 }
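
Taken together, the hunk above now selects one of three update paths. A sketch of the dispatch, reduced to a pure decision function (not driver code; gart_copy_possible stands in for the elided first condition guarding the copy_pte path, and the flag value matches AMDGPU_PTE_SYSTEM from amdgpu.h):

#include <stdbool.h>
#include <stdint.h>

#define AMDGPU_PTE_SYSTEM	(1ULL << 1)

enum update_path { USE_COPY_PTE, USE_WRITE_PTE, USE_SET_PTE_PDE };

static enum update_path pick_update_path(bool gart_copy_possible,
					 uint64_t flags, unsigned count)
{
	if (gart_copy_possible)
		return USE_COPY_PTE;	/* copy entries out of the GART table */
	if (flags & AMDGPU_PTE_SYSTEM)
		return USE_WRITE_PTE;	/* write_pte with adev->gart.pages_addr */
	if (count < 3)
		return USE_WRITE_PTE;	/* write_pte with NULL: too few entries
					   to be worth a set_pte_pde packet */
	return USE_SET_PTE_PDE;		/* large linear runs */
}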
 
 /**
- * amdgpu_vm_map_gart - get the physical address of a gart page
+ * amdgpu_vm_map_gart - Resolve gart mapping of addr
  *
- * @adev: amdgpu_device pointer
+ * @pages_addr: optional DMA address to use for lookup
  * @addr: the unmapped addr
  *
  * Look up the physical address of the page that the pte resolves
- * to (cayman+).
- * Returns the physical address of the page.
+ * to and return the value for the page table entry.
  */
-uint64_t amdgpu_vm_map_gart(struct amdgpu_device *adev, uint64_t addr)
+uint64_t amdgpu_vm_map_gart(const dma_addr_t *pages_addr, uint64_t addr)
 {
        uint64_t result;
 
-       /* page table offset */
-       result = adev->gart.pages_addr[addr >> PAGE_SHIFT];
+       if (pages_addr) {
+               /* page table offset */
+               result = pages_addr[addr >> PAGE_SHIFT];
+
+               /* in case cpu page size != gpu page size */
+               result |= addr & (~PAGE_MASK);
+
+       } else {
+               /* No mapping required */
+               result = addr;
+       }
 
-       /* in case cpu page size != gpu page size*/
-       result |= addr & (~PAGE_MASK);
+       result &= 0xFFFFFFFFFFFFF000ULL;
 
        return result;
 }
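
The behaviour of the rewritten function can be exercised outside the kernel. A self-contained sketch, assuming 4 KiB pages and using uint64_t as a stand-in for dma_addr_t (both assumptions, purely for host-side illustration):

/*
 * Standalone sketch (not driver code): exercises the lookup and masking
 * behaviour of amdgpu_vm_map_gart with a fake two-entry pages_addr table.
 * The dma_addr_t typedef and the 4 KiB page constants are host-side
 * stand-ins, not the kernel definitions.
 */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

typedef uint64_t dma_addr_t;

#define PAGE_SHIFT	12
#define PAGE_SIZE	(1ULL << PAGE_SHIFT)
#define PAGE_MASK	(~(PAGE_SIZE - 1))

static uint64_t map_gart(const dma_addr_t *pages_addr, uint64_t addr)
{
	uint64_t result;

	if (pages_addr) {
		result = pages_addr[addr >> PAGE_SHIFT];	/* page table offset */
		result |= addr & (~PAGE_MASK);			/* keep sub-page bits */
	} else {
		result = addr;					/* no mapping required */
	}

	return result & 0xFFFFFFFFFFFFF000ULL;
}

int main(void)
{
	dma_addr_t pages_addr[2] = { 0x80000000ULL, 0xa0000000ULL };

	/* system page: index 1 of the table; the 0x234 offset is masked
	 * back off here because CPU and GPU page sizes are both 4 KiB */
	assert(map_gart(pages_addr, 0x1234) == 0xa0000000ULL);

	/* pages_addr == NULL: address passes through, low 12 bits cleared */
	assert(map_gart(NULL, 0xdeadbeefULL) == 0xdeadb000ULL);

	puts("ok");
	return 0;
}

When CPU pages are larger than the 4 KiB GPU pages, the 4 KiB-aligned part of the intra-page offset ORed in by the pages_addr branch survives the final mask, which is exactly the "cpu page size != gpu page size" case the comment refers to.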
 
  * Update PTEs by writing them manually using sDMA (CIK).
  */
 static void cik_sdma_vm_write_pte(struct amdgpu_ib *ib,
-                                 uint64_t pe,
+                                 const dma_addr_t *pages_addr, uint64_t pe,
                                  uint64_t addr, unsigned count,
                                  uint32_t incr, uint32_t flags)
 {
                ib->ptr[ib->length_dw++] = upper_32_bits(pe);
                ib->ptr[ib->length_dw++] = ndw;
                for (; ndw > 0; ndw -= 2, --count, pe += 8) {
-                       if (flags & AMDGPU_PTE_SYSTEM) {
-                               value = amdgpu_vm_map_gart(ib->ring->adev, addr);
-                               value &= 0xFFFFFFFFFFFFF000ULL;
-                       } else if (flags & AMDGPU_PTE_VALID) {
-                               value = addr;
-                       } else {
-                               value = 0;
-                       }
+                       value = amdgpu_vm_map_gart(pages_addr, addr);
                        addr += incr;
                        value |= flags;
                        ib->ptr[ib->length_dw++] = value;
 
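The same simplification repeats in all three SDMA generations below: the old three-way branch on AMDGPU_PTE_SYSTEM / AMDGPU_PTE_VALID collapses into a single amdgpu_vm_map_gart call, since the function now handles both the lookup and the pass-through case and always applies the 0xFFFFFFFFFFFFF000ULL mask (a no-op for valid VRAM entries, assuming those addresses are GPU-page aligned). The old value = 0 special case for invalid entries is also unnecessary, assuming, as this cleanup implies, that the hardware ignores the address bits of a PTE whose valid bit is clear. A sketch of that invariant, with AMDGPU_PTE_VALID as bit 0 per amdgpu.h:

#include <assert.h>
#include <stdint.h>

#define AMDGPU_PTE_VALID	(1ULL << 0)	/* matches amdgpu.h */

int main(void)
{
	uint64_t addr = 0x123456789000ULL;	/* arbitrary, hypothetical */
	uint64_t flags = 0;			/* invalid entry: VALID not set */

	uint64_t old_value = 0 | flags;					/* old special case */
	uint64_t new_value = (addr & 0xFFFFFFFFFFFFF000ULL) | flags;	/* new path */

	/* Neither PTE is marked valid, so the address bits are don't-care. */
	assert(!(old_value & AMDGPU_PTE_VALID));
	assert(!(new_value & AMDGPU_PTE_VALID));
	return 0;
}
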
  * Update PTEs by writing them manually using sDMA (VI).
  */
 static void sdma_v2_4_vm_write_pte(struct amdgpu_ib *ib,
-                                  uint64_t pe,
+                                  const dma_addr_t *pages_addr, uint64_t pe,
                                   uint64_t addr, unsigned count,
                                   uint32_t incr, uint32_t flags)
 {
                ib->ptr[ib->length_dw++] = upper_32_bits(pe);
                ib->ptr[ib->length_dw++] = ndw;
                for (; ndw > 0; ndw -= 2, --count, pe += 8) {
-                       if (flags & AMDGPU_PTE_SYSTEM) {
-                               value = amdgpu_vm_map_gart(ib->ring->adev, addr);
-                               value &= 0xFFFFFFFFFFFFF000ULL;
-                       } else if (flags & AMDGPU_PTE_VALID) {
-                               value = addr;
-                       } else {
-                               value = 0;
-                       }
+                       value = amdgpu_vm_map_gart(pages_addr, addr);
                        addr += incr;
                        value |= flags;
                        ib->ptr[ib->length_dw++] = value;
 
  * Update PTEs by writing them manually using sDMA (VI).
  */
 static void sdma_v3_0_vm_write_pte(struct amdgpu_ib *ib,
-                                  uint64_t pe,
+                                  const dma_addr_t *pages_addr, uint64_t pe,
                                   uint64_t addr, unsigned count,
                                   uint32_t incr, uint32_t flags)
 {
                ib->ptr[ib->length_dw++] = upper_32_bits(pe);
                ib->ptr[ib->length_dw++] = ndw;
                for (; ndw > 0; ndw -= 2, --count, pe += 8) {
-                       if (flags & AMDGPU_PTE_SYSTEM) {
-                               value = amdgpu_vm_map_gart(ib->ring->adev, addr);
-                               value &= 0xFFFFFFFFFFFFF000ULL;
-                       } else if (flags & AMDGPU_PTE_VALID) {
-                               value = addr;
-                       } else {
-                               value = 0;
-                       }
+                       value = amdgpu_vm_map_gart(pages_addr, addr);
                        addr += incr;
                        value |= flags;
                        ib->ptr[ib->length_dw++] = value;