unsigned pfn, struct ttm_mem_reg *mem,
                        unsigned npages, uint32_t flags)
 {
-       void __iomem *ptr = (void *)vm->pt;
-       uint64_t addr;
+       struct radeon_ring *ring = &rdev->ring[rdev->asic->vm.pt_ring_index];
+       uint64_t addr, pt = vm->pt_gpu_addr + pfn * 8;
        int i;
 
        addr = flags = cayman_vm_page_flags(rdev, flags);
 
-        for (i = 0; i < npages; ++i, ++pfn) {
-                if (mem) {
-                        addr = radeon_vm_get_addr(rdev, mem, i);
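+       /* one ME_WRITE packet: destination PT address first, then the PTEs */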
+       radeon_ring_write(ring, PACKET3(PACKET3_ME_WRITE, 1 + npages * 2));
+       radeon_ring_write(ring, pt & 0xffffffff);
+       radeon_ring_write(ring, (pt >> 32) & 0xff);
+       for (i = 0; i < npages; ++i) {
+               if (mem) {
+                       addr = radeon_vm_get_addr(rdev, mem, i);
                        addr = addr & 0xFFFFFFFFFFFFF000ULL;
                        addr |= flags;
-                }
-               writeq(addr, ptr + (pfn * 8));
-        }
+               }
+               radeon_ring_write(ring, addr & 0xffffffff);
+               radeon_ring_write(ring, (addr >> 32) & 0xffffffff);
+       }
 }
 
 void cayman_vm_flush(struct radeon_device *rdev, struct radeon_ib *ib)
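
For reference, a minimal sketch of the dword stream the new cayman_vm_set_page() queues on the PT ring, assuming the usual PACKET3 convention that the count field is the number of payload dwords minus one (two destination dwords plus two per page, hence 1 + npages * 2). The sizing helper below is hypothetical and only illustrates how many ring dwords one call consumes:

        /*
         * [0]  PACKET3(PACKET3_ME_WRITE, 1 + npages * 2)   header
         * [1]  pt & 0xffffffff                             PTE destination, low bits
         * [2]  (pt >> 32) & 0xff                           PTE destination, high bits
         * [3+] one 64-bit page table entry per page, low dword then high dword
         */
        static unsigned cayman_vm_set_page_ndw(unsigned npages)
        {
                /* hypothetical helper: header + destination address + entries */
                return 1 + 2 + npages * 2;
        }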
 
                        continue;
 
                list_for_each_entry(bo_va, &vm->va, vm_list) {
-                       struct ttm_mem_reg *mem = NULL;
-                       if (bo_va->valid)
-                               mem = &bo_va->bo->tbo.mem;
-
                        bo_va->valid = false;
-                       r = radeon_vm_bo_update_pte(rdev, vm, bo_va->bo, mem);
-                       if (r) {
-                               DRM_ERROR("Failed to update pte for vm %d!\n", vm->id);
-                       }
                }
        }
        return 0;
        return addr;
 }
 
-/* object have to be reserved & global and local mutex must be locked */
 /**
  * radeon_vm_bo_update_pte - map a bo into the vm page table
  *
  * @rdev: radeon_device pointer
  * @vm: requested vm
  * @bo: radeon buffer object
  * @mem: ttm mem
  *
  * Fill in the page table entries for @bo (cayman+).
  * Returns 0 for success, -EINVAL for failure.
+ *
+ * Object has to be reserved and the global and local mutexes must be locked!
  */
 int radeon_vm_bo_update_pte(struct radeon_device *rdev,
                            struct radeon_vm *vm,
                            struct radeon_bo *bo,
                            struct ttm_mem_reg *mem)
 {
+       unsigned ridx = rdev->asic->vm.pt_ring_index;
+       struct radeon_ring *ring = &rdev->ring[ridx];
+       struct radeon_semaphore *sem = NULL;
        struct radeon_bo_va *bo_va;
-       unsigned ngpu_pages;
+       unsigned ngpu_pages, ndw;
        uint64_t pfn;
+       int r;
 
        /* nothing to do if vm isn't bound */
        if (vm->sa_bo == NULL)
                return -EINVAL;
        }
 
-       if (bo_va->valid && mem)
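+       /* nothing to do if the mapping is already in the requested state */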
+       if ((bo_va->valid && mem) || (!bo_va->valid && mem == NULL))
                return 0;
 
        ngpu_pages = radeon_bo_ngpu_pages(bo);
                if (mem->mem_type == TTM_PL_TT) {
                        bo_va->flags |= RADEON_VM_PAGE_SYSTEM;
                }
-       }
-       if (!bo_va->valid) {
-               mem = NULL;
+               if (!bo_va->valid) {
+                       mem = NULL;
+               }
+       } else {
+               bo_va->valid = false;
        }
        pfn = bo_va->soffset / RADEON_GPU_PAGE_SIZE;
-       radeon_asic_vm_set_page(rdev, bo_va->vm, pfn, mem, ngpu_pages, bo_va->flags);
+
+       if (vm->fence && radeon_fence_signaled(vm->fence)) {
+               radeon_fence_unref(&vm->fence);
+       }
+
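+       /* if the VM's last fence is from another ring, sync to it via semaphore */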
+       if (vm->fence && vm->fence->ring != ridx) {
+               r = radeon_semaphore_create(rdev, &sem);
+               if (r) {
+                       return r;
+               }
+       }
+
+       /* estimate number of dw needed */
+       ndw = 32;
+       ndw += (ngpu_pages >> 12) * 3;
+       ndw += ngpu_pages * 2;
+
+       r = radeon_ring_lock(rdev, ring, ndw);
+       if (r) {
+               return r;
+       }
+
+       if (sem && radeon_fence_need_sync(vm->fence, ridx)) {
+               radeon_semaphore_sync_rings(rdev, sem, vm->fence->ring, ridx);
+               radeon_fence_note_sync(vm->fence, ridx);
+       }
+
+       radeon_asic_vm_set_page(rdev, vm, pfn, mem, ngpu_pages, bo_va->flags);
+
+       radeon_fence_unref(&vm->fence);
+       r = radeon_fence_emit(rdev, &vm->fence, ridx);
+       if (r) {
+               radeon_ring_unlock_undo(rdev, ring);
+               return r;
+       }
+       radeon_ring_unlock_commit(rdev, ring);
+       radeon_semaphore_free(rdev, &sem, vm->fence);
        radeon_fence_unref(&vm->last_flush);
        return 0;
 }
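
Given the rule in the kerneldoc above, a minimal usage sketch for mapping a bo that has already been added to the vm with radeon_vm_bo_add(): reserve the bo, take the global manager lock and then the per-vm mutex (the same ordering radeon_vm_bo_rmv() uses below), and hand the bo's current placement to radeon_vm_bo_update_pte(). The helper name is hypothetical:

        static int example_vm_map_bo(struct radeon_device *rdev,
                                     struct radeon_vm *vm,
                                     struct radeon_bo *bo)
        {
                int r;

                /* caller side of "object has to be reserved" */
                r = radeon_bo_reserve(bo, false);
                if (r)
                        return r;

                /* global manager lock first, then the per-vm mutex */
                mutex_lock(&rdev->vm_manager.lock);
                mutex_lock(&vm->mutex);
                r = radeon_vm_bo_update_pte(rdev, vm, bo, &bo->tbo.mem);
                mutex_unlock(&rdev->vm_manager.lock);
                mutex_unlock(&vm->mutex);

                radeon_bo_unreserve(bo);
                return r;
        }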
                     struct radeon_bo *bo)
 {
        struct radeon_bo_va *bo_va;
+       int r;
 
        bo_va = radeon_bo_va(bo, vm);
        if (bo_va == NULL)
 
        mutex_lock(&rdev->vm_manager.lock);
        mutex_lock(&vm->mutex);
-       radeon_vm_free_pt(rdev, vm);
+       r = radeon_vm_bo_update_pte(rdev, vm, bo, NULL);
        mutex_unlock(&rdev->vm_manager.lock);
        list_del(&bo_va->vm_list);
        mutex_unlock(&vm->mutex);
        list_del(&bo_va->bo_list);
 
        kfree(bo_va);
-       return 0;
+       return r;
 }
 
 /**