params.vm = vm;
        params.immediate = immediate;
 
-       r = vm->update_funcs->prepare(&params, NULL, AMDGPU_SYNC_EXPLICIT);
+       r = vm->update_funcs->prepare(&params, NULL);
        if (r)
                goto error;
 
  * @unlocked: unlocked invalidation during MM callback
  * @flush_tlb: trigger tlb invalidation after update completed
  * @allow_override: change MTYPE for local NUMA nodes
- * @resv: fences we need to sync to
+ * @sync: fences we need to sync to
  * @start: start of mapped range
  * @last: last mapped entry
  * @flags: flags for the entries
  * 0 for success, negative error code for failure.
  */
 int amdgpu_vm_update_range(struct amdgpu_device *adev, struct amdgpu_vm *vm,
-                          bool immediate, bool unlocked, bool flush_tlb, bool allow_override,
-                          struct dma_resv *resv, uint64_t start, uint64_t last,
-                          uint64_t flags, uint64_t offset, uint64_t vram_base,
+                          bool immediate, bool unlocked, bool flush_tlb,
+                          bool allow_override, struct amdgpu_sync *sync,
+                          uint64_t start, uint64_t last, uint64_t flags,
+                          uint64_t offset, uint64_t vram_base,
                           struct ttm_resource *res, dma_addr_t *pages_addr,
                           struct dma_fence **fence)
 {
        struct amdgpu_vm_tlb_seq_struct *tlb_cb;
        struct amdgpu_vm_update_params params;
        struct amdgpu_res_cursor cursor;
-       enum amdgpu_sync_mode sync_mode;
        int r, idx;
 
        if (!drm_dev_enter(adev_to_drm(adev), &idx))
        params.allow_override = allow_override;
        INIT_LIST_HEAD(&params.tlb_flush_waitlist);
 
-       /* Implicitly sync to command submissions in the same VM before
-        * unmapping. Sync to moving fences before mapping.
-        */
-       if (!(flags & AMDGPU_PTE_VALID))
-               sync_mode = AMDGPU_SYNC_EQ_OWNER;
-       else
-               sync_mode = AMDGPU_SYNC_EXPLICIT;
-
        amdgpu_vm_eviction_lock(vm);
        if (vm->evicting) {
                r = -EBUSY;
                dma_fence_put(tmp);
        }
 
-       r = vm->update_funcs->prepare(&params, resv, sync_mode);
+       r = vm->update_funcs->prepare(&params, sync);
        if (r)
                goto error_free;
 
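Note: with this hunk the choice of sync mode (AMDGPU_SYNC_EQ_OWNER before clearing PTEs, AMDGPU_SYNC_EXPLICIT before mapping) moves out of amdgpu_vm_update_range() and into its callers, which now collect the fences they need in an amdgpu_sync container first. A minimal caller-side sketch of the new convention, built only from the helpers used elsewhere in this patch; adev, vm, start, last and fence are placeholders:

        struct amdgpu_sync sync;
        struct dma_fence *fence = NULL;
        int r;

        amdgpu_sync_create(&sync);
        /* decide up front which fences to wait for, here: everything
         * previously submitted in this VM, as done before unmapping
         */
        r = amdgpu_sync_resv(adev, &sync, vm->root.bo->tbo.base.resv,
                             AMDGPU_SYNC_EQ_OWNER, vm);
        if (!r)
                r = amdgpu_vm_update_range(adev, vm, false, false, true, false,
                                           &sync, start, last, 0, 0, 0,
                                           NULL, NULL, &fence);
        amdgpu_sync_free(&sync);
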
        struct amdgpu_bo *bo = bo_va->base.bo;
        struct amdgpu_vm *vm = bo_va->base.vm;
        struct amdgpu_bo_va_mapping *mapping;
+       struct dma_fence **last_update;
        dma_addr_t *pages_addr = NULL;
        struct ttm_resource *mem;
-       struct dma_fence **last_update;
+       struct amdgpu_sync sync;
        bool flush_tlb = clear;
-       bool uncached;
-       struct dma_resv *resv;
        uint64_t vram_base;
        uint64_t flags;
+       bool uncached;
        int r;
 
+       amdgpu_sync_create(&sync);
        if (clear || !bo) {
                mem = NULL;
-               resv = vm->root.bo->tbo.base.resv;
+
+               /* Implicitly sync to command submissions in the same VM before
+                * unmapping.
+                */
+               r = amdgpu_sync_resv(adev, &sync, vm->root.bo->tbo.base.resv,
+                                    AMDGPU_SYNC_EQ_OWNER, vm);
+               if (r)
+                       goto error_free;
        } else {
                struct drm_gem_object *obj = &bo->tbo.base;
 
-               resv = bo->tbo.base.resv;
                if (obj->import_attach && bo_va->is_xgmi) {
                        struct dma_buf *dma_buf = obj->import_attach->dmabuf;
                        struct drm_gem_object *gobj = dma_buf->priv;
                if (mem && (mem->mem_type == TTM_PL_TT ||
                            mem->mem_type == AMDGPU_PL_PREEMPT))
                        pages_addr = bo->tbo.ttm->dma_address;
+
+               /* Implicitly sync to moving fences before mapping anything */
+               r = amdgpu_sync_resv(adev, &sync, bo->tbo.base.resv,
+                                    AMDGPU_SYNC_EXPLICIT, vm);
+               if (r)
+                       goto error_free;
        }
 
        if (bo) {
                trace_amdgpu_vm_bo_update(mapping);
 
                r = amdgpu_vm_update_range(adev, vm, false, false, flush_tlb,
-                                          !uncached, resv, mapping->start, mapping->last,
-                                          update_flags, mapping->offset,
-                                          vram_base, mem, pages_addr,
-                                          last_update);
+                                          !uncached, &sync, mapping->start,
+                                          mapping->last, update_flags,
+                                          mapping->offset, vram_base, mem,
+                                          pages_addr, last_update);
                if (r)
-                       return r;
+                       goto error_free;
        }
 
        /* If the BO is not in its preferred location add it back to
                        trace_amdgpu_vm_bo_mapping(mapping);
        }
 
-       return 0;
+error_free:
+       amdgpu_sync_free(&sync);
+       return r;
 }
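
Note: amdgpu_vm_bo_update() now leaves through the error_free label on success as well, so the amdgpu_sync container is freed on every exit; this relies on r being 0 after the last successful call. Schematically (illustrative fragment only, not an additional hunk):

        /* all mappings updated successfully, r == 0 at this point */
error_free:
        amdgpu_sync_free(&sync);
        return r;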
 
 /**
                          struct amdgpu_vm *vm,
                          struct dma_fence **fence)
 {
-       struct dma_resv *resv = vm->root.bo->tbo.base.resv;
        struct amdgpu_bo_va_mapping *mapping;
-       uint64_t init_pte_value = 0;
        struct dma_fence *f = NULL;
+       struct amdgpu_sync sync;
        int r;
 
+       /*
+        * Implicitly sync to command submissions in the same VM before
+        * unmapping.
+        */
+       amdgpu_sync_create(&sync);
+       r = amdgpu_sync_resv(adev, &sync, vm->root.bo->tbo.base.resv,
+                            AMDGPU_SYNC_EQ_OWNER, vm);
+       if (r)
+               goto error_free;
+
        while (!list_empty(&vm->freed)) {
                mapping = list_first_entry(&vm->freed,
                        struct amdgpu_bo_va_mapping, list);
                list_del(&mapping->list);
 
                r = amdgpu_vm_update_range(adev, vm, false, false, true, false,
-                                          resv, mapping->start, mapping->last,
-                                          init_pte_value, 0, 0, NULL, NULL,
-                                          &f);
+                                          &sync, mapping->start, mapping->last,
+                                          0, 0, 0, NULL, NULL, &f);
                amdgpu_vm_free_mapping(adev, vm, mapping, f);
                if (r) {
                        dma_fence_put(f);
-                       return r;
+                       goto error_free;
                }
        }
 
                dma_fence_put(f);
        }
 
-       return 0;
+error_free:
+       amdgpu_sync_free(&sync);
+       return r;
 
 }
 
 
 
 struct amdgpu_vm_update_funcs {
        int (*map_table)(struct amdgpu_bo_vm *bo);
-       int (*prepare)(struct amdgpu_vm_update_params *p, struct dma_resv *resv,
-                      enum amdgpu_sync_mode sync_mode);
+       int (*prepare)(struct amdgpu_vm_update_params *p,
+                      struct amdgpu_sync *sync);
        int (*update)(struct amdgpu_vm_update_params *p,
                      struct amdgpu_bo_vm *bo, uint64_t pe, uint64_t addr,
                      unsigned count, uint32_t incr, uint64_t flags);
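
For reference, a minimal sketch of a prepare() implementation against the new callback signature; it mirrors a CPU-style update path that simply waits for the collected fences before touching the page tables. The name example_vm_prepare is hypothetical, and using amdgpu_sync_wait() here is an assumption; real backends may instead attach the fences to the job that carries out the update:

static int example_vm_prepare(struct amdgpu_vm_update_params *p,
                              struct amdgpu_sync *sync)
{
        /* a NULL container means there is nothing to wait for */
        if (!sync)
                return 0;

        /* block until every fence collected by the caller has signaled */
        return amdgpu_sync_wait(sync, true);
}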
 void amdgpu_vm_bo_base_init(struct amdgpu_vm_bo_base *base,
                            struct amdgpu_vm *vm, struct amdgpu_bo *bo);
 int amdgpu_vm_update_range(struct amdgpu_device *adev, struct amdgpu_vm *vm,
-                          bool immediate, bool unlocked, bool flush_tlb, bool allow_override,
-                          struct dma_resv *resv, uint64_t start, uint64_t last,
-                          uint64_t flags, uint64_t offset, uint64_t vram_base,
+                          bool immediate, bool unlocked, bool flush_tlb,
+                          bool allow_override, struct amdgpu_sync *sync,
+                          uint64_t start, uint64_t last, uint64_t flags,
+                          uint64_t offset, uint64_t vram_base,
                           struct ttm_resource *res, dma_addr_t *pages_addr,
                           struct dma_fence **fence);
 int amdgpu_vm_bo_update(struct amdgpu_device *adev,