}
 
 /**
- * amdgpu_sync_wait_resv - Wait for BO reservation fences
+ * amdgpu_bo_sync_wait_resv - Wait for BO reservation fences
  *
- * @bo: buffer object
+ * @adev: amdgpu device pointer
+ * @resv: reservation object to sync to
+ * @sync_mode: synchronization mode
  * @owner: fence owner
  * @intr: Whether the wait is interruptible
  *
+ * Extract the fences from the reservation object and wait for them to finish.
+ *
  * Returns:
  * 0 on success, errno otherwise.
  */
-int amdgpu_bo_sync_wait(struct amdgpu_bo *bo, void *owner, bool intr)
+int amdgpu_bo_sync_wait_resv(struct amdgpu_device *adev, struct dma_resv *resv,
+                            enum amdgpu_sync_mode sync_mode, void *owner,
+                            bool intr)
 {
-       struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
        struct amdgpu_sync sync;
        int r;
 
        amdgpu_sync_create(&sync);
-       amdgpu_sync_resv(adev, &sync, bo->tbo.base.resv,
-                        AMDGPU_SYNC_NE_OWNER, owner);
+       amdgpu_sync_resv(adev, &sync, resv, sync_mode, owner);
        r = amdgpu_sync_wait(&sync, intr);
        amdgpu_sync_free(&sync);
-
        return r;
 }
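
As an illustration of the new helper (a minimal sketch, not part of this patch: the function name is made up and the interruptible-wait choice is arbitrary), a page-table update backend could block on the fences selected by the given sync mode before doing CPU writes:

static int example_cpu_prepare(struct amdgpu_device *adev,
                               struct dma_resv *resv,
                               enum amdgpu_sync_mode sync_mode, void *owner)
{
        /* Nothing to wait for. */
        if (!resv)
                return 0;

        /* Collect the fences matching sync_mode/owner from resv and wait
         * for them interruptibly before touching memory with the CPU.
         */
        return amdgpu_bo_sync_wait_resv(adev, resv, sync_mode, owner, true);
}
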
 
+/**
+ * amdgpu_bo_sync_wait - Wrapper for amdgpu_bo_sync_wait_resv
+ * @bo: buffer object to wait for
+ * @owner: fence owner
+ * @intr: Whether the wait is interruptible
+ *
+ * Wrapper to wait for fences in a BO.
+ *
+ * Returns:
+ * 0 on success, errno otherwise.
+ */
+int amdgpu_bo_sync_wait(struct amdgpu_bo *bo, void *owner, bool intr)
+{
+       struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
+
+       return amdgpu_bo_sync_wait_resv(adev, bo->tbo.base.resv,
+                                       AMDGPU_SYNC_NE_OWNER, owner, intr);
+}
+
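
Call sites that only have a BO can keep using the wrapper; a minimal usage sketch (the helper name is made up, and the AMDGPU_FENCE_OWNER_KFD owner and non-interruptible wait are illustrative, not mandated by this patch):

static int example_wait_for_foreign_fences(struct amdgpu_bo *bo)
{
        /* Wait, non-interruptibly, for every fence on the BO that was not
         * created with AMDGPU_FENCE_OWNER_KFD as owner; the wrapper
         * hardwires AMDGPU_SYNC_NE_OWNER.
         */
        return amdgpu_bo_sync_wait(bo, AMDGPU_FENCE_OWNER_KFD, false);
}
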
 /**
  * amdgpu_bo_gpu_offset - return GPU offset of bo
  * @bo:        amdgpu object for which we query the offset
 
        params.vm = vm;
        params.direct = direct;
 
-       r = vm->update_funcs->prepare(&params, AMDGPU_FENCE_OWNER_KFD, NULL);
+       r = vm->update_funcs->prepare(&params, NULL, AMDGPU_SYNC_EXPLICIT);
        if (r)
                return r;
 
        params.vm = vm;
        params.direct = direct;
 
-       r = vm->update_funcs->prepare(&params, AMDGPU_FENCE_OWNER_VM, NULL);
+       r = vm->update_funcs->prepare(&params, NULL, AMDGPU_SYNC_EXPLICIT);
        if (r)
                return r;
 
  * @adev: amdgpu_device pointer
  * @vm: requested vm
  * @direct: direct submission in a page fault
- * @exclusive: fence we need to sync to
+ * @resv: fences we need to sync to
  * @start: start of mapped range
  * @last: last mapped entry
  * @flags: flags for the entries
  */
 static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
                                       struct amdgpu_vm *vm, bool direct,
-                                      struct dma_fence *exclusive,
+                                      struct dma_resv *resv,
                                       uint64_t start, uint64_t last,
                                       uint64_t flags, uint64_t addr,
                                       dma_addr_t *pages_addr,
                                       struct dma_fence **fence)
 {
        struct amdgpu_vm_update_params params;
-       void *owner = AMDGPU_FENCE_OWNER_VM;
+       enum amdgpu_sync_mode sync_mode;
        int r;
 
        memset(&params, 0, sizeof(params));
        params.direct = direct;
        params.pages_addr = pages_addr;
 
-       /* sync to everything except eviction fences on unmapping */
+       /* Implicitly sync to command submissions in the same VM before
+        * unmapping. Sync to moving fences before mapping.
+        */
        if (!(flags & AMDGPU_PTE_VALID))
-               owner = AMDGPU_FENCE_OWNER_KFD;
+               sync_mode = AMDGPU_SYNC_EQ_OWNER;
+       else
+               sync_mode = AMDGPU_SYNC_EXPLICIT;
 
        amdgpu_vm_eviction_lock(vm);
        if (vm->evicting) {
                r = -EBUSY;
                goto error_unlock;
        }
 
-       r = vm->update_funcs->prepare(&params, owner, exclusive);
+       r = vm->update_funcs->prepare(&params, resv, sync_mode);
        if (r)
                goto error_unlock;
 
  * amdgpu_vm_bo_split_mapping - split a mapping into smaller chunks
  *
  * @adev: amdgpu_device pointer
- * @exclusive: fence we need to sync to
+ * @resv: fences we need to sync to
  * @pages_addr: DMA addresses to use for mapping
  * @vm: requested vm
  * @mapping: mapped range and flags to use for the update
  * 0 for success, -EINVAL for failure.
  */
 static int amdgpu_vm_bo_split_mapping(struct amdgpu_device *adev,
-                                     struct dma_fence *exclusive,
+                                     struct dma_resv *resv,
                                      dma_addr_t *pages_addr,
                                      struct amdgpu_vm *vm,
                                      struct amdgpu_bo_va_mapping *mapping,
                }
 
                last = min((uint64_t)mapping->last, start + max_entries - 1);
-               r = amdgpu_vm_bo_update_mapping(adev, vm, false, exclusive,
+               r = amdgpu_vm_bo_update_mapping(adev, vm, false, resv,
                                                start, last, flags, addr,
                                                dma_addr, fence);
                if (r)
        dma_addr_t *pages_addr = NULL;
        struct ttm_mem_reg *mem;
        struct drm_mm_node *nodes;
-       struct dma_fence *exclusive, **last_update;
+       struct dma_fence **last_update;
+       struct dma_resv *resv;
        uint64_t flags;
        struct amdgpu_device *bo_adev = adev;
        int r;
        if (clear || !bo) {
                mem = NULL;
                nodes = NULL;
-               exclusive = NULL;
+               resv = vm->root.base.bo->tbo.base.resv;
        } else {
                struct ttm_dma_tt *ttm;
 
                        ttm = container_of(bo->tbo.ttm, struct ttm_dma_tt, ttm);
                        pages_addr = ttm->dma_address;
                }
-               exclusive = bo->tbo.moving;
+               resv = bo->tbo.base.resv;
        }
 
        if (bo) {
                flags = 0x0;
        }
 
-       if (clear || (bo && bo->tbo.base.resv == vm->root.base.bo->tbo.base.resv))
+       if (clear || (bo && bo->tbo.base.resv ==
+                     vm->root.base.bo->tbo.base.resv))
                last_update = &vm->last_update;
        else
                last_update = &bo_va->last_pt_update;
        }
 
        list_for_each_entry(mapping, &bo_va->invalids, list) {
-               r = amdgpu_vm_bo_split_mapping(adev, exclusive, pages_addr, vm,
+               r = amdgpu_vm_bo_split_mapping(adev, resv, pages_addr, vm,
                                               mapping, flags, bo_adev, nodes,
                                               last_update);
                if (r)
                          struct amdgpu_vm *vm,
                          struct dma_fence **fence)
 {
+       struct dma_resv *resv = vm->root.base.bo->tbo.base.resv;
        struct amdgpu_bo_va_mapping *mapping;
        uint64_t init_pte_value = 0;
        struct dma_fence *f = NULL;
                    mapping->start < AMDGPU_GMC_HOLE_START)
                        init_pte_value = AMDGPU_PTE_DEFAULT_ATC;
 
-               r = amdgpu_vm_bo_update_mapping(adev, vm, false, NULL,
+               r = amdgpu_vm_bo_update_mapping(adev, vm, false, resv,
                                                mapping->start, mapping->last,
                                                init_pte_value, 0, NULL, &f);
                amdgpu_vm_free_mapping(adev, vm, mapping, f);