#define AMDGPU_GEM_DOMAIN_MAX          0x3
 
-enum amdgpu_bo_shadow {
-       AMDGPU_BO_SHADOW_TO_NONE = 0,
-       AMDGPU_BO_SHADOW_TO_PARENT,
-       AMDGPU_BO_SHADOW_TO_SHADOW,
-};
-
 struct amdgpu_bo {
        /* Protected by gem.mutex */
        struct list_head                list;
        struct drm_gem_object           gem_base;
        struct amdgpu_bo                *parent;
        struct amdgpu_bo                *shadow;
-       /* indicate if need to sync between bo and shadow */
-       enum amdgpu_bo_shadow           backup_shadow;
 
        struct ttm_bo_kmap_obj          dma_buf_vmap;
        struct amdgpu_mn                *mn;
 
        return r;
 }
 
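+/**
+ * amdgpu_bo_backup_to_shadow - copy a BO's contents into its shadow BO
+ * @adev: amdgpu device the buffer belongs to
+ * @ring: ring used to submit the copy
+ * @bo: buffer object to back up
+ * @resv: reservation object to synchronize the copy against
+ * @fence: returned fence that signals when the copy has completed
+ * @direct: whether amdgpu_copy_buffer() should submit the copy directly
+ *
+ * Copies the contents of @bo into its shadow and attaches the resulting
+ * fence to @bo as a shared fence.
+ *
+ * Returns 0 on success, -EINVAL if @bo has no shadow, or a negative error
+ * code if the copy could not be submitted.
+ */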
+int amdgpu_bo_backup_to_shadow(struct amdgpu_device *adev,
+                              struct amdgpu_ring *ring,
+                              struct amdgpu_bo *bo,
+                              struct reservation_object *resv,
+                              struct fence **fence,
+                              bool direct)
+{
+       struct amdgpu_bo *shadow = bo->shadow;
+       uint64_t bo_addr, shadow_addr;
+       int r;
+
+       if (!shadow)
+               return -EINVAL;
+
+       bo_addr = amdgpu_bo_gpu_offset(bo);
+       shadow_addr = amdgpu_bo_gpu_offset(shadow);
+
+       r = reservation_object_reserve_shared(bo->tbo.resv);
+       if (r)
+               goto err;
+
+       r = amdgpu_copy_buffer(ring, bo_addr, shadow_addr,
+                              amdgpu_bo_size(bo), resv, fence,
+                              direct);
+       if (!r)
+               amdgpu_bo_fence(bo, *fence, true);
+
+err:
+       return r;
+}
+
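+/**
+ * amdgpu_bo_restore_from_shadow - copy a shadow BO's contents back into its BO
+ * @adev: amdgpu device the buffer belongs to
+ * @ring: ring used to submit the copy
+ * @bo: buffer object to restore
+ * @resv: reservation object to synchronize the copy against
+ * @fence: returned fence that signals when the copy has completed
+ * @direct: whether amdgpu_copy_buffer() should submit the copy directly
+ *
+ * Copies the contents of @bo's shadow back into @bo and attaches the
+ * resulting fence to @bo as a shared fence.
+ *
+ * Returns 0 on success, -EINVAL if @bo has no shadow, or a negative error
+ * code if the copy could not be submitted.
+ */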
+int amdgpu_bo_restore_from_shadow(struct amdgpu_device *adev,
+                                 struct amdgpu_ring *ring,
+                                 struct amdgpu_bo *bo,
+                                 struct reservation_object *resv,
+                                 struct fence **fence,
+                                 bool direct)
+{
+       struct amdgpu_bo *shadow = bo->shadow;
+       uint64_t bo_addr, shadow_addr;
+       int r;
+
+       if (!shadow)
+               return -EINVAL;
+
+       bo_addr = amdgpu_bo_gpu_offset(bo);
+       shadow_addr = amdgpu_bo_gpu_offset(shadow);
+
+       r = reservation_object_reserve_shared(bo->tbo.resv);
+       if (r)
+               goto err;
+
+       r = amdgpu_copy_buffer(ring, shadow_addr, bo_addr,
+                              amdgpu_bo_size(bo), resv, fence,
+                              direct);
+       if (!r)
+               amdgpu_bo_fence(bo, *fence, true);
+
+err:
+       return r;
+}
+
 int amdgpu_bo_kmap(struct amdgpu_bo *bo, void **ptr)
 {
        bool is_iomem;
 
 void amdgpu_bo_fence(struct amdgpu_bo *bo, struct fence *fence,
                     bool shared);
 u64 amdgpu_bo_gpu_offset(struct amdgpu_bo *bo);
+int amdgpu_bo_backup_to_shadow(struct amdgpu_device *adev,
+                              struct amdgpu_ring *ring,
+                              struct amdgpu_bo *bo,
+                              struct reservation_object *resv,
+                              struct fence **fence, bool direct);
+int amdgpu_bo_restore_from_shadow(struct amdgpu_device *adev,
+                                 struct amdgpu_ring *ring,
+                                 struct amdgpu_bo *bo,
+                                 struct reservation_object *resv,
+                                 struct fence **fence,
+                                 bool direct);
 
 /*
  * sub allocation