base->next = bo->vm_bo;
        bo->vm_bo = base;
 
-       if (bo->tbo.base.resv != vm->root.bo->tbo.base.resv)
+       if (!amdgpu_vm_is_bo_always_valid(vm, bo))
                return;
 
        dma_resv_assert_held(vm->root.bo->tbo.base.resv);
         * For now ignore BOs which are currently locked and potentially
         * changing their location.
         */
-       if (bo->tbo.base.resv != vm->root.bo->tbo.base.resv &&
+       if (!amdgpu_vm_is_bo_always_valid(vm, bo) &&
            !dma_resv_trylock(bo->tbo.base.resv))
                return;
 
        amdgpu_bo_get_memory(bo, stats);
-       if (bo->tbo.base.resv != vm->root.bo->tbo.base.resv)
-               dma_resv_unlock(bo->tbo.base.resv);
+       if (!amdgpu_vm_is_bo_always_valid(vm, bo))
+               dma_resv_unlock(bo->tbo.base.resv);
 }
 
 void amdgpu_vm_get_memory(struct amdgpu_vm *vm,
                uncached = false;
        }
 
-       if (clear || (bo && bo->tbo.base.resv ==
-                     vm->root.bo->tbo.base.resv))
+       if (clear || amdgpu_vm_is_bo_always_valid(vm, bo))
                last_update = &vm->last_update;
        else
                last_update = &bo_va->last_pt_update;
         * the evicted list so that it gets validated again on the
         * next command submission.
         */
-       if (bo && bo->tbo.base.resv == vm->root.bo->tbo.base.resv) {
+       if (amdgpu_vm_is_bo_always_valid(vm, bo)) {
                uint32_t mem_type = bo->tbo.resource->mem_type;
 
                if (!(bo->preferred_domains &
        if (mapping->flags & AMDGPU_PTE_PRT_FLAG(adev))
                amdgpu_vm_prt_get(adev);
 
-       if (bo && bo->tbo.base.resv == vm->root.bo->tbo.base.resv &&
-           !bo_va->base.moved) {
+       if (amdgpu_vm_is_bo_always_valid(vm, bo) && !bo_va->base.moved)
                amdgpu_vm_bo_moved(&bo_va->base);
-       }
+
        trace_amdgpu_vm_bo_map(bo_va, mapping);
 }
 
                if (before->flags & AMDGPU_PTE_PRT_FLAG(adev))
                        amdgpu_vm_prt_get(adev);
 
-               if (bo && bo->tbo.base.resv == vm->root.bo->tbo.base.resv &&
+               if (amdgpu_vm_is_bo_always_valid(vm, bo) &&
                    !before->bo_va->base.moved)
                        amdgpu_vm_bo_moved(&before->bo_va->base);
        } else {
                if (after->flags & AMDGPU_PTE_PRT_FLAG(adev))
                        amdgpu_vm_prt_get(adev);
 
-               if (bo && bo->tbo.base.resv == vm->root.bo->tbo.base.resv &&
+               if (amdgpu_vm_is_bo_always_valid(vm, bo) &&
                    !after->bo_va->base.moved)
                        amdgpu_vm_bo_moved(&after->bo_va->base);
        } else {
 
        if (bo) {
                dma_resv_assert_held(bo->tbo.base.resv);
-               if (bo->tbo.base.resv == vm->root.bo->tbo.base.resv)
+               if (amdgpu_vm_is_bo_always_valid(vm, bo))
                        ttm_bo_set_bulk_move(&bo->tbo, NULL);
 
                for (base = &bo_va->base.bo->vm_bo; *base;
        for (bo_base = bo->vm_bo; bo_base; bo_base = bo_base->next) {
                struct amdgpu_vm *vm = bo_base->vm;
 
-               if (evicted && bo->tbo.base.resv == vm->root.bo->tbo.base.resv) {
+               if (evicted && amdgpu_vm_is_bo_always_valid(vm, bo)) {
                        amdgpu_vm_bo_evicted(bo_base);
                        continue;
                }
 
                if (bo->tbo.type == ttm_bo_type_kernel)
                        amdgpu_vm_bo_relocated(bo_base);
-               else if (bo->tbo.base.resv == vm->root.bo->tbo.base.resv)
+               else if (amdgpu_vm_is_bo_always_valid(vm, bo))
                        amdgpu_vm_bo_moved(bo_base);
                else
                        amdgpu_vm_bo_invalidated(bo_base);
        xa_unlock_irqrestore(&adev->vm_manager.pasids, flags);
 }
 
+/**
+ * amdgpu_vm_is_bo_always_valid - check if the BO is always valid in the VM
+ *
+ * @vm: VM to test against.
+ * @bo: BO to be tested.
+ *
+ * Returns true if the BO shares the dma_resv object with the root PD and is
+ * always guaranteed to be valid inside the VM.
+ */
+bool amdgpu_vm_is_bo_always_valid(struct amdgpu_vm *vm, struct amdgpu_bo *bo)
+{
+       return bo && bo->tbo.base.resv == vm->root.bo->tbo.base.resv;
+}
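
For reference, a minimal sketch of the conversion pattern applied at each
call site above (the surrounding call site is illustrative; only
amdgpu_vm_is_bo_always_valid is introduced by this patch):

        /* Before: open-coded check that the BO shares the root PD's resv. */
        if (bo && bo->tbo.base.resv == vm->root.bo->tbo.base.resv)
                amdgpu_vm_bo_moved(&bo_va->base);

        /* After: the helper also folds in the NULL check on bo. */
        if (amdgpu_vm_is_bo_always_valid(vm, bo))
                amdgpu_vm_bo_moved(&bo_va->base);

Note the helper returns false for a NULL bo, which is why call sites above
can drop their explicit "bo &&" guard when switching to it.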