if (r)
                return r;
 
-       r = amdgpu_vm_bo_update(adev, fpriv->prt_va, false, NULL);
+       r = amdgpu_vm_bo_update(adev, fpriv->prt_va, false);
        if (r)
                return r;
 
        if (amdgpu_mcbp || amdgpu_sriov_vf(adev)) {
                bo_va = fpriv->csa_va;
                BUG_ON(!bo_va);
-               r = amdgpu_vm_bo_update(adev, bo_va, false, NULL);
+               r = amdgpu_vm_bo_update(adev, bo_va, false);
                if (r)
                        return r;
 
                if (bo_va == NULL)
                        continue;
 
-               r = amdgpu_vm_bo_update(adev, bo_va, false, NULL);
+               r = amdgpu_vm_bo_update(adev, bo_va, false);
                if (r)
                        return r;
 
        if (operation == AMDGPU_VA_OP_MAP ||
            operation == AMDGPU_VA_OP_REPLACE) {
-               r = amdgpu_vm_bo_update(adev, bo_va, false, NULL);
+               r = amdgpu_vm_bo_update(adev, bo_va, false);
                if (r)
                        goto error;
        }
 
        r = vm->update_funcs->commit(&params, fence);
 
        if (table_freed)
-               *table_freed = *table_freed || params.table_freed;
+               *table_freed = params.table_freed;
 
 error_unlock:
        amdgpu_vm_eviction_unlock(vm);
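
The hunk above also changes how the optional table_freed out-parameter is
reported at commit time: the old code OR-accumulated the flag across
successive updates, while the new code overwrites it with the result of the
latest commit only. A minimal standalone sketch of that behavioral
difference, using stand-in types (struct params_t and both helpers below are
illustrative assumptions, not amdgpu code):

#include <stdbool.h>
#include <stdio.h>

/* Stand-in for the update parameters; only the flag of interest. */
struct params_t {
	bool table_freed;
};

/* Old semantics: the flag accumulates across successive commits. */
static void report_accumulate(bool *table_freed, const struct params_t *p)
{
	if (table_freed)
		*table_freed = *table_freed || p->table_freed;
}

/* New semantics: the flag reflects only the most recent commit. */
static void report_overwrite(bool *table_freed, const struct params_t *p)
{
	if (table_freed)
		*table_freed = p->table_freed;
}

int main(void)
{
	/* First commit frees a page table, the second does not. */
	struct params_t first = { .table_freed = true };
	struct params_t second = { .table_freed = false };
	bool acc = false, last = false;

	report_accumulate(&acc, &first);
	report_accumulate(&acc, &second);	/* stays true */
	report_overwrite(&last, &first);
	report_overwrite(&last, &second);	/* overwritten to false */

	printf("accumulated: %d, overwritten: %d\n", acc, last);
	return 0;
}
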
  * 0 for success, -EINVAL for failure.
  */
 int amdgpu_vm_bo_update(struct amdgpu_device *adev, struct amdgpu_bo_va *bo_va,
-                       bool clear, bool *table_freed)
+                       bool clear)
 {
        struct amdgpu_bo *bo = bo_va->base.bo;
        struct amdgpu_vm *vm = bo_va->base.vm;
                                                resv, mapping->start,
                                                mapping->last, update_flags,
                                                mapping->offset, mem,
-                                               pages_addr, last_update, table_freed);
+                                               pages_addr, last_update, NULL);
                if (r)
                        return r;
        }
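
Passing NULL for table_freed in the hunk above is safe because the
out-parameter is only written behind a NULL check (the `if (table_freed)`
guard shown earlier), so amdgpu_vm_bo_update now simply swallows the
freed-table signal on behalf of its callers. A small standalone illustration
of that optional out-parameter pattern (the names are illustrative, not the
amdgpu functions):

#include <stdbool.h>
#include <stddef.h>

/* Optional out-parameter: callers that do not care pass NULL. */
static int update_mapping(bool freed_this_time, bool *table_freed)
{
	/* ... perform the update ... */
	if (table_freed)	/* the guard makes NULL a valid argument */
		*table_freed = freed_this_time;
	return 0;
}

int main(void)
{
	bool freed = false;

	update_mapping(true, &freed);	/* interested caller */
	update_mapping(true, NULL);	/* caller with no use for the flag */
	return freed ? 0 : 1;
}
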
 
        list_for_each_entry_safe(bo_va, tmp, &vm->moved, base.vm_status) {
                /* Per VM BOs never need to be cleared in the page tables */
-               r = amdgpu_vm_bo_update(adev, bo_va, false, NULL);
+               r = amdgpu_vm_bo_update(adev, bo_va, false);
                if (r)
                        return r;
        }
                else
                        clear = true;
 
-               r = amdgpu_vm_bo_update(adev, bo_va, clear, NULL);
+               r = amdgpu_vm_bo_update(adev, bo_va, clear);
                if (r)
                        return r;
 
 
                                struct dma_fence **fence, bool *table_freed);
 int amdgpu_vm_bo_update(struct amdgpu_device *adev,
                        struct amdgpu_bo_va *bo_va,
-                       bool clear, bool *table_freed);
+                       bool clear);
 bool amdgpu_vm_evictable(struct amdgpu_bo *bo);
 void amdgpu_vm_bo_invalidate(struct amdgpu_device *adev,
                             struct amdgpu_bo *bo, bool evicted);
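
Every call site in this patch migrates the same way: the trailing NULL (or
&table_freed) argument is dropped while adev, bo_va, and clear are unchanged.
A before/after sketch against the new prototype, with stub types and a stub
body so it compiles standalone (the stubs are assumptions, not the real
driver code):

#include <stdbool.h>
#include <stddef.h>

/* Opaque stand-ins so the sketch is self-contained. */
struct amdgpu_device;
struct amdgpu_bo_va;

/* New prototype from the patch: the table_freed out-parameter is gone. */
int amdgpu_vm_bo_update(struct amdgpu_device *adev,
			struct amdgpu_bo_va *bo_va,
			bool clear);

/* Stub body for linking; the real implementation lives in amdgpu_vm.c. */
int amdgpu_vm_bo_update(struct amdgpu_device *adev,
			struct amdgpu_bo_va *bo_va,
			bool clear)
{
	(void)adev;
	(void)bo_va;
	(void)clear;
	return 0;
}

int main(void)
{
	/* Before: r = amdgpu_vm_bo_update(adev, bo_va, false, NULL);
	 * After the patch the trailing argument is simply dropped:
	 */
	return amdgpu_vm_bo_update(NULL, NULL, false);
}
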