while (cursor.pfn < frag_start) {
                                amdgpu_vm_free_pts(adev, params->vm, &cursor);
                                amdgpu_vm_pt_next(adev, &cursor);
+                               params->table_freed = true;
                        }
 
                } else if (frag >= shift) {
  * @res: ttm_resource to map
  * @pages_addr: DMA addresses to use for mapping
  * @fence: optional resulting fence
+ * @table_freed: optional output; set to true if one or more page tables were freed
  *
  * Fill in the page table entries between @start and @last.
  *
                                uint64_t flags, uint64_t offset,
                                struct ttm_resource *res,
                                dma_addr_t *pages_addr,
-                               struct dma_fence **fence)
+                               struct dma_fence **fence,
+                               bool *table_freed)
 {
        struct amdgpu_vm_update_params params;
        struct amdgpu_res_cursor cursor;
 
        r = vm->update_funcs->commit(¶ms, fence);
 
+       if (table_freed)
+               *table_freed = params.table_freed;
+
 error_unlock:
        amdgpu_vm_eviction_unlock(vm);
        return r;
                                                resv, mapping->start,
                                                mapping->last, update_flags,
                                                mapping->offset, mem,
-                                               pages_addr, last_update);
+                                               pages_addr, last_update, NULL);
                if (r)
                        return r;
        }
                r = amdgpu_vm_bo_update_mapping(adev, adev, vm, false, false,
                                                resv, mapping->start,
                                                mapping->last, init_pte_value,
-                                               0, NULL, NULL, &f);
+                                               0, NULL, NULL, &f, NULL);
                amdgpu_vm_free_mapping(adev, vm, mapping, f);
                if (r) {
                        dma_fence_put(f);
        }
 
        r = amdgpu_vm_bo_update_mapping(adev, adev, vm, true, false, NULL, addr,
-                                       addr, flags, value, NULL, NULL,
+                                       addr, flags, value, NULL, NULL, NULL,
                                        NULL);
        if (r)
                goto error_unlock;
 
         * @num_dw_left: number of dw left for the IB
         */
        unsigned int num_dw_left;
+
+       /**
+        * @table_freed: set to true if a page table was freed during the update
+        */
+       bool table_freed;
 };
 
 struct amdgpu_vm_update_funcs {
                                uint64_t flags, uint64_t offset,
                                struct ttm_resource *res,
                                dma_addr_t *pages_addr,
-                               struct dma_fence **fence);
+                               struct dma_fence **fence, bool *free_table);
 int amdgpu_vm_bo_update(struct amdgpu_device *adev,
                        struct amdgpu_bo_va *bo_va,
                        bool clear);
 
 
        return amdgpu_vm_bo_update_mapping(adev, adev, vm, false, true, NULL,
                                           start, last, init_pte_value, 0,
-                                          NULL, NULL, fence);
+                                          NULL, NULL, fence, NULL);
 }
 
 static int
                     struct amdgpu_device *bo_adev, struct dma_fence **fence)
 {
        struct amdgpu_bo_va bo_va;
+       bool table_freed = false;
        uint64_t pte_flags;
        int r = 0;
 
                                        prange->mapping.last, pte_flags,
                                        prange->mapping.offset,
                                        prange->ttm_res,
-                                       dma_addr, &vm->last_update);
+                                       dma_addr, &vm->last_update,
+                                       &table_freed);
        if (r) {
                pr_debug("failed %d to map to gpu 0x%lx\n", r, prange->start);
                goto out;
        if (fence)
                *fence = dma_fence_get(vm->last_update);
 
+       if (table_freed) {
+               struct kfd_process *p;
+
+               p = container_of(prange->svms, struct kfd_process, svms);
+               amdgpu_amdkfd_flush_gpu_tlb_pasid((struct kgd_dev *)adev,
+                                                 p->pasid);
+       }
 out:
        prange->mapping.bo_va = NULL;
        return r;