spin_lock(&vm->status_lock);
                        list_add(&entry->base.vm_status, &vm->relocated);
                        spin_unlock(&vm->status_lock);
-                       entry->addr = 0;
                }
 
                if (level < adev->vm_manager.num_level) {
        pt = amdgpu_bo_gpu_offset(bo);
        pt = amdgpu_gart_get_vm_pde(adev, pt);
        /* Don't update huge pages here */
-       if (entry->addr & AMDGPU_PDE_PTE ||
-           entry->addr == (pt | AMDGPU_PTE_VALID)) {
+       if (entry->huge) {
                if (!vm->use_cpu_for_update)
                        amdgpu_job_free(job);
                return 0;
        }
 
-       entry->addr = pt | AMDGPU_PTE_VALID;
-
        if (shadow) {
                pde = shadow_addr + (entry - parent->entries) * 8;
                params.func(&params, pde, pt, 1, 0, AMDGPU_PTE_VALID);
                if (!entry->base.bo)
                        continue;
 
-               entry->addr = ~0ULL;
                spin_lock(&vm->status_lock);
                if (list_empty(&entry->base.vm_status))
                        list_add(&entry->base.vm_status, &vm->relocated);
                flags |= AMDGPU_PDE_PTE;
        }
 
-       if (entry->addr == (dst | flags))
+       if (!entry->huge && !(flags & AMDGPU_PDE_PTE))
                return;
 
-       entry->addr = (dst | flags);
+       entry->huge = !!(flags & AMDGPU_PDE_PTE);
 
        if (use_cpu_update) {
                /* In case a huge page is replaced with a system
                amdgpu_vm_handle_huge_pages(params, entry, parent,
                                            nptes, dst, flags);
                /* We don't need to update PTEs for huge pages */
-               if (entry->addr & AMDGPU_PDE_PTE)
+               if (entry->huge)
                        continue;
 
                pt = entry->base.bo;