goto out;
        }
 
-       /*
-        * Make it present again, Depending on how arch implementes non
-        * accessible ptes, some can allow access by kernel mode.
-        */
-       old_pte = ptep_modify_prot_start(vma, vmf->address, vmf->pte);
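+       /*
+        * Do not make the PTE accessible here.  Mapping the page is delayed
+        * until we know it will not be migrated (out_map below), so that a
+        * misplaced page is not mapped first only to be unmapped again, with
+        * the corresponding TLB shootdown, during migration.
+        */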
+       /* Get the normal PTE */
+       old_pte = ptep_get(vmf->pte);
        pte = pte_modify(old_pte, vma->vm_page_prot);
-       pte = pte_mkyoung(pte);
-       if (was_writable)
-               pte = pte_mkwrite(pte);
-       ptep_modify_prot_commit(vma, vmf->address, vmf->pte, old_pte, pte);
-       update_mmu_cache(vma, vmf->address, vmf->pte);
 
        page = vm_normal_page(vma, vmf->address, pte);
-       if (!page) {
-               pte_unmap_unlock(vmf->pte, vmf->ptl);
-               return 0;
-       }
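+       /*
+        * No backing struct page (e.g. a special PFN mapping): there is
+        * nothing to migrate, just make the PTE accessible again.
+        */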
+       if (!page)
+               goto out_map;
 
        /* TODO: handle PTE-mapped THP */
-       if (PageCompound(page)) {
-               pte_unmap_unlock(vmf->pte, vmf->ptl);
-               return 0;
-       }
+       if (PageCompound(page))
+               goto out_map;
 
        /*
         * Avoid grouping on RO pages in general. RO pages shouldn't hurt as
         * pte_dirty has unpredictable behaviour between PTE scan updates,
         * background writeback, dirty balancing and application behaviour.
         */
-       if (!pte_write(pte))
+       if (!was_writable)
                flags |= TNF_NO_GROUP;
 
        /*
         * Flag if the page is shared between multiple address spaces. This
         * is later used when determining whether to group tasks together
         */
        if (page_mapcount(page) > 1 && (vma->vm_flags & VM_SHARED))
                flags |= TNF_SHARED;
 
        last_cpupid = page_cpupid_last(page);
        page_nid = page_to_nid(page);
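+       /*
+        * numa_migrate_prep() takes a reference on the page and returns the
+        * node to migrate it to, or NUMA_NO_NODE if the page should stay
+        * where it is.
+        */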
        target_nid = numa_migrate_prep(page, vma, vmf->address, page_nid,
                        &flags);
-       pte_unmap_unlock(vmf->pte, vmf->ptl);
        if (target_nid == NUMA_NO_NODE) {
                put_page(page);
-               goto out;
+               goto out_map;
        }
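+       /*
+        * The out_map paths above are reached with the PTE lock held; only
+        * drop it once we have decided to migrate the page.
+        */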
+       pte_unmap_unlock(vmf->pte, vmf->ptl);
 
        /* Migrate to the requested node */
        if (migrate_misplaced_page(page, vma, target_nid)) {
                page_nid = target_nid;
                flags |= TNF_MIGRATED;
-       } else
+       } else {
                flags |= TNF_MIGRATE_FAIL;
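+               /*
+                * Migration failed: map and lock the PTE again and, unless
+                * somebody else changed it in the meantime, fall through to
+                * out_map to make the old page accessible again.
+                */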
+               vmf->pte = pte_offset_map(vmf->pmd, vmf->address);
+               spin_lock(vmf->ptl);
+               if (unlikely(!pte_same(*vmf->pte, vmf->orig_pte))) {
+                       pte_unmap_unlock(vmf->pte, vmf->ptl);
+                       goto out;
+               }
+               goto out_map;
+       }
 
 out:
        if (page_nid != NUMA_NO_NODE)
                task_numa_fault(last_cpupid, page_nid, 1, flags);
        return 0;
+out_map:
+       /*
+        * Make it present again.  Depending on how the arch implements
+        * non-accessible ptes, some may still allow access from kernel mode.
+        */
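+       /*
+        * Use the ptep_modify_prot_start()/commit() pair so the protection
+        * change cannot race with hardware updates of the accessed/dirty
+        * bits, and so architectures can optimize the transition.
+        */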
+       old_pte = ptep_modify_prot_start(vma, vmf->address, vmf->pte);
+       pte = pte_modify(old_pte, vma->vm_page_prot);
+       pte = pte_mkyoung(pte);
+       if (was_writable)
+               pte = pte_mkwrite(pte);
+       ptep_modify_prot_commit(vma, vmf->address, vmf->pte, old_pte, pte);
+       update_mmu_cache(vma, vmf->address, vmf->pte);
+       pte_unmap_unlock(vmf->pte, vmf->ptl);
+       goto out;
 }
 
 static inline vm_fault_t create_huge_pmd(struct vm_fault *vmf)