#endif
 
 #ifdef CONFIG_NUMA_BALANCING
-extern bool pmd_trans_migrating(pmd_t pmd);
 extern int migrate_misplaced_page(struct page *page,
                                  struct vm_area_struct *vma, int node);
 #else
-static inline bool pmd_trans_migrating(pmd_t pmd)
-{
-       return false;
-}
 static inline int migrate_misplaced_page(struct page *page,
                                         struct vm_area_struct *vma, int node)
 {
        return -EAGAIN; /* can't migrate now */
 }
 #endif /* CONFIG_NUMA_BALANCING */
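
For orientation (not part of this hunk): migrate_misplaced_page() returns 1 when the page was migrated and 0 when it was not, and the -EAGAIN stub merely keeps callsites compilable when CONFIG_NUMA_BALANCING is off. The base-page consumer, do_numa_page() in mm/memory.c, uses it roughly like this:

	/*
	 * Sketch of the callsite in do_numa_page(); page, vma,
	 * target_nid, page_nid and flags are locals of that function.
	 */
	if (migrate_misplaced_page(page, vma, target_nid)) {
		page_nid = target_nid;
		flags |= TNF_MIGRATED;
	} else
		flags |= TNF_MIGRATE_FAIL;
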
 
-#if defined(CONFIG_NUMA_BALANCING) && defined(CONFIG_TRANSPARENT_HUGEPAGE)
-extern int migrate_misplaced_transhuge_page(struct mm_struct *mm,
-                       struct vm_area_struct *vma,
-                       pmd_t *pmd, pmd_t entry,
-                       unsigned long address,
-                       struct page *page, int node);
-#else
-static inline int migrate_misplaced_transhuge_page(struct mm_struct *mm,
-                       struct vm_area_struct *vma,
-                       pmd_t *pmd, pmd_t entry,
-                       unsigned long address,
-                       struct page *page, int node)
-{
-       return -EAGAIN;
-}
-#endif /* CONFIG_NUMA_BALANCING && CONFIG_TRANSPARENT_HUGEPAGE*/
-
-
 #ifdef CONFIG_MIGRATION
 
 /*
 
 vm_fault_t do_huge_pmd_numa_page(struct vm_fault *vmf)
 {
        struct vm_area_struct *vma = vmf->vma;
-       pmd_t pmd = vmf->orig_pmd;
-       struct anon_vma *anon_vma = NULL;
+       pmd_t oldpmd = vmf->orig_pmd;
+       pmd_t pmd;
        struct page *page;
        unsigned long haddr = vmf->address & HPAGE_PMD_MASK;
-       int page_nid = NUMA_NO_NODE, this_nid = numa_node_id();
+       int page_nid = NUMA_NO_NODE;
        int target_nid, last_cpupid = -1;
-       bool page_locked;
        bool migrated = false;
-       bool was_writable;
+       bool was_writable = pmd_savedwrite(oldpmd);
        int flags = 0;
 
        vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd);
-       if (unlikely(!pmd_same(pmd, *vmf->pmd)))
-               goto out_unlock;
-
-       /*
-        * If there are potential migrations, wait for completion and retry
-        * without disrupting NUMA hinting information. Do not relock and
-        * check_same as the page may no longer be mapped.
-        */
-       if (unlikely(pmd_trans_migrating(*vmf->pmd))) {
-               page = pmd_page(*vmf->pmd);
-               if (!get_page_unless_zero(page))
-                       goto out_unlock;
-               spin_unlock(vmf->ptl);
-               put_and_wait_on_page_locked(page, TASK_UNINTERRUPTIBLE);
-               goto out;
-       }
-
-       page = pmd_page(pmd);
-       BUG_ON(is_huge_zero_page(page));
-       page_nid = page_to_nid(page);
-       last_cpupid = page_cpupid_last(page);
-       count_vm_numa_event(NUMA_HINT_FAULTS);
-       if (page_nid == this_nid) {
-               count_vm_numa_event(NUMA_HINT_FAULTS_LOCAL);
-               flags |= TNF_FAULT_LOCAL;
-       }
-
-       /* See similar comment in do_numa_page for explanation */
-       if (!pmd_savedwrite(pmd))
-               flags |= TNF_NO_GROUP;
-
-       /*
-        * Acquire the page lock to serialise THP migrations but avoid dropping
-        * page_table_lock if at all possible
-        */
-       page_locked = trylock_page(page);
-       target_nid = mpol_misplaced(page, vma, haddr);
-       /* Migration could have started since the pmd_trans_migrating check */
-       if (!page_locked) {
-               page_nid = NUMA_NO_NODE;
-               if (!get_page_unless_zero(page))
-                       goto out_unlock;
+       if (unlikely(!pmd_same(oldpmd, *vmf->pmd))) {
                spin_unlock(vmf->ptl);
-               put_and_wait_on_page_locked(page, TASK_UNINTERRUPTIBLE);
                goto out;
-       } else if (target_nid == NUMA_NO_NODE) {
-               /* There are no parallel migrations and page is in the right
-                * node. Clear the numa hinting info in this pmd.
-                */
-               goto clear_pmdnuma;
-       }
-
-       /*
-        * Page is misplaced. Page lock serialises migrations. Acquire anon_vma
-        * to serialises splits
-        */
-       get_page(page);
-       spin_unlock(vmf->ptl);
-       anon_vma = page_lock_anon_vma_read(page);
-
-       /* Confirm the PMD did not change while page_table_lock was released */
-       spin_lock(vmf->ptl);
-       if (unlikely(!pmd_same(pmd, *vmf->pmd))) {
-               unlock_page(page);
-               put_page(page);
-               page_nid = NUMA_NO_NODE;
-               goto out_unlock;
-       }
-
-       /* Bail if we fail to protect against THP splits for any reason */
-       if (unlikely(!anon_vma)) {
-               put_page(page);
-               page_nid = NUMA_NO_NODE;
-               goto clear_pmdnuma;
        }
 
        /*
                                              haddr + HPAGE_PMD_SIZE);
        }
 
-       /*
-        * Migrate the THP to the requested node, returns with page unlocked
-        * and access rights restored.
-        */
+       pmd = pmd_modify(oldpmd, vma->vm_page_prot);
+       page = vm_normal_page_pmd(vma, haddr, pmd);
+       if (!page)
+               goto out_map;
+
+       /* See similar comment in do_numa_page for explanation */
+       if (!was_writable)
+               flags |= TNF_NO_GROUP;
+
+       page_nid = page_to_nid(page);
+       last_cpupid = page_cpupid_last(page);
+       target_nid = numa_migrate_prep(page, vma, haddr, page_nid,
+                                      &flags);
+
+       if (target_nid == NUMA_NO_NODE) {
+               put_page(page);
+               goto out_map;
+       }
+
        spin_unlock(vmf->ptl);
 
-       migrated = migrate_misplaced_transhuge_page(vma->vm_mm, vma,
-                               vmf->pmd, pmd, vmf->address, page, target_nid);
+       migrated = migrate_misplaced_page(page, vma, target_nid);
        if (migrated) {
                flags |= TNF_MIGRATED;
                page_nid = target_nid;
-       } else
+       } else {
                flags |= TNF_MIGRATE_FAIL;
-
-       goto out;
-clear_pmdnuma:
-       BUG_ON(!PageLocked(page));
-       was_writable = pmd_savedwrite(pmd);
-       pmd = pmd_modify(pmd, vma->vm_page_prot);
-       pmd = pmd_mkyoung(pmd);
-       if (was_writable)
-               pmd = pmd_mkwrite(pmd);
-       set_pmd_at(vma->vm_mm, haddr, vmf->pmd, pmd);
-       update_mmu_cache_pmd(vma, vmf->address, vmf->pmd);
-       unlock_page(page);
-out_unlock:
-       spin_unlock(vmf->ptl);
+               vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd);
+               if (unlikely(!pmd_same(oldpmd, *vmf->pmd))) {
+                       spin_unlock(vmf->ptl);
+                       goto out;
+               }
+               goto out_map;
+       }
 
 out:
-       if (anon_vma)
-               page_unlock_anon_vma_read(anon_vma);
-
        if (page_nid != NUMA_NO_NODE)
                task_numa_fault(last_cpupid, page_nid, HPAGE_PMD_NR,
                                flags);
 
        return 0;
+
+out_map:
+       /* Restore the PMD */
+       pmd = pmd_modify(oldpmd, vma->vm_page_prot);
+       pmd = pmd_mkyoung(pmd);
+       if (was_writable)
+               pmd = pmd_mkwrite(pmd);
+       set_pmd_at(vma->vm_mm, haddr, vmf->pmd, pmd);
+       update_mmu_cache_pmd(vma, vmf->address, vmf->pmd);
+       spin_unlock(vmf->ptl);
+       goto out;
 }
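
The open-coded hinting-fault statistics and mpol_misplaced() lookup that the deleted lines carried are not lost: they now come from numa_migrate_prep(), the same helper do_numa_page() uses for base pages, so the THP and base-page paths account NUMA faults identically. Note also the new out_map exit: whenever migration is not attempted, or fails with the PMD unchanged, the PMD is rebuilt from oldpmd (made young, and writable again if it was savedwrite-protected) so the task does not immediately take the same fault. For reference, numa_migrate_prep() in mm/memory.c looks roughly like this:

int numa_migrate_prep(struct page *page, struct vm_area_struct *vma,
		      unsigned long addr, int page_nid, int *flags)
{
	get_page(page);

	count_vm_numa_event(NUMA_HINT_FAULTS);
	if (page_nid == numa_node_id()) {
		count_vm_numa_event(NUMA_HINT_FAULTS_LOCAL);
		*flags |= TNF_FAULT_LOCAL;
	}

	/* Ask the memory policy layer where this page should live */
	return mpol_misplaced(page, vma, addr);
}
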
 
 /*
 
  */
 extern void clear_page_mlock(struct page *page);
 
-/*
- * mlock_migrate_page - called only from migrate_misplaced_transhuge_page()
- * (because that does not go through the full procedure of migration ptes):
- * to migrate the Mlocked page flag; update statistics.
- */
-static inline void mlock_migrate_page(struct page *newpage, struct page *page)
-{
-       if (TestClearPageMlocked(page)) {
-               int nr_pages = thp_nr_pages(page);
-
-               /* Holding pmd lock, no change in irq context: __mod is safe */
-               __mod_zone_page_state(page_zone(page), NR_MLOCK, -nr_pages);
-               SetPageMlocked(newpage);
-               __mod_zone_page_state(page_zone(newpage), NR_MLOCK, nr_pages);
-       }
-}
-
 extern pmd_t maybe_pmd_mkwrite(pmd_t pmd, struct vm_area_struct *vma);
 
 /*
 #else /* !CONFIG_MMU */
 static inline void clear_page_mlock(struct page *page) { }
 static inline void mlock_vma_page(struct page *page) { }
-static inline void mlock_migrate_page(struct page *new, struct page *old) { }
 static inline void vunmap_range_noflush(unsigned long start, unsigned long end)
 {
 }
 
        return newpage;
 }
 
+static struct page *alloc_misplaced_dst_page_thp(struct page *page,
+                                                unsigned long data)
+{
+       int nid = (int) data;
+       struct page *newpage;
+
+       newpage = alloc_pages_node(nid, (GFP_TRANSHUGE_LIGHT | __GFP_THISNODE),
+                                  HPAGE_PMD_ORDER);
+       if (!newpage)
+               goto out;
+
+       prep_transhuge_page(newpage);
+
+out:
+       return newpage;
+}
+
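
The THP allocation callback deliberately mirrors the existing base-page callback whose tail appears as context above, but swaps in GFP_TRANSHUGE_LIGHT (no direct reclaim or compaction stalls in a fault path) plus __GFP_THISNODE (a huge page on any other node would defeat the point of the migration). For comparison, the pre-existing base-page variant looks roughly like this (gfp details may vary slightly between releases):

static struct page *alloc_misplaced_dst_page(struct page *page,
					     unsigned long data)
{
	int nid = (int) data;

	/* Lightweight, node-bound allocation: fail fast rather than stall */
	return __alloc_pages_node(nid,
				  (GFP_HIGHUSER_MOVABLE |
				   __GFP_THISNODE | __GFP_NOMEMALLOC |
				   __GFP_NORETRY | __GFP_NOWARN) &
				  ~__GFP_RECLAIM, 0);
}
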
 static int numamigrate_isolate_page(pg_data_t *pgdat, struct page *page)
 {
        int page_lru;
        return 1;
 }
 
-bool pmd_trans_migrating(pmd_t pmd)
-{
-       struct page *page = pmd_page(pmd);
-       return PageLocked(page);
-}
-
 /*
  * Attempt to migrate a misplaced page to the specified destination
  * node. Caller is expected to have an elevated reference count on
        int isolated;
        int nr_remaining;
        LIST_HEAD(migratepages);
+       new_page_t *new;
+       bool compound;
+
+       /*
+        * A PTE-mapped THP or HugeTLB page can't reach here, so the page
+        * is either a base page or a PMD-mapped THP; if it is a THP, it
+        * must be the head page.
+        */
+       compound = PageTransHuge(page);
+
+       if (compound)
+               new = alloc_misplaced_dst_page_thp;
+       else
+               new = alloc_misplaced_dst_page;
 
        /*
         * Don't migrate file pages that are mapped in multiple processes
                goto out;
 
        list_add(&page->lru, &migratepages);
-       nr_remaining = migrate_pages(&migratepages, alloc_misplaced_dst_page,
-                                    NULL, node, MIGRATE_ASYNC,
-                                    MR_NUMA_MISPLACED);
+       nr_remaining = migrate_pages(&migratepages, *new, NULL, node,
+                                    MIGRATE_ASYNC, MR_NUMA_MISPLACED);
        if (nr_remaining) {
                if (!list_empty(&migratepages)) {
                        list_del(&page->lru);
        return 0;
 }
 #endif /* CONFIG_NUMA_BALANCING */
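
One declaration subtlety in the hunk above: include/linux/migrate.h declares new_page_t as a function type, not a pointer type, so a local pointer is spelled new_page_t *new, and passing *new to migrate_pages() is equivalent to passing new (functions decay to pointers either way):

typedef struct page *new_page_t(struct page *page, unsigned long private);
typedef void free_page_t(struct page *page, unsigned long private);
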
-
-#if defined(CONFIG_NUMA_BALANCING) && defined(CONFIG_TRANSPARENT_HUGEPAGE)
-/*
- * Migrates a THP to a given target node. page must be locked and is unlocked
- * before returning.
- */
-int migrate_misplaced_transhuge_page(struct mm_struct *mm,
-                               struct vm_area_struct *vma,
-                               pmd_t *pmd, pmd_t entry,
-                               unsigned long address,
-                               struct page *page, int node)
-{
-       spinlock_t *ptl;
-       pg_data_t *pgdat = NODE_DATA(node);
-       int isolated = 0;
-       struct page *new_page = NULL;
-       int page_lru = page_is_file_lru(page);
-       unsigned long start = address & HPAGE_PMD_MASK;
-
-       new_page = alloc_pages_node(node,
-               (GFP_TRANSHUGE_LIGHT | __GFP_THISNODE),
-               HPAGE_PMD_ORDER);
-       if (!new_page)
-               goto out_fail;
-       prep_transhuge_page(new_page);
-
-       isolated = numamigrate_isolate_page(pgdat, page);
-       if (!isolated) {
-               put_page(new_page);
-               goto out_fail;
-       }
-
-       /* Prepare a page as a migration target */
-       __SetPageLocked(new_page);
-       if (PageSwapBacked(page))
-               __SetPageSwapBacked(new_page);
-
-       /* anon mapping, we can simply copy page->mapping to the new page: */
-       new_page->mapping = page->mapping;
-       new_page->index = page->index;
-       /* flush the cache before copying using the kernel virtual address */
-       flush_cache_range(vma, start, start + HPAGE_PMD_SIZE);
-       migrate_page_copy(new_page, page);
-       WARN_ON(PageLRU(new_page));
-
-       /* Recheck the target PMD */
-       ptl = pmd_lock(mm, pmd);
-       if (unlikely(!pmd_same(*pmd, entry) || !page_ref_freeze(page, 2))) {
-               spin_unlock(ptl);
-
-               /* Reverse changes made by migrate_page_copy() */
-               if (TestClearPageActive(new_page))
-                       SetPageActive(page);
-               if (TestClearPageUnevictable(new_page))
-                       SetPageUnevictable(page);
-
-               unlock_page(new_page);
-               put_page(new_page);             /* Free it */
-
-               /* Retake the callers reference and putback on LRU */
-               get_page(page);
-               putback_lru_page(page);
-               mod_node_page_state(page_pgdat(page),
-                        NR_ISOLATED_ANON + page_lru, -HPAGE_PMD_NR);
-
-               goto out_unlock;
-       }
-
-       entry = mk_huge_pmd(new_page, vma->vm_page_prot);
-       entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
-
-       /*
-        * Overwrite the old entry under pagetable lock and establish
-        * the new PTE. Any parallel GUP will either observe the old
-        * page blocking on the page lock, block on the page table
-        * lock or observe the new page. The SetPageUptodate on the
-        * new page and page_add_new_anon_rmap guarantee the copy is
-        * visible before the pagetable update.
-        */
-       page_add_anon_rmap(new_page, vma, start, true);
-       /*
-        * At this point the pmd is numa/protnone (i.e. non present) and the TLB
-        * has already been flushed globally.  So no TLB can be currently
-        * caching this non present pmd mapping.  There's no need to clear the
-        * pmd before doing set_pmd_at(), nor to flush the TLB after
-        * set_pmd_at().  Clearing the pmd here would introduce a race
-        * condition against MADV_DONTNEED, because MADV_DONTNEED only holds the
-        * mmap_lock for reading.  If the pmd is set to NULL at any given time,
-        * MADV_DONTNEED won't wait on the pmd lock and it'll skip clearing this
-        * pmd.
-        */
-       set_pmd_at(mm, start, pmd, entry);
-       update_mmu_cache_pmd(vma, address, &entry);
-
-       page_ref_unfreeze(page, 2);
-       mlock_migrate_page(new_page, page);
-       page_remove_rmap(page, true);
-       set_page_owner_migrate_reason(new_page, MR_NUMA_MISPLACED);
-
-       spin_unlock(ptl);
-
-       /* Take an "isolate" reference and put new page on the LRU. */
-       get_page(new_page);
-       putback_lru_page(new_page);
-
-       unlock_page(new_page);
-       unlock_page(page);
-       put_page(page);                 /* Drop the rmap reference */
-       put_page(page);                 /* Drop the LRU isolation reference */
-
-       count_vm_events(PGMIGRATE_SUCCESS, HPAGE_PMD_NR);
-       count_vm_numa_events(NUMA_PAGE_MIGRATE, HPAGE_PMD_NR);
-
-       mod_node_page_state(page_pgdat(page),
-                       NR_ISOLATED_ANON + page_lru,
-                       -HPAGE_PMD_NR);
-       return isolated;
-
-out_fail:
-       count_vm_events(PGMIGRATE_FAIL, HPAGE_PMD_NR);
-       ptl = pmd_lock(mm, pmd);
-       if (pmd_same(*pmd, entry)) {
-               entry = pmd_modify(entry, vma->vm_page_prot);
-               set_pmd_at(mm, start, pmd, entry);
-               update_mmu_cache_pmd(vma, address, &entry);
-       }
-       spin_unlock(ptl);
-
-out_unlock:
-       unlock_page(page);
-       put_page(page);
-       return 0;
-}
-#endif /* CONFIG_NUMA_BALANCING */
-
 #endif /* CONFIG_NUMA */
 
 #ifdef CONFIG_DEVICE_PRIVATE