unsigned long haddr = vmf->address & HPAGE_PMD_MASK;
        int page_nid = -1, this_nid = numa_node_id();
        int target_nid, last_cpupid = -1;
-       bool need_flush = false;
        bool page_locked;
        bool migrated = false;
        bool was_writable;
                goto clear_pmdnuma;
        }
 
-       /*
-        * The page_table_lock above provides a memory barrier
-        * with change_protection_range.
-        */
-       if (mm_tlb_flush_pending(vma->vm_mm))
-               flush_tlb_range(vma, haddr, haddr + HPAGE_PMD_SIZE);
-
        /*
         * Since we took the NUMA fault, we must have observed the !accessible
         * bit. Make sure all other CPUs agree with that, to avoid them
         * modifying the page we're about to migrate.
         *
         * Must be done under PTL such that we'll observe the relevant
-        * set_tlb_flush_pending().
+        * inc_tlb_flush_pending().
+        *
+        * We are not sure whether a pending TLB flush here is for a
+        * huge page mapping or not, hence use the TLB range variant.
         */
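+       /*
+        * A rough sketch of the ordering relied on here, with
+        * change_protection_range() on the left and this fault handler
+        * on the right:
+        *
+        *      inc_tlb_flush_pending(mm)
+        *      clear accessible bit under PTL    spin_lock(vmf->ptl)
+        *      flush_tlb_range(...)              mm_tlb_flush_pending()
+        *      dec_tlb_flush_pending(mm)           ... flush_tlb_range()
+        *
+        * Acquiring the PTL after the bit was cleared means we either
+        * observe the elevated pending count and flush here, or the
+        * remote flush_tlb_range() has already completed.
+        */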
        if (mm_tlb_flush_pending(vma->vm_mm))
-               need_flush = true;
+               flush_tlb_range(vma, haddr, haddr + HPAGE_PMD_SIZE);
 
        /*
         * Migrate the THP to the requested node, returns with page unlocked
         */
        spin_unlock(vmf->ptl);
 
-       /*
-        * We are not sure a pending tlb flush here is for a huge page
-        * mapping or not. Hence use the tlb range variant
-        */
-       if (need_flush)
-               flush_tlb_range(vma, haddr, haddr + HPAGE_PMD_SIZE);
-
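+       /*
+        * The flush must not be deferred past this unlock: once the
+        * PTL is dropped, another CPU could modify the page through a
+        * stale TLB entry while it is being copied for migration.
+        */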
        migrated = migrate_misplaced_transhuge_page(vma->vm_mm, vma,
                                vmf->pmd, pmd, vmf->address, page, target_nid);
        if (migrated) {