* We are not sure whether a pending tlb flush here is for a huge page
         * mapping or not. Hence use the tlb range variant.
         */
-       if (mm_tlb_flush_pending(vma->vm_mm))
+       if (mm_tlb_flush_pending(vma->vm_mm)) {
                flush_tlb_range(vma, haddr, haddr + HPAGE_PMD_SIZE);
+               /*
+                * change_huge_pmd() released the pmd lock before
+                * invalidating the secondary MMUs sharing the primary
+                * MMU pagetables (with ->invalidate_range()). The
+                * mmu_notifier_invalidate_range_end() (which
+                * internally calls ->invalidate_range()) in
+                * change_pmd_range() will run after us, so we can't
+                * rely on it here and we need an explicit invalidate.
+                */
+               mmu_notifier_invalidate_range(vma->vm_mm, haddr,
+                                             haddr + HPAGE_PMD_SIZE);
+       }
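
For context, the invariant restored by this hunk: when change_huge_pmd()
has deferred its TLB flush, a racing NUMA fault must flush the primary
TLB and invalidate the secondary MMUs itself, because the
mmu_notifier_invalidate_range_end() that would normally do the latter
has not run yet. A minimal kernel-style sketch of the pairing (the
helper name is hypothetical):

	/*
	 * Hypothetical helper: pair the primary TLB flush with an
	 * explicit secondary MMU invalidate (IOMMUs, KVM, etc.),
	 * because the deferred mmu_notifier_invalidate_range_end()
	 * in change_pmd_range() has not run yet.
	 */
	static void thp_flush_pending_range(struct vm_area_struct *vma,
					    unsigned long haddr)
	{
		if (mm_tlb_flush_pending(vma->vm_mm)) {
			flush_tlb_range(vma, haddr, haddr + HPAGE_PMD_SIZE);
			mmu_notifier_invalidate_range(vma->vm_mm, haddr,
						      haddr + HPAGE_PMD_SIZE);
		}
	}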
 
        /*
         * Migrate the THP to the requested node, returns with page unlocked
 
        int isolated = 0;
        struct page *new_page = NULL;
        int page_lru = page_is_file_cache(page);
-       unsigned long mmun_start = address & HPAGE_PMD_MASK;
-       unsigned long mmun_end = mmun_start + HPAGE_PMD_SIZE;
+       unsigned long start = address & HPAGE_PMD_MASK;
+       unsigned long end = start + HPAGE_PMD_SIZE;
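
The rename from mmun_start/mmun_end to start/end reflects that this
range no longer feeds any mmu notifier call (those calls are deleted
below); it is simply the huge-page-aligned span around the faulting
address. A standalone demo of the arithmetic, assuming x86-64's 2MiB
PMD size (the kernel derives HPAGE_PMD_SIZE from the pagetable layout):

	#include <stdio.h>

	#define HPAGE_PMD_SIZE	(2UL << 20)	/* assumption: x86-64, 4K base pages */
	#define HPAGE_PMD_MASK	(~(HPAGE_PMD_SIZE - 1))

	int main(void)
	{
		unsigned long address = 0x7f1234567000UL;	/* arbitrary fault address */
		unsigned long start = address & HPAGE_PMD_MASK;	/* round down to 2MiB */
		unsigned long end = start + HPAGE_PMD_SIZE;	/* one PMD page later */

		printf("fault %#lx -> THP range [%#lx, %#lx)\n", address, start, end);
		return 0;
	}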
 
        new_page = alloc_pages_node(node,
                (GFP_TRANSHUGE_LIGHT | __GFP_THISNODE),
        WARN_ON(PageLRU(new_page));
 
        /* Recheck the target PMD */
-       mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end);
        ptl = pmd_lock(mm, pmd);
        if (unlikely(!pmd_same(*pmd, entry) || !page_ref_freeze(page, 2))) {
                spin_unlock(ptl);
-               mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
 
                /* Reverse changes made by migrate_page_copy() */
                if (TestClearPageActive(new_page))
         * new page and page_add_new_anon_rmap guarantee the copy is
         * visible before the pagetable update.
         */
-       flush_cache_range(vma, mmun_start, mmun_end);
-       page_add_anon_rmap(new_page, vma, mmun_start, true);
+       flush_cache_range(vma, start, end);
+       page_add_anon_rmap(new_page, vma, start, true);
        /*
         * At this point the pmd is numa/protnone (i.e. non present) and the TLB
         * has already been flushed globally.  So no TLB can be currently
         * caching this non present pmd mapping.  There's no need to clear
         * the pmd before doing set_pmd_at().  Clearing the pmd here would
         * race with MADV_DONTNEED, which only holds the mmap_sem for
         * reading: if the pmd is cleared at any given time,
         * MADV_DONTNEED won't wait on the pmd lock and it'll skip clearing this
         * pmd.
         */
-       set_pmd_at(mm, mmun_start, pmd, entry);
+       set_pmd_at(mm, start, pmd, entry);
        update_mmu_cache_pmd(vma, address, &entry);
 
        page_ref_unfreeze(page, 2);
        set_page_owner_migrate_reason(new_page, MR_NUMA_MISPLACED);
 
        spin_unlock(ptl);
-       /*
-        * No need to double call mmu_notifier->invalidate_range() callback as
-        * the above pmdp_huge_clear_flush_notify() did already call it.
-        */
-       mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
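
The start/end notifier bracket deleted here (and in the error path
above) is only needed when present pagetable entries are cleared or
downgraded, because that is what secondary MMUs may be caching. This
path only installs a mapping into a pmd that is already non present
and globally flushed, so a secondary MMU can have nothing cached for
the range. A sketch of the contrasting case that does need the
bracket, using the same notifier API the deleted lines used (the
function name is hypothetical):

	/*
	 * Hypothetical teardown path: present entries go away, so
	 * secondary MMUs must be notified before and after.
	 */
	static void teardown_range(struct mm_struct *mm,
				   unsigned long start, unsigned long end)
	{
		mmu_notifier_invalidate_range_start(mm, start, end);
		/* ... clear present pagetable entries in [start, end) ... */
		mmu_notifier_invalidate_range_end(mm, start, end);
	}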
 
        /* Take an "isolate" reference and put new page on the LRU. */
        get_page(new_page);
        ptl = pmd_lock(mm, pmd);
        if (pmd_same(*pmd, entry)) {
                entry = pmd_modify(entry, vma->vm_page_prot);
-               set_pmd_at(mm, mmun_start, pmd, entry);
+               set_pmd_at(mm, start, pmd, entry);
                update_mmu_cache_pmd(vma, address, &entry);
        }
        spin_unlock(ptl);
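
If the pmd changed under us or the page's refcount could not be
frozen, migration is abandoned and the pmd is rebuilt from the
original entry with the vma's normal protections, clearing the NUMA
hinting state. Conceptually, pmd_modify() keeps the pfn and swaps the
protection bits; a simplified, hypothetical sketch in x86 terms (the
real helper is architecture specific and preserves more flag bits):

	/*
	 * Hypothetical, simplified pmd_modify(): keep the pfn bits,
	 * replace the protections.  Illustration only.
	 */
	static inline pmd_t pmd_modify_sketch(pmd_t pmd, pgprot_t newprot)
	{
		return __pmd((pmd_val(pmd) & PTE_PFN_MASK) | pgprot_val(newprot));
	}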