/* Establish migration ptes */
                VM_BUG_ON_FOLIO(folio_test_anon(src) &&
                               !folio_test_ksm(src) && !anon_vma, src);
-               try_to_migrate(src, 0);
+               try_to_migrate(src, TTU_BATCH_FLUSH);
                page_was_mapped = 1;
        }
 
        stats->nr_thp_failed += thp_retry;
        stats->nr_failed_pages += nr_retry_pages;
 move:
+       /* Flush TLBs for all unmapped folios */
+       try_to_unmap_flush();
+
        retry = 1;
        for (pass = 0;
             pass < NR_MAX_MIGRATE_PAGES_RETRY && (retry || large_retry);
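Taken together, the migrate.c hunks above turn one TLB flush per folio into a single flush per batch: try_to_migrate() is asked to defer its flushes via TTU_BATCH_FLUSH, and a single try_to_unmap_flush() is issued at the move: stage. A minimal sketch of the resulting control flow, with the retry passes, THP handling and error unwinding stripped out (the helper name below is made up for illustration):

static void unmap_folios_batched(struct list_head *folios)
{
	struct folio *src;

	/* Pass 1: clear the PTEs of every folio, deferring the TLB flush. */
	list_for_each_entry(src, folios, lru)
		try_to_migrate(src, TTU_BATCH_FLUSH);

	/* A single flush now covers every folio unmapped above. */
	try_to_unmap_flush();

	/* Pass 2: the folios can be copied and their migration ptes replaced. */
}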
 
                } else {
                        flush_cache_page(vma, address, pte_pfn(*pvmw.pte));
                        /* Nuke the page table entry. */
-                       pteval = ptep_clear_flush(vma, address, pvmw.pte);
+                       if (should_defer_flush(mm, flags)) {
+                               /*
+                                * We clear the PTE but do not flush so potentially
+                                * a remote CPU could still be writing to the folio.
+                                * If the entry was previously clean then the
+                                * architecture must guarantee that a clear->dirty
+                                * transition on a cached TLB entry is written through
+                                * and traps if the PTE is unmapped.
+                                */
+                               pteval = ptep_get_and_clear(mm, address, pvmw.pte);
+
+                               set_tlb_ubc_flush_pending(mm, pte_dirty(pteval));
+                       } else {
+                               pteval = ptep_clear_flush(vma, address, pvmw.pte);
+                       }
                }
 
                /* Set the dirty flag on the folio now the pte is gone. */
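For context, the deferred flush that set_tlb_ubc_flush_pending() records above is what the new try_to_unmap_flush() call in migrate.c later consumes. Roughly, and simplified from mm/rmap.c of this era (the mm->tlb_flush_batched tracking, barriers, and config guards are omitted; exact fields differ between kernel versions):

static void set_tlb_ubc_flush_pending(struct mm_struct *mm, bool writable)
{
	struct tlbflush_unmap_batch *tlb_ubc = &current->tlb_ubc;

	/* Note which mm still has stale TLB entries on other CPUs. */
	arch_tlbbatch_add_mm(&tlb_ubc->arch, mm);
	tlb_ubc->flush_required = true;

	/*
	 * A dirty PTE means the folio may still be written through a stale
	 * TLB entry, so the flush must happen before it is queued for IO.
	 */
	if (writable)
		tlb_ubc->writable = true;
}

void try_to_unmap_flush(void)
{
	struct tlbflush_unmap_batch *tlb_ubc = &current->tlb_ubc;

	if (!tlb_ubc->flush_required)
		return;

	arch_tlbbatch_flush(&tlb_ubc->arch);
	tlb_ubc->flush_required = false;
	tlb_ubc->writable = false;
}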
 
        /*
         * Migration always ignores mlock and only supports TTU_RMAP_LOCKED and
-        * TTU_SPLIT_HUGE_PMD and TTU_SYNC flags.
+        * TTU_SPLIT_HUGE_PMD, TTU_SYNC, and TTU_BATCH_FLUSH flags.
         */
        if (WARN_ON_ONCE(flags & ~(TTU_RMAP_LOCKED | TTU_SPLIT_HUGE_PMD |
-                                       TTU_SYNC)))
+                                       TTU_SYNC | TTU_BATCH_FLUSH)))
                return;
 
        if (folio_is_zone_device(folio) &&
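Finally, whether a TTU_BATCH_FLUSH request actually defers anything is decided per mapping by should_defer_flush(), used in the rmap hunk above. Its gist, again simplified and only compiled when the architecture selects CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH (otherwise it simply returns false):

static bool should_defer_flush(struct mm_struct *mm, enum ttu_flags flags)
{
	bool should_defer = false;

	if (!(flags & TTU_BATCH_FLUSH))
		return false;

	/* Only worth deferring if remote CPUs would otherwise need an IPI. */
	if (cpumask_any_but(mm_cpumask(mm), get_cpu()) < nr_cpu_ids)
		should_defer = true;
	put_cpu();

	return should_defer;
}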