struct mm_walk *walk)
 {
        struct migrate_vma *migrate = walk->private;
+       struct folio *fault_folio = migrate->fault_page ?
+               page_folio(migrate->fault_page) : NULL;
        struct vm_area_struct *vma = walk->vma;
        struct mm_struct *mm = vma->vm_mm;
        unsigned long addr = start, unmapped = 0;
 
                        folio_get(folio);
                        spin_unlock(ptl);
+                       /* FIXME: we don't expect THP for fault_folio */
+                       if (WARN_ON_ONCE(fault_folio == folio))
+                               return migrate_vma_collect_skip(start, end,
+                                                               walk);
                        if (unlikely(!folio_trylock(folio)))
                                return migrate_vma_collect_skip(start, end,
                                                                walk);
                        ret = split_folio(folio);
-                       folio_unlock(folio);
+                       if (fault_folio != folio)
+                               folio_unlock(folio);
                        folio_put(folio);
                        if (ret)
                                return migrate_vma_collect_skip(start, end,
                 * optimisation to avoid walking the rmap later with
                 * try_to_migrate().
                 */
-               if (folio_trylock(folio)) {
+               if (fault_folio == folio || folio_trylock(folio)) {
                        bool anon_exclusive;
                        pte_t swp_pte;
 
 
                                if (folio_try_share_anon_rmap_pte(folio, page)) {
                                        set_pte_at(mm, addr, ptep, pte);
-                                       folio_unlock(folio);
+                                       if (fault_folio != folio)
+                                               folio_unlock(folio);
                                        folio_put(folio);
                                        mpfn = 0;
                                        goto next;
                                          unsigned long npages,
                                          struct page *fault_page)
 {
+       struct folio *fault_folio = fault_page ?
+               page_folio(fault_page) : NULL;
        unsigned long i, restore = 0;
        bool allow_drain = true;
        unsigned long unmapped = 0;
                remove_migration_ptes(folio, folio, 0);
 
                src_pfns[i] = 0;
-               folio_unlock(folio);
+               if (fault_folio != folio)
+                       folio_unlock(folio);
                folio_put(folio);
                restore--;
        }
                return -EINVAL;
        if (args->fault_page && !is_device_private_page(args->fault_page))
                return -EINVAL;
+       if (args->fault_page && !PageLocked(args->fault_page))
+               return -EINVAL;
 
        memset(args->src, 0, sizeof(*args->src) * nr_pages);
        args->cpages = 0;
 }
 EXPORT_SYMBOL(migrate_vma_pages);
 
-/*
- * migrate_device_finalize() - complete page migration
- * @src_pfns: src_pfns returned from migrate_device_range()
- * @dst_pfns: array of pfns allocated by the driver to migrate memory to
- * @npages: number of pages in the range
- *
- * Completes migration of the page by removing special migration entries.
- * Drivers must ensure copying of page data is complete and visible to the CPU
- * before calling this.
- */
-void migrate_device_finalize(unsigned long *src_pfns,
-                       unsigned long *dst_pfns, unsigned long npages)
+static void __migrate_device_finalize(unsigned long *src_pfns,
+                                     unsigned long *dst_pfns,
+                                     unsigned long npages,
+                                     struct page *fault_page)
 {
+       struct folio *fault_folio = fault_page ?
+               page_folio(fault_page) : NULL;
        unsigned long i;
 
        for (i = 0; i < npages; i++) {
 
                if (!page) {
                        if (dst) {
+                               WARN_ON_ONCE(fault_folio == dst);
                                folio_unlock(dst);
                                folio_put(dst);
                        }
 
                if (!(src_pfns[i] & MIGRATE_PFN_MIGRATE) || !dst) {
                        if (dst) {
+                               WARN_ON_ONCE(fault_folio == dst);
                                folio_unlock(dst);
                                folio_put(dst);
                        }
                if (!folio_is_zone_device(dst))
                        folio_add_lru(dst);
                remove_migration_ptes(src, dst, 0);
-               folio_unlock(src);
+               if (fault_folio != src)
+                       folio_unlock(src);
                folio_put(src);
 
                if (dst != src) {
+                       WARN_ON_ONCE(fault_folio == dst);
                        folio_unlock(dst);
                        folio_put(dst);
                }
        }
 }
+
+/*
+ * migrate_device_finalize() - complete page migration
+ * @src_pfns: src_pfns returned from migrate_device_range()
+ * @dst_pfns: array of pfns allocated by the driver to migrate memory to
+ * @npages: number of pages in the range
+ *
+ * Completes migration of the page by removing special migration entries.
+ * Drivers must ensure copying of page data is complete and visible to the CPU
+ * before calling this.
+ */
+void migrate_device_finalize(unsigned long *src_pfns,
+                            unsigned long *dst_pfns, unsigned long npages)
+{
+       return __migrate_device_finalize(src_pfns, dst_pfns, npages, NULL);
+}
 EXPORT_SYMBOL(migrate_device_finalize);
 
 /**
  */
 void migrate_vma_finalize(struct migrate_vma *migrate)
 {
-       migrate_device_finalize(migrate->src, migrate->dst, migrate->npages);
+       __migrate_device_finalize(migrate->src, migrate->dst, migrate->npages,
+                                 migrate->fault_page);
 }
 EXPORT_SYMBOL(migrate_vma_finalize);
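
For reference, a minimal caller-side sketch of the locking contract this change enforces, loosely modelled on the lib/test_hmm.c fault path. It assumes the core fault handler locks the device-private page before calling ->migrate_to_ram() and unlocks it again after the handler returns, which is what this series arranges; my_devmem_migrate_to_ram(), my_alloc_and_copy_to_ram() and my_pgmap_owner are illustrative placeholders, not part of this patch or of the kernel API.

#include <linux/migrate.h>
#include <linux/mm.h>

/* Hypothetical driver hooks, not part of this patch or of the kernel API. */
extern void *my_pgmap_owner;
extern void my_alloc_and_copy_to_ram(struct migrate_vma *args);

/*
 * Illustrative ->migrate_to_ram() handler for one device-private page.
 * The core fault path hands the handler vmf->page already locked; passing
 * it as fault_page keeps the migrate core from locking or unlocking it
 * behind the caller's back.
 */
static vm_fault_t my_devmem_migrate_to_ram(struct vm_fault *vmf)
{
	unsigned long src_pfn = 0, dst_pfn = 0;
	struct migrate_vma args = {
		.vma		= vmf->vma,
		.start		= vmf->address,
		.end		= vmf->address + PAGE_SIZE,
		.src		= &src_pfn,
		.dst		= &dst_pfn,
		.pgmap_owner	= my_pgmap_owner,
		.flags		= MIGRATE_VMA_SELECT_DEVICE_PRIVATE,
		.fault_page	= vmf->page,	/* locked by the caller */
	};

	/* migrate_vma_setup() now rejects an unlocked fault_page */
	if (migrate_vma_setup(&args))
		return VM_FAULT_SIGBUS;

	/*
	 * Driver-specific step: allocate system memory, copy the device
	 * data over, and fill dst_pfn for every entry that has
	 * MIGRATE_PFN_MIGRATE set in src_pfn.
	 */
	my_alloc_and_copy_to_ram(&args);

	migrate_vma_pages(&args);
	/*
	 * Every collected folio is unlocked and released here except the
	 * fault folio, which stays locked: the fault path that called this
	 * handler still owns that lock and drops it itself.
	 */
	migrate_vma_finalize(&args);
	return 0;
}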