static int queue_folios_pte_range(pmd_t *pmd, unsigned long addr,
                        unsigned long end, struct mm_walk *walk)
 {
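+       /* when batching, ignore differences in the PTEs' dirty/soft-dirty bits */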
+       const fpb_t fpb_flags = FPB_IGNORE_DIRTY | FPB_IGNORE_SOFT_DIRTY;
        struct vm_area_struct *vma = walk->vma;
        struct folio *folio;
        struct queue_pages *qp = walk->private;
        unsigned long flags = qp->flags;
        pte_t *pte, *mapped_pte;
        pte_t ptent;
        spinlock_t *ptl;
+       int max_nr, nr;
 
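+       /* a PMD-mapped THP is queued as one folio; no PTE scan is needed */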
        ptl = pmd_trans_huge_lock(pmd, vma);
        if (ptl) {
                queue_folios_pmd(pmd, walk);
                spin_unlock(ptl);
                goto out;
        }
 
        mapped_pte = pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl);
        if (!pte) {
                walk->action = ACTION_AGAIN;
                return 0;
        }
-       for (; addr != end; pte++, addr += PAGE_SIZE) {
+       for (; addr != end; pte += nr, addr += nr * PAGE_SIZE) {
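+               /* cap the batch at the end of this range; default to one PTE */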
+               max_nr = (end - addr) >> PAGE_SHIFT;
+               nr = 1;
                ptent = ptep_get(pte);
                if (pte_none(ptent))
                        continue;
                if (!pte_present(ptent)) {
                        if (is_migration_entry(pte_to_swp_entry(ptent)))
                                qp->nr_failed++;
                        continue;
                }
                folio = vm_normal_folio(vma, addr, ptent);
                if (!folio || folio_is_zone_device(folio))
                        continue;
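+               /*
+                * Count how many of the remaining PTEs map this same large
+                * folio, so the whole run can be skipped or queued in one go.
+                */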
+               if (folio_test_large(folio) && max_nr != 1)
+                       nr = folio_pte_batch(folio, addr, pte, ptent,
+                                            max_nr, fpb_flags,
+                                            NULL, NULL, NULL);
                /*
                 * vm_normal_folio() filters out zero pages, but there might
                 * still be reserved folios to skip, perhaps in a VDSO.
                 */
                if (folio_test_reserved(folio))
                        continue;
                if (!queue_folio_required(folio, qp))
                        continue;
                /*
                 * A large folio is isolated from the LRU only once, but it
                 * may be mapped by many PTEs: don't mistake a repeat visit
                 * for a migration failure.
                 */
                if (folio_test_large(folio)) {
                        if (folio == qp->large)
                                continue;
                        qp->large = folio;
                }
                if (!(flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) ||
                    !vma_migratable(vma) ||
                    !migrate_folio_add(folio, qp->pagelist, flags)) {
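+                       /* the batch maps a single folio: all nr pages fail together */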
-                       qp->nr_failed++;
+                       qp->nr_failed += nr;
                        if (strictly_unmovable(flags))
                                break;
                }