void remove_migration_pmd(struct page_vma_mapped_walk *pvmw, struct page *new)
 {
+       struct folio *folio = page_folio(new);
        struct vm_area_struct *vma = pvmw->vma;
        struct mm_struct *mm = vma->vm_mm;
        unsigned long address = pvmw->address;
        unsigned long haddr = address & HPAGE_PMD_MASK;
        pmd_t pmde;
        swp_entry_t entry;
 
        if (!(pvmw->pmd && !pvmw->pte))
                return;
 
        entry = pmd_to_swp_entry(*pvmw->pmd);
-       get_page(new);
+       folio_get(folio);
        pmde = mk_huge_pmd(new, READ_ONCE(vma->vm_page_prot));
        if (pmd_swp_soft_dirty(*pvmw->pmd))
                pmde = pmd_mksoft_dirty(pmde);
        if (is_writable_migration_entry(entry))
                pmde = pmd_mkwrite(pmde, vma);
        if (pmd_swp_uffd_wp(*pvmw->pmd))
                pmde = pmd_mkuffd_wp(pmde);
        if (!is_migration_entry_young(entry))
                pmde = pmd_mkold(pmde);
        /* NOTE: this may contain setting soft-dirty on some archs */
-       if (PageDirty(new) && is_migration_entry_dirty(entry))
+       if (folio_test_dirty(folio) && is_migration_entry_dirty(entry))
                pmde = pmd_mkdirty(pmde);
 
-       if (PageAnon(new)) {
+       if (folio_test_anon(folio)) {
                rmap_t rmap_flags = RMAP_COMPOUND;
 
                if (!is_readable_migration_entry(entry))
                        rmap_flags |= RMAP_EXCLUSIVE;
 
                page_add_anon_rmap(new, vma, haddr, rmap_flags);
        } else {
-               page_add_file_rmap(new, vma, true);
+               folio_add_file_rmap_pmd(folio, new, vma);
        }
-       VM_BUG_ON(pmd_write(pmde) && PageAnon(new) && !PageAnonExclusive(new));
+       VM_BUG_ON(pmd_write(pmde) && folio_test_anon(folio) && !PageAnonExclusive(new));
        set_pmd_at(mm, haddr, pvmw->pmd, pmde);
 
        /* No need to invalidate - it was non-present before */
        update_mmu_cache_pmd(vma, address, pvmw->pmd);
        trace_remove_migration_pmd(address, pmd_val(pmde));
 }
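
For context: the hunk follows the standard page -> folio conversion idiom,
resolving the folio once via page_folio() at function entry and then using
folio accessors in place of the per-page helpers. A minimal sketch of that
idiom as it appears above, assuming the kernel-internal helpers named in
the hunk (handle_anon()/handle_file() are hypothetical placeholders, not
kernel functions):

        struct folio *folio = page_folio(page); /* resolve once; a tail page maps to its head folio */

        folio_get(folio);                       /* replaces get_page(page) */
        if (folio_test_dirty(folio))            /* replaces PageDirty(page) */
                pmde = pmd_mkdirty(pmde);
        if (folio_test_anon(folio))             /* replaces PageAnon(page) */
                handle_anon();                  /* hypothetical placeholder */
        else
                handle_file();                  /* hypothetical placeholder */

Note that folio_add_file_rmap_pmd() still takes both the folio and the
precise page: rmap accounting operates on the folio, while the page
identifies the subpage range actually mapped by the PMD.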