 static inline int
 copy_present_page(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma,
                  pte_t *dst_pte, pte_t *src_pte, unsigned long addr, int *rss,
-                 struct page **prealloc, struct page *page)
+                 struct folio **prealloc, struct page *page)
 {
-       struct page *new_page;
+       struct folio *new_folio;
        pte_t pte;
 
-       new_page = *prealloc;
-       if (!new_page)
+       new_folio = *prealloc;
+       if (!new_folio)
                return -EAGAIN;
 
        /*
         * over and copy the page & arm it.
         */
        *prealloc = NULL;
-       copy_user_highpage(new_page, page, addr, src_vma);
-       __SetPageUptodate(new_page);
-       page_add_new_anon_rmap(new_page, dst_vma, addr);
-       lru_cache_add_inactive_or_unevictable(new_page, dst_vma);
-       rss[mm_counter(new_page)]++;
+       copy_user_highpage(&new_folio->page, page, addr, src_vma);
+       __folio_mark_uptodate(new_folio);
+       folio_add_new_anon_rmap(new_folio, dst_vma, addr);
+       folio_add_lru_vma(new_folio, dst_vma);
+       rss[MM_ANONPAGES]++;
 
        /* All done, just insert the new page copy in the child */
-       pte = mk_pte(new_page, dst_vma->vm_page_prot);
+       pte = mk_pte(&new_folio->page, dst_vma->vm_page_prot);
        pte = maybe_mkwrite(pte_mkdirty(pte), dst_vma);
        if (userfaultfd_pte_wp(dst_vma, *src_pte))
                /* Uffd-wp needs to be delivered to dest pte as well */
 static inline int
 copy_present_pte(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma,
                 pte_t *dst_pte, pte_t *src_pte, unsigned long addr, int *rss,
-                struct page **prealloc)
+                struct folio **prealloc)
 {
        struct mm_struct *src_mm = src_vma->vm_mm;
        unsigned long vm_flags = src_vma->vm_flags;
                        return copy_present_page(dst_vma, src_vma, dst_pte, src_pte,
                                                 addr, rss, prealloc, page);
                }
-               rss[mm_counter(page)]++;
+               rss[MM_ANONPAGES]++;
        } else if (page) {
                get_page(page);
                page_dup_file_rmap(page, false);
-               rss[mm_counter(page)]++;
+               rss[mm_counter_file(page)]++;
        }
 
        /*
        return 0;
 }
 
-static inline struct page *
-page_copy_prealloc(struct mm_struct *src_mm, struct vm_area_struct *vma,
-                  unsigned long addr)
+static inline struct folio *page_copy_prealloc(struct mm_struct *src_mm,
+               struct vm_area_struct *vma, unsigned long addr)
 {
-       struct page *new_page;
+       struct folio *new_folio;
 
-       new_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, addr);
-       if (!new_page)
+       new_folio = vma_alloc_folio(GFP_HIGHUSER_MOVABLE, 0, vma, addr, false);
+       if (!new_folio)
                return NULL;
 
-       if (mem_cgroup_charge(page_folio(new_page), src_mm, GFP_KERNEL)) {
-               put_page(new_page);
+       if (mem_cgroup_charge(new_folio, src_mm, GFP_KERNEL)) {
+               folio_put(new_folio);
                return NULL;
        }
-       cgroup_throttle_swaprate(new_page, GFP_KERNEL);
+       cgroup_throttle_swaprate(&new_folio->page, GFP_KERNEL);
 
-       return new_page;
+       return new_folio;
 }
 
 static int
        int progress, ret = 0;
        int rss[NR_MM_COUNTERS];
        swp_entry_t entry = (swp_entry_t){0};
-       struct page *prealloc = NULL;
+       struct folio *prealloc = NULL;
 
 again:
        progress = 0;
                         * will allocate page according to address).  This
                         * could only happen if one pinned pte changed.
                         */
-                       put_page(prealloc);
+                       folio_put(prealloc);
                        prealloc = NULL;
                }
                progress += 8;
                goto again;
 out:
        if (unlikely(prealloc))
-               put_page(prealloc);
+               folio_put(prealloc);
        return ret;
 }