static vm_fault_t do_anonymous_page(struct vm_fault *vmf)
 {
        struct vm_area_struct *vma = vmf->vma;
-       struct page *page;
        struct folio *folio;
        vm_fault_t ret = 0;
        pte_t entry;
 
        /* Allocate our own private page. */
        folio = vma_alloc_zeroed_movable_folio(vma, vmf->address);
        if (!folio)
                goto oom;
 
-       page = &folio->page;
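        /* Charge the new folio to the memcg before it is mapped anywhere. */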
        if (mem_cgroup_charge(folio, vma->vm_mm, GFP_KERNEL))
                goto oom_free_page;
-       cgroup_throttle_swaprate(page, GFP_KERNEL);
+       folio_throttle_swaprate(folio, GFP_KERNEL);
 
        /*
-        * The memory barrier inside __SetPageUptodate makes sure that
+        * The memory barrier inside __folio_mark_uptodate makes sure that
         * preceding stores to the page contents become visible before
         * the set_pte_at() write.
         */
-       __SetPageUptodate(page);
+       __folio_mark_uptodate(folio);
 
-       entry = mk_pte(page, vma->vm_page_prot);
+       entry = mk_pte(&folio->page, vma->vm_page_prot);
        entry = pte_sw_mkyoung(entry);
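        /*
         * Writable VMAs get a writable, dirty PTE up front so the
         * first store to the page does not fault again.
         */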
        if (vma->vm_flags & VM_WRITE)
                entry = pte_mkwrite(pte_mkdirty(entry));
 
        vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, vmf->address,
                        &vmf->ptl);
        if (!pte_none(*vmf->pte)) {
                update_mmu_tlb(vma, vmf->address, vmf->pte);
                goto release;
        }
 
        /* Deliver the page fault to userland, check inside PT lock */
        if (userfaultfd_missing(vma)) {
                pte_unmap_unlock(vmf->pte, vmf->ptl);
-               put_page(page);
+               folio_put(folio);
                return handle_userfault(vmf, VM_UFFD_MISSING);
        }
 
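        /* Account the new anon page, hook up its rmap, and put it on the LRU. */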
        inc_mm_counter(vma->vm_mm, MM_ANONPAGES);
-       page_add_new_anon_rmap(page, vma, vmf->address);
-       lru_cache_add_inactive_or_unevictable(page, vma);
+       folio_add_new_anon_rmap(folio, vma, vmf->address);
+       folio_add_lru_vma(folio, vma);
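        /* The zero-page read path also jumps to setpte. */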
 setpte:
        set_pte_at(vma->vm_mm, vmf->address, vmf->pte, entry);
 
        /* No need to invalidate - it was non-present before */
        update_mmu_cache(vma, vmf->address, vmf->pte);
unlock:
        pte_unmap_unlock(vmf->pte, vmf->ptl);
        return ret;
 release:
-       put_page(page);
+       folio_put(folio);
        goto unlock;
 oom_free_page:
-       put_page(page);
+       folio_put(folio);
 oom:
        return VM_FAULT_OOM;
 }