pmd_t *pmd, _pmd;
        pte_t *pte;
        pgtable_t pgtable;
+       struct folio *folio;
        struct page *hpage;
        spinlock_t *pmd_ptl, *pte_ptl;
        int result = SCAN_FAIL;
        if (unlikely(result != SCAN_SUCCEED))
                goto out_up_write;
 
+       folio = page_folio(hpage);
        /*
-        * spin_lock() below is not the equivalent of smp_wmb(), but
-        * the smp_wmb() inside __SetPageUptodate() can be reused to
-        * avoid the copy_huge_page writes to become visible after
-        * the set_pmd_at() write.
+        * spin_lock() below is not the equivalent of smp_wmb(); the
+        * smp_wmb() inside __folio_mark_uptodate() is what ensures the
+        * copy_huge_page writes become visible before the set_pmd_at()
+        * write.
         */
-       __SetPageUptodate(hpage);
+       __folio_mark_uptodate(folio);
        pgtable = pmd_pgtable(_pmd);
 
        _pmd = mk_huge_pmd(hpage, vma->vm_page_prot);
 
        spin_lock(pmd_ptl);
        BUG_ON(!pmd_none(*pmd));
-       page_add_new_anon_rmap(hpage, vma, address);
-       lru_cache_add_inactive_or_unevictable(hpage, vma);
+       folio_add_new_anon_rmap(folio, vma, address);
+       folio_add_lru_vma(folio, vma);
        pgtable_trans_huge_deposit(mm, pmd, pgtable);
        set_pmd_at(mm, address, pmd, _pmd);
        update_mmu_cache_pmd(vma, address, pmd);