        VM_BUG_ON(atomic_read(&anon_vma->refcount));
 
        /*
-        * Synchronize against page_lock_anon_vma_read() such that
+        * Synchronize against folio_lock_anon_vma_read() such that
         * we can safely hold the lock without the anon_vma getting
         * freed.
         *
         * Relies on the full mb implied by the atomic_dec_and_test() from
         * put_anon_vma() against the acquire barrier implied by
-        * down_read_trylock() from page_lock_anon_vma_read(). This orders:
+        * down_read_trylock() from folio_lock_anon_vma_read(). This orders:
         *
-        * page_lock_anon_vma_read()    VS      put_anon_vma()
+        * folio_lock_anon_vma_read()   VS      put_anon_vma()
         *   down_read_trylock()                  atomic_dec_and_test()
         *   LOCK                                 MB
         *   atomic_read()                        rwsem_is_locked()
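
/*
 * Illustrative sketch, not part of the patch: the two sides that the
 * table above pairs up, reduced to the steps the comment names.  The
 * writer side mirrors put_anon_vma()/anon_vma_free(); the reader side
 * is a schematic of the ordering only, not the real control flow of
 * folio_lock_anon_vma_read().
 */
static struct anon_vma *reader_side_sketch(struct anon_vma *anon_vma)
{
	if (!down_read_trylock(&anon_vma->root->rwsem))	/* LOCK (acquire) */
		return NULL;
	if (!atomic_read(&anon_vma->refcount)) {	/* the put already ran */
		up_read(&anon_vma->root->rwsem);
		return NULL;				/* back off */
	}
	return anon_vma;			/* safe while the rwsem is held */
}

static void writer_side_sketch(struct anon_vma *anon_vma)
{
	if (!atomic_dec_and_test(&anon_vma->refcount))	/* implies full MB */
		return;
	if (rwsem_is_locked(&anon_vma->root->rwsem)) {	/* a reader got in */
		anon_vma_lock_write(anon_vma);		/* wait for it to leave */
		anon_vma_unlock_write(anon_vma);
	}
	/* only now may the anon_vma actually be freed */
}
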
  * allocate a new one.
  *
  * Anon-vma allocations are very subtle, because we may have
- * optimistically looked up an anon_vma in page_lock_anon_vma_read()
+ * optimistically looked up an anon_vma in folio_lock_anon_vma_read()
  * and that may actually touch the rwsem even in the newly
  * allocated vma (it depends on RCU to make sure that the
  * anon_vma isn't actually destroyed).
 /*
  * arg: folio_referenced_arg will be passed
  */
-static bool folio_referenced_one(struct page *page, struct vm_area_struct *vma,
-                       unsigned long address, void *arg)
+static bool folio_referenced_one(struct folio *folio,
+               struct vm_area_struct *vma, unsigned long address, void *arg)
 {
-       struct folio *folio = page_folio(page);
        struct folio_referenced_arg *pra = arg;
        DEFINE_FOLIO_VMA_WALK(pvmw, folio, vma, address, 0);
        int referenced = 0;
        struct rmap_walk_control rwc = {
                .rmap_one = folio_referenced_one,
                .arg = (void *)&pra,
-               .anon_lock = page_lock_anon_vma_read,
+               .anon_lock = folio_lock_anon_vma_read,
        };
 
        *vm_flags = 0;
                rwc.invalid_vma = invalid_folio_referenced_vma;
        }
 
-       rmap_walk(&folio->page, &rwc);
+       rmap_walk(folio, &rwc);
        *vm_flags = pra.vm_flags;
 
        if (we_locked)
        return pra.referenced;
 }
 
-static bool page_mkclean_one(struct page *page, struct vm_area_struct *vma,
+static bool page_mkclean_one(struct folio *folio, struct vm_area_struct *vma,
                            unsigned long address, void *arg)
 {
-       struct folio *folio = page_folio(page);
        DEFINE_FOLIO_VMA_WALK(pvmw, folio, vma, address, PVMW_SYNC);
        struct mmu_notifier_range range;
        int *cleaned = arg;
        if (!mapping)
                return 0;
 
-       rmap_walk(&folio->page, &rwc);
+       rmap_walk(folio, &rwc);
 
        return cleaned;
 }
 /*
  * @arg: enum ttu_flags will be passed to this argument
  */
-static bool try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
+static bool try_to_unmap_one(struct folio *folio, struct vm_area_struct *vma,
                     unsigned long address, void *arg)
 {
-       struct folio *folio = page_folio(page);
        struct mm_struct *mm = vma->vm_mm;
        DEFINE_FOLIO_VMA_WALK(pvmw, folio, vma, address, 0);
        pte_t pteval;
        return vma_is_temporary_stack(vma);
 }
 
-static int page_not_mapped(struct page *page)
+static int page_not_mapped(struct folio *folio)
 {
-       return !page_mapped(page);
+       return !folio_mapped(folio);
 }
 
 /**
                .rmap_one = try_to_unmap_one,
                .arg = (void *)flags,
                .done = page_not_mapped,
-               .anon_lock = page_lock_anon_vma_read,
+               .anon_lock = folio_lock_anon_vma_read,
        };
 
        if (flags & TTU_RMAP_LOCKED)
-               rmap_walk_locked(&folio->page, &rwc);
+               rmap_walk_locked(folio, &rwc);
        else
-               rmap_walk(&folio->page, &rwc);
+               rmap_walk(folio, &rwc);
 }
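
/*
 * Illustrative sketch, not part of the patch: the shape every rmap_one
 * callback converted here now has.  The folio is passed in directly, so
 * the page_folio() lookup at the top of each callback goes away.  The
 * name "folio_touch_one" and the counter passed through the arg pointer
 * are made up for the example.
 */
static bool folio_touch_one(struct folio *folio, struct vm_area_struct *vma,
			    unsigned long address, void *arg)
{
	DEFINE_FOLIO_VMA_WALK(pvmw, folio, vma, address, 0);
	int *nr_ptes = arg;

	/* Visit each place this VMA maps the folio. */
	while (page_vma_mapped_walk(&pvmw))
		(*nr_ptes)++;

	return true;	/* returning false would stop the rmap walk early */
}
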
 
 /*
  * If TTU_SPLIT_HUGE_PMD is specified any PMD mappings will be split into PTEs
  * containing migration entries.
  */
-static bool try_to_migrate_one(struct page *page, struct vm_area_struct *vma,
+static bool try_to_migrate_one(struct folio *folio, struct vm_area_struct *vma,
                     unsigned long address, void *arg)
 {
-       struct folio *folio = page_folio(page);
        struct mm_struct *mm = vma->vm_mm;
        DEFINE_FOLIO_VMA_WALK(pvmw, folio, vma, address, 0);
        pte_t pteval;
                .rmap_one = try_to_migrate_one,
                .arg = (void *)flags,
                .done = page_not_mapped,
-               .anon_lock = page_lock_anon_vma_read,
+               .anon_lock = folio_lock_anon_vma_read,
        };
 
        /*
                rwc.invalid_vma = invalid_migration_vma;
 
        if (flags & TTU_RMAP_LOCKED)
-               rmap_walk_locked(&folio->page, &rwc);
+               rmap_walk_locked(folio, &rwc);
        else
-               rmap_walk(&folio->page, &rwc);
+               rmap_walk(folio, &rwc);
 }
 
 #ifdef CONFIG_DEVICE_PRIVATE
        bool valid;
 };
 
-static bool page_make_device_exclusive_one(struct page *page,
+static bool page_make_device_exclusive_one(struct folio *folio,
                struct vm_area_struct *vma, unsigned long address, void *priv)
 {
-       struct folio *folio = page_folio(page);
        struct mm_struct *mm = vma->vm_mm;
        DEFINE_FOLIO_VMA_WALK(pvmw, folio, vma, address, 0);
        struct make_exclusive_args *args = priv;
        struct rmap_walk_control rwc = {
                .rmap_one = page_make_device_exclusive_one,
                .done = page_not_mapped,
-               .anon_lock = page_lock_anon_vma_read,
+               .anon_lock = folio_lock_anon_vma_read,
                .arg = &args,
        };
 
        if (!folio_test_anon(folio))
                return false;
 
-       rmap_walk(&folio->page, &rwc);
+       rmap_walk(folio, &rwc);
 
        return args.valid && !folio_mapcount(folio);
 }
                anon_vma_free(root);
 }
 
-static struct anon_vma *rmap_walk_anon_lock(struct page *page,
+static struct anon_vma *rmap_walk_anon_lock(struct folio *folio,
                                        struct rmap_walk_control *rwc)
 {
-       struct folio *folio = page_folio(page);
        struct anon_vma *anon_vma;
 
        if (rwc->anon_lock)
-               return rwc->anon_lock(page);
+               return rwc->anon_lock(folio);
 
        /*
-        * Note: remove_migration_ptes() cannot use page_lock_anon_vma_read()
+        * Note: remove_migration_ptes() cannot use folio_lock_anon_vma_read()
         * because that depends on page_mapped(); but not all its usages
         * are holding mmap_lock. Users without mmap_lock are required to
         * take a reference count to prevent the anon_vma disappearing
  * Find all the mappings of a page using the mapping pointer and the vma chains
  * contained in the anon_vma struct it points to.
  */
-static void rmap_walk_anon(struct page *page, struct rmap_walk_control *rwc,
+static void rmap_walk_anon(struct folio *folio, struct rmap_walk_control *rwc,
                bool locked)
 {
-       struct folio *folio = page_folio(page);
        struct anon_vma *anon_vma;
        pgoff_t pgoff_start, pgoff_end;
        struct anon_vma_chain *avc;
                /* anon_vma disappear under us? */
                VM_BUG_ON_FOLIO(!anon_vma, folio);
        } else {
-               anon_vma = rmap_walk_anon_lock(page, rwc);
+               anon_vma = rmap_walk_anon_lock(folio, rwc);
        }
        if (!anon_vma)
                return;
 
-       pgoff_start = page_to_pgoff(page);
-       pgoff_end = pgoff_start + thp_nr_pages(page) - 1;
+       pgoff_start = folio_pgoff(folio);
+       pgoff_end = pgoff_start + folio_nr_pages(folio) - 1;
        anon_vma_interval_tree_foreach(avc, &anon_vma->rb_root,
                        pgoff_start, pgoff_end) {
                struct vm_area_struct *vma = avc->vma;
-               unsigned long address = vma_address(page, vma);
+               unsigned long address = vma_address(&folio->page, vma);
 
                VM_BUG_ON_VMA(address == -EFAULT, vma);
                cond_resched();
                if (rwc->invalid_vma && rwc->invalid_vma(vma, rwc->arg))
                        continue;
 
-               if (!rwc->rmap_one(page, vma, address, rwc->arg))
+               if (!rwc->rmap_one(folio, vma, address, rwc->arg))
                        break;
-               if (rwc->done && rwc->done(page))
+               if (rwc->done && rwc->done(folio))
                        break;
        }
 
  * Find all the mappings of a page using the mapping pointer and the vma chains
  * contained in the address_space struct it points to.
  */
-static void rmap_walk_file(struct page *page, struct rmap_walk_control *rwc,
+static void rmap_walk_file(struct folio *folio, struct rmap_walk_control *rwc,
                bool locked)
 {
-       struct address_space *mapping = page_mapping(page);
+       struct address_space *mapping = folio_mapping(folio);
        pgoff_t pgoff_start, pgoff_end;
        struct vm_area_struct *vma;
 
         * structure at mapping cannot be freed and reused yet,
         * so we can safely take mapping->i_mmap_rwsem.
         */
-       VM_BUG_ON_PAGE(!PageLocked(page), page);
+       VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
 
        if (!mapping)
                return;
 
-       pgoff_start = page_to_pgoff(page);
-       pgoff_end = pgoff_start + thp_nr_pages(page) - 1;
+       pgoff_start = folio_pgoff(folio);
+       pgoff_end = pgoff_start + folio_nr_pages(folio) - 1;
        if (!locked)
                i_mmap_lock_read(mapping);
        vma_interval_tree_foreach(vma, &mapping->i_mmap,
                        pgoff_start, pgoff_end) {
-               unsigned long address = vma_address(page, vma);
+               unsigned long address = vma_address(&folio->page, vma);
 
                VM_BUG_ON_VMA(address == -EFAULT, vma);
                cond_resched();
                if (rwc->invalid_vma && rwc->invalid_vma(vma, rwc->arg))
                        continue;
 
-               if (!rwc->rmap_one(page, vma, address, rwc->arg))
+               if (!rwc->rmap_one(folio, vma, address, rwc->arg))
                        goto done;
-               if (rwc->done && rwc->done(page))
+               if (rwc->done && rwc->done(folio))
                        goto done;
        }
 
                i_mmap_unlock_read(mapping);
 }
 
-void rmap_walk(struct page *page, struct rmap_walk_control *rwc)
+void rmap_walk(struct folio *folio, struct rmap_walk_control *rwc)
 {
-       if (unlikely(PageKsm(page)))
-               rmap_walk_ksm(page, rwc);
-       else if (PageAnon(page))
-               rmap_walk_anon(page, rwc, false);
+       if (unlikely(folio_test_ksm(folio)))
+               rmap_walk_ksm(folio, rwc);
+       else if (folio_test_anon(folio))
+               rmap_walk_anon(folio, rwc, false);
        else
-               rmap_walk_file(page, rwc, false);
+               rmap_walk_file(folio, rwc, false);
 }
 
 /* Like rmap_walk, but caller holds relevant rmap lock */
-void rmap_walk_locked(struct page *page, struct rmap_walk_control *rwc)
+void rmap_walk_locked(struct folio *folio, struct rmap_walk_control *rwc)
 {
        /* no ksm support for now */
-       VM_BUG_ON_PAGE(PageKsm(page), page);
-       if (PageAnon(page))
-               rmap_walk_anon(page, rwc, true);
+       VM_BUG_ON_FOLIO(folio_test_ksm(folio), folio);
+       if (folio_test_anon(folio))
+               rmap_walk_anon(folio, rwc, true);
        else
-               rmap_walk_file(page, rwc, true);
+               rmap_walk_file(folio, rwc, true);
 }
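
/*
 * Illustrative sketch, not part of the patch: a caller-side view of the
 * converted interface.  Everything is driven by the folio; no struct
 * page crosses the rmap_walk() boundary any more.  The function names
 * "count_vmas_one" and "folio_count_mapping_vmas" are made up.
 */
static bool count_vmas_one(struct folio *folio, struct vm_area_struct *vma,
			   unsigned long address, void *arg)
{
	int *nr_vmas = arg;

	(*nr_vmas)++;		/* one VMA that maps this folio */
	return true;		/* keep walking */
}

static int folio_count_mapping_vmas(struct folio *folio)
{
	int nr_vmas = 0;
	struct rmap_walk_control rwc = {
		.rmap_one = count_vmas_one,
		.arg = &nr_vmas,
		.anon_lock = folio_lock_anon_vma_read,	/* folio-based now */
	};

	/* rmap_walk_file() above asserts the folio is locked. */
	folio_lock(folio);
	rmap_walk(folio, &rwc);
	folio_unlock(folio);

	return nr_vmas;
}
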
 
 #ifdef CONFIG_HUGETLB_PAGE