}
 
 int add_to_page_cache_locked(struct page *page, struct address_space *mapping,
-                               pgoff_t index, gfp_t gfp_mask);
+               pgoff_t index, gfp_t gfp);
 int add_to_page_cache_lru(struct page *page, struct address_space *mapping,
-                               pgoff_t index, gfp_t gfp_mask);
+               pgoff_t index, gfp_t gfp);
+int filemap_add_folio(struct address_space *mapping, struct folio *folio,
+               pgoff_t index, gfp_t gfp);
 extern void delete_from_page_cache(struct page *page);
 extern void __delete_from_page_cache(struct page *page, void *shadow);
 void replace_page_cache_page(struct page *old, struct page *new);
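
Not part of the patch: the page-based add_to_page_cache_lru() declaration above survives (only its gfp parameter is renamed) even though its definition is removed from mm/filemap.c further down. Presumably the rest of the series keeps it as a thin wrapper over the new folio entry point, along the lines of the sketch below; the wrapper body and its placement (mainline uses mm/folio-compat.c) are assumptions, not text from this hunk.

/* Sketch of a page-based compat wrapper over the new folio API; illustrative only. */
int add_to_page_cache_lru(struct page *page, struct address_space *mapping,
                pgoff_t index, gfp_t gfp)
{
        return filemap_add_folio(mapping, page_folio(page), index, gfp);
}
EXPORT_SYMBOL(add_to_page_cache_lru);
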
        return error;
 }
 
+/* Must be non-static for BPF error injection */
+int __filemap_add_folio(struct address_space *mapping, struct folio *folio,
+               pgoff_t index, gfp_t gfp, void **shadowp);
+
 /**
  * struct readahead_control - Describes a readahead request.
  *
 
 }
 EXPORT_SYMBOL_GPL(replace_page_cache_page);
 
-noinline int __add_to_page_cache_locked(struct page *page,
-                                       struct address_space *mapping,
-                                       pgoff_t offset, gfp_t gfp,
-                                       void **shadowp)
+noinline int __filemap_add_folio(struct address_space *mapping,
+               struct folio *folio, pgoff_t index, gfp_t gfp, void **shadowp)
 {
-       XA_STATE(xas, &mapping->i_pages, offset);
-       int huge = PageHuge(page);
+       XA_STATE(xas, &mapping->i_pages, index);
+       int huge = folio_test_hugetlb(folio);
        int error;
        bool charged = false;
 
-       VM_BUG_ON_PAGE(!PageLocked(page), page);
-       VM_BUG_ON_PAGE(PageSwapBacked(page), page);
+       VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
+       VM_BUG_ON_FOLIO(folio_test_swapbacked(folio), folio);
        mapping_set_update(&xas, mapping);
 
-       get_page(page);
-       page->mapping = mapping;
-       page->index = offset;
+       folio_get(folio);
+       folio->mapping = mapping;
+       folio->index = index;
 
        if (!huge) {
-               error = mem_cgroup_charge(page_folio(page), NULL, gfp);
+               error = mem_cgroup_charge(folio, NULL, gfp);
+               VM_BUG_ON_FOLIO(index & (folio_nr_pages(folio) - 1), folio);
                if (error)
                        goto error;
                charged = true;
                unsigned int order = xa_get_order(xas.xa, xas.xa_index);
                void *entry, *old = NULL;
 
-               if (order > thp_order(page))
+               if (order > folio_order(folio))
                        xas_split_alloc(&xas, xa_load(xas.xa, xas.xa_index),
                                        order, gfp);
                xas_lock_irq(&xas);
                                *shadowp = old;
                        /* entry may have been split before we acquired lock */
                        order = xa_get_order(xas.xa, xas.xa_index);
-                       if (order > thp_order(page)) {
+                       if (order > folio_order(folio)) {
                                xas_split(&xas, old, order);
                                xas_reset(&xas);
                        }
                }
 
-               xas_store(&xas, page);
+               xas_store(&xas, folio);
                if (xas_error(&xas))
                        goto unlock;
 
 
                /* hugetlb pages do not participate in page cache accounting */
                if (!huge)
-                       __inc_lruvec_page_state(page, NR_FILE_PAGES);
+                       __lruvec_stat_add_folio(folio, NR_FILE_PAGES);
 unlock:
                xas_unlock_irq(&xas);
        } while (xas_nomem(&xas, gfp));
        if (xas_error(&xas)) {
                error = xas_error(&xas);
                if (charged)
-                       mem_cgroup_uncharge(page_folio(page));
+                       mem_cgroup_uncharge(folio);
                goto error;
        }
 
-       trace_mm_filemap_add_to_page_cache(page);
+       trace_mm_filemap_add_to_page_cache(&folio->page);
        return 0;
 error:
-       page->mapping = NULL;
+       folio->mapping = NULL;
        /* Leave page->index set: truncation relies upon it */
-       put_page(page);
+       folio_put(folio);
        return error;
 }
-ALLOW_ERROR_INJECTION(__add_to_page_cache_locked, ERRNO);
+ALLOW_ERROR_INJECTION(__filemap_add_folio, ERRNO);
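
For context, not part of the patch: ALLOW_ERROR_INJECTION(..., ERRNO) marks __filemap_add_folio() as an injection point whose return value may be overridden with a negative errno, which is why the declaration in pagemap.h above must stay non-static. A minimal, hypothetical BPF kprobe sketch that forces the function to fail is shown below; it assumes CONFIG_FUNCTION_ERROR_INJECTION, CONFIG_BPF_KPROBE_OVERRIDE and a libbpf-style build, and the program name is made up.

/* Hypothetical error-injection sketch; illustrative only. */
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

SEC("kprobe/__filemap_add_folio")
int fail_filemap_add_folio(struct pt_regs *ctx)
{
        /* ERRNO-class injection: make __filemap_add_folio() return -ENOMEM (-12) */
        bpf_override_return(ctx, (__u64)-12);
        return 0;
}

char LICENSE[] SEC("license") = "GPL";
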
 
 /**
  * add_to_page_cache_locked - add a locked page to the pagecache
 int add_to_page_cache_locked(struct page *page, struct address_space *mapping,
                pgoff_t offset, gfp_t gfp_mask)
 {
-       return __add_to_page_cache_locked(page, mapping, offset,
+       return __filemap_add_folio(mapping, page_folio(page), offset,
                                          gfp_mask, NULL);
 }
 EXPORT_SYMBOL(add_to_page_cache_locked);
 
-int add_to_page_cache_lru(struct page *page, struct address_space *mapping,
-                               pgoff_t offset, gfp_t gfp_mask)
+int filemap_add_folio(struct address_space *mapping, struct folio *folio,
+                               pgoff_t index, gfp_t gfp)
 {
        void *shadow = NULL;
        int ret;
 
-       __SetPageLocked(page);
-       ret = __add_to_page_cache_locked(page, mapping, offset,
-                                        gfp_mask, &shadow);
+       __folio_set_locked(folio);
+       ret = __filemap_add_folio(mapping, folio, index, gfp, &shadow);
        if (unlikely(ret))
-               __ClearPageLocked(page);
+               __folio_clear_locked(folio);
        else {
                /*
-                * The page might have been evicted from cache only
+                * The folio might have been evicted from cache only
                 * recently, in which case it should be activated like
-                * any other repeatedly accessed page.
-                * The exception is pages getting rewritten; evicting other
+                * any other repeatedly accessed folio.
+                * The exception is folios getting rewritten; evicting other
                 * data from the working set, only to cache data that will
                 * get overwritten with something else, is a waste of memory.
                 */
-               WARN_ON_ONCE(PageActive(page));
-               if (!(gfp_mask & __GFP_WRITE) && shadow)
-                       workingset_refault(page_folio(page), shadow);
-               lru_cache_add(page);
+               WARN_ON_ONCE(folio_test_active(folio));
+               if (!(gfp & __GFP_WRITE) && shadow)
+                       workingset_refault(folio, shadow);
+               folio_add_lru(folio);
        }
        return ret;
 }
-EXPORT_SYMBOL_GPL(add_to_page_cache_lru);
+EXPORT_SYMBOL_GPL(filemap_add_folio);
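
Usage note, not part of the patch: a caller of the new API allocates a folio and hands it to filemap_add_folio(), which locks it, charges it, inserts it into the xarray and puts it on the LRU. A hypothetical sketch of such a caller follows; example_grab_folio() is a made-up name, not a kernel function.

/* Hypothetical caller sketch; illustrative only. */
#include <linux/pagemap.h>
#include <linux/err.h>

static struct folio *example_grab_folio(struct address_space *mapping,
                pgoff_t index, gfp_t gfp)
{
        struct folio *folio;
        int err;

        folio = filemap_alloc_folio(gfp, 0);    /* order-0 folio */
        if (!folio)
                return ERR_PTR(-ENOMEM);

        /* On success the folio is locked, in the page cache and on the LRU. */
        err = filemap_add_folio(mapping, folio, index, gfp);
        if (err) {
                /* e.g. another task already cached a folio at @index */
                folio_put(folio);
                return ERR_PTR(err);
        }
        return folio;
}
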
 
 #ifdef CONFIG_NUMA
 struct folio *filemap_alloc_folio(gfp_t gfp, unsigned int order)