int memory_failure(unsigned long pfn, int flags)
 {
        struct page *p;
-       struct page *hpage;
+       struct folio *folio;
        struct dev_pagemap *pgmap;
        int res = 0;
        unsigned long page_flags;
                }
        }
 
-       hpage = compound_head(p);
-       if (PageTransHuge(hpage)) {
+       folio = page_folio(p);
+       if (folio_test_large(folio)) {
                /*
                 * The flag must be set after the refcount is bumped
                 * otherwise it may race with THP split.
                 * The refcount is bumped iff the page is a valid,
                 * handlable page.
                 */
-               SetPageHasHWPoisoned(hpage);
+               folio_set_has_hwpoisoned(folio);
                if (try_to_split_thp_page(p) < 0) {
                        res = action_result(pfn, MF_MSG_UNSPLIT_THP, MF_IGNORED);
                        goto unlock_mutex;
                }
                VM_BUG_ON_PAGE(!page_count(p), p);
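+               /*
+                * After a successful split, p belongs to a new small folio,
+                * so the folio pointer must be re-derived before further use.
+                */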
+               folio = page_folio(p);
        }
 
        /*
         * The LRU check below (unnecessarily) ignores LRU pages that are
         * being isolated and walked by the page reclaim code, however
         * that's not a big loss.
         */
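        /*
         * Note: shake_folio() drains the per-CPU LRU caches so a folio that
         * is transiently off the LRU can make it back before we examine it.
         */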
-       shake_page(p);
+       shake_folio(folio);
 
-       lock_page(p);
+       folio_lock(folio);
 
        /*
         * We're only intended to deal with the non-Compound page here.
         * However, the page could have changed into a compound page due
         * to a race window. If this happens, we could try again to
         * hopefully handle the page next round.
         */
-       if (PageCompound(p)) {
+       if (folio_test_large(folio)) {
                if (retry) {
                        ClearPageHWPoison(p);
-                       unlock_page(p);
-                       put_page(p);
+                       folio_unlock(folio);
+                       folio_put(folio);
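                        /*
                         * The reference taken earlier was just dropped, so
                         * the retry must acquire a fresh one.
                         */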
                        flags &= ~MF_COUNT_INCREASED;
                        retry = false;
                        goto try_again;
         * folio_remove_rmap_*() in try_to_unmap_one(). So to determine page
         * status correctly, we save a copy of the page flags at this time.
         */
-       page_flags = p->flags;
+       page_flags = folio->flags;
 
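        /*
         * hwpoison_filter() lets the hwpoison test/debug machinery ignore
         * errors on pages it is not interested in; a filtered error is
         * un-poisoned and reported back as unsupported.
         */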
        if (hwpoison_filter(p)) {
                ClearPageHWPoison(p);
-               unlock_page(p);
-               put_page(p);
+               folio_unlock(folio);
+               folio_put(folio);
                res = -EOPNOTSUPP;
                goto unlock_mutex;
        }
 
        /*
-        * __munlock_folio() may clear a writeback page's LRU flag without
-        * page_lock. We need wait writeback completion for this page or it
-        * may trigger vfs BUG while evict inode.
+        * __munlock_folio() may clear a writeback folio's LRU flag without
+        * the folio lock. We need to wait for writeback completion for this
+        * folio, or it may trigger a vfs BUG while evicting the inode.
         */
-       if (!PageLRU(p) && !PageWriteback(p))
+       if (!folio_test_lru(folio) && !folio_test_writeback(folio))
                goto identify_page_state;
 
        /*
         * It's very difficult to mess with pages currently under IO
         * and in many cases impossible, so we just avoid it here.
         */
-       wait_on_page_writeback(p);
+       folio_wait_writeback(folio);
 
        /*
         * Now take care of user space mappings.
        /*
         * Torn down by someone else?
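         * (An LRU folio with no mapping and no swap cache entry has been
         * truncated out from under us; there is nothing left to handle.)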
         */
-       if (PageLRU(p) && !PageSwapCache(p) && p->mapping == NULL) {
+       if (folio_test_lru(folio) && !folio_test_swapcache(folio) &&
+           folio->mapping == NULL) {
                res = action_result(pfn, MF_MSG_TRUNCATED_LRU, MF_IGNORED);
                goto unlock_page;
        }
        mutex_unlock(&mf_mutex);
        return res;
 unlock_page:
-       unlock_page(p);
+       folio_unlock(folio);
 unlock_mutex:
        mutex_unlock(&mf_mutex);
        return res;