int dissolve_free_huge_page(struct page *page)
 {
        int rc = -EBUSY;
+       struct folio *folio = page_folio(page);
 
 retry:
        /* Not to disrupt normal path by vainly holding hugetlb_lock */
-       if (!PageHuge(page))
+       if (!folio_test_hugetlb(folio))
                return 0;
 
        spin_lock_irq(&hugetlb_lock);
-       if (!PageHuge(page)) {
+       if (!folio_test_hugetlb(folio)) {
                rc = 0;
                goto out;
        }
 
-       if (!page_count(page)) {
-               struct page *head = compound_head(page);
-               struct hstate *h = page_hstate(head);
+       if (!folio_ref_count(folio)) {
+               struct hstate *h = folio_hstate(folio);
                if (!available_huge_pages(h))
                        goto out;
 
                /*
                 * We should make sure that the page is already on the free list
                 * when it is dissolved.
                 */
-               if (unlikely(!HPageFreed(head))) {
+               if (unlikely(!folio_test_hugetlb_freed(folio))) {
                        spin_unlock_irq(&hugetlb_lock);
                        cond_resched();
 
                        goto retry;
                }
 
-               remove_hugetlb_page(h, head, false);
+               remove_hugetlb_page(h, &folio->page, false);
                h->max_huge_pages--;
                spin_unlock_irq(&hugetlb_lock);
 
                /*
                 * Attempt to allocate vmemmmap here so that we can take
                 * appropriate action on failure.
                 */
-               rc = hugetlb_vmemmap_restore(h, head);
+               rc = hugetlb_vmemmap_restore(h, &folio->page);
                if (!rc) {
-                       update_and_free_page(h, head, false);
+                       update_and_free_page(h, &folio->page, false);
                } else {
                        spin_lock_irq(&hugetlb_lock);
-                       add_hugetlb_page(h, head, false);
+                       add_hugetlb_page(h, &folio->page, false);
                        h->max_huge_pages++;
                        spin_unlock_irq(&hugetlb_lock);
                }