@@ ... @@ static void __destroy_compound_gigantic_folio(struct folio *folio,
        for (i = 1; i < nr_pages; i++) {
                p = folio_page(folio, i);
+               p->flags &= ~PAGE_FLAGS_CHECK_AT_FREE;
                p->mapping = NULL;
                clear_compound_head(p);
                if (!demote)
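
[Note: the hunk above clears PAGE_FLAGS_CHECK_AT_FREE in each tail page because this path hands those pages back to the buddy allocator individually, and the allocator's free path rejects a page that still has any of the checked bits set. Clearing the very mask the checker consults keeps the scrub and the check in sync by construction. A minimal userspace sketch of that idea follows; the flag names and values are illustrative stand-ins, not the kernel's definitions:]

    #include <stdio.h>

    /* Illustrative stand-ins for page flag bits (not the kernel's values). */
    enum { PG_locked, PG_dirty, PG_active, PG_writeback, PG_head };

    /*
     * One shared mask, as with PAGE_FLAGS_CHECK_AT_FREE: the free-time
     * check and the pre-free scrub agree by construction.
     */
    #define FLAGS_CHECK_AT_FREE \
            (1UL << PG_locked | 1UL << PG_dirty | \
             1UL << PG_active | 1UL << PG_writeback)

    static int bad_page_at_free(unsigned long flags)
    {
            return (flags & FLAGS_CHECK_AT_FREE) != 0;
    }

    int main(void)
    {
            unsigned long flags = (1UL << PG_dirty) | (1UL << PG_head);

            flags &= ~FLAGS_CHECK_AT_FREE;  /* the "p->flags &= ~..." in the hunk above */
            printf("bad at free: %d\n", bad_page_at_free(flags));  /* prints 0 */
            return 0;
    }

[By contrast, the open-coded flag list removed further down has to mirror the allocator's checks by hand.]
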
@@ ... @@
 static void __update_and_free_hugetlb_folio(struct hstate *h,
                                                struct folio *folio)
 {
-       int i;
-       struct page *subpage;
        bool clear_dtor = folio_test_hugetlb_vmemmap_optimized(folio);
 
        if (hstate_is_gigantic(h) && !gigantic_page_runtime_supported())
@@ ... @@ static void __update_and_free_hugetlb_folio(struct hstate *h,
                spin_unlock_irq(&hugetlb_lock);
        }
 
-       for (i = 0; i < pages_per_huge_page(h); i++) {
-               subpage = folio_page(folio, i);
-               subpage->flags &= ~(1 << PG_locked | 1 << PG_error |
-                               1 << PG_referenced | 1 << PG_dirty |
-                               1 << PG_active | 1 << PG_private |
-                               1 << PG_writeback);
-       }
-
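[Note: the loop removed here enumerated flag bits by hand (PG_locked, PG_error, PG_referenced, ...), a list that must be kept in step with the allocator's free-time checks manually and that, e.g., never covered bits such as PG_lru or PG_reserved. With the change in the first hunk, the one path that frees tail pages individually clears the allocator's own PAGE_FLAGS_CHECK_AT_FREE mask instead, so this per-subpage pass appears to be redundant and is dropped.]
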
        /*
         * Non-gigantic pages demoted from CMA allocated gigantic pages
         * need to be given back to CMA in free_gigantic_folio.