/*
  * For a HugeTLB page, there is more metadata to save in the struct page. But
  * the head struct page cannot meet our needs, so we have to abuse other tail
- * struct page to store the metadata. In order to avoid conflicts caused by
- * subsequent use of more tail struct pages, we gather these discrete indexes
- * of tail struct page here.
+ * struct page to store the metadata.
  */
-enum {
-       SUBPAGE_INDEX_SUBPOOL = 1,      /* reuse page->private */
-#ifdef CONFIG_CGROUP_HUGETLB
-       SUBPAGE_INDEX_CGROUP,           /* reuse page->private */
-       SUBPAGE_INDEX_CGROUP_RSVD,      /* reuse page->private */
-       __MAX_CGROUP_SUBPAGE_INDEX = SUBPAGE_INDEX_CGROUP_RSVD,
-#endif
-#ifdef CONFIG_MEMORY_FAILURE
-       SUBPAGE_INDEX_HWPOISON,
-#endif
-       __NR_USED_SUBPAGE,
-};
+#define __NR_USED_SUBPAGE 3
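
To make the new constant concrete: with __NR_USED_SUBPAGE == 3, struct pages 0 (head), 1 and 2 (first and second tail) of a hugetlb page carry metadata. A minimal sketch of the implied size constraint, assuming it sits next to the define above (the helper name is hypothetical, not part of the patch):

/*
 * Hypothetical: struct pages 0..2 are reserved for metadata, so a
 * hugetlb page must span at least three struct pages, i.e. order >= 2.
 */
static inline bool example_order_can_hold_metadata(unsigned int order)
{
        return (1UL << order) >= __NR_USED_SUBPAGE;
}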
 
 struct hugepage_subpool {
        spinlock_t lock;
 
 static inline struct hugepage_subpool *hugetlb_folio_subpool(struct folio *folio)
 {
-       return (void *)folio_get_private_1(folio);
+       return folio->_hugetlb_subpool;
 }
 
 /*
- * hugetlb page subpool pointer located in hpage[1].private
+ * hugetlb page subpool pointer located in hpage[2].hugetlb_subpool
  */
 static inline struct hugepage_subpool *hugetlb_page_subpool(struct page *hpage)
 {
 static inline void hugetlb_set_folio_subpool(struct folio *folio,
                                        struct hugepage_subpool *subpool)
 {
-       folio_set_private_1(folio, (unsigned long)subpool);
+       folio->_hugetlb_subpool = subpool;
 }
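
As a usage sketch for the accessor pair above (illustrative only; example_subpool_roundtrip() is not in the tree):

/* Illustrative only: store a subpool in a hugetlb folio, read it back. */
static inline void example_subpool_roundtrip(struct folio *folio,
                                             struct hugepage_subpool *spool)
{
        hugetlb_set_folio_subpool(folio, spool);
        VM_BUG_ON_FOLIO(hugetlb_folio_subpool(folio) != spool, folio);
}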
 
 static inline void hugetlb_set_page_subpool(struct page *hpage,
 
 #ifdef CONFIG_CGROUP_HUGETLB
 /*
  * Minimum page order trackable by hugetlb cgroup.
- * At least 4 pages are necessary for all the tracking information.
- * The second tail page (hpage[SUBPAGE_INDEX_CGROUP]) is the fault
- * usage cgroup. The third tail page (hpage[SUBPAGE_INDEX_CGROUP_RSVD])
- * is the reservation usage cgroup.
+ * At least 3 pages are necessary for all the tracking information.
+ * The second tail page contains all of the hugetlb-specific fields.
  */
-#define HUGETLB_CGROUP_MIN_ORDER order_base_2(__MAX_CGROUP_SUBPAGE_INDEX + 1)
+#define HUGETLB_CGROUP_MIN_ORDER order_base_2(__NR_USED_SUBPAGE)
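
Worked out: order_base_2(3) == 2, so the smallest trackable hugetlb page is order 2, i.e. 4 base pages, the smallest compound page that contains struct pages 0..2. A hedged compile-time restatement of that arithmetic (the check below is not part of the patch):

/* Hypothetical check: the new definition still evaluates to order 2. */
static inline void example_check_min_order(void)
{
        BUILD_BUG_ON(HUGETLB_CGROUP_MIN_ORDER != 2);
}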
 
 enum hugetlb_memory_event {
        HUGETLB_MAX,
 static inline struct hugetlb_cgroup *
 __hugetlb_cgroup_from_folio(struct folio *folio, bool rsvd)
 {
-       struct page *tail;
-
        VM_BUG_ON_FOLIO(!folio_test_hugetlb(folio), folio);
        if (folio_order(folio) < HUGETLB_CGROUP_MIN_ORDER)
                return NULL;
-
-       if (rsvd) {
-               tail = folio_page(folio, SUBPAGE_INDEX_CGROUP_RSVD);
-               return (void *)page_private(tail);
-       }
-
-       else {
-               tail = folio_page(folio, SUBPAGE_INDEX_CGROUP);
-               return (void *)page_private(tail);
-       }
+       if (rsvd)
+               return folio->_hugetlb_cgroup_rsvd;
+       else
+               return folio->_hugetlb_cgroup;
 }
 
 static inline struct hugetlb_cgroup *hugetlb_cgroup_from_folio(struct folio *folio)
                                       struct hugetlb_cgroup *h_cg, bool rsvd)
 {
        VM_BUG_ON_FOLIO(!folio_test_hugetlb(folio), folio);
-
        if (folio_order(folio) < HUGETLB_CGROUP_MIN_ORDER)
                return;
        if (rsvd)
-               set_page_private(folio_page(folio, SUBPAGE_INDEX_CGROUP_RSVD),
-                                (unsigned long)h_cg);
+               folio->_hugetlb_cgroup_rsvd = h_cg;
        else
-               set_page_private(folio_page(folio, SUBPAGE_INDEX_CGROUP),
-                                (unsigned long)h_cg);
+               folio->_hugetlb_cgroup = h_cg;
 }
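
A roundtrip sketch for the cgroup helpers, assuming the setter whose tail is shown above is __set_hugetlb_cgroup() as in hugetlb_cgroup.h, and a folio of at least HUGETLB_CGROUP_MIN_ORDER (the example function itself is hypothetical):

/* Illustrative only: @rsvd selects independent fault/reservation slots. */
static inline void example_cgroup_roundtrip(struct folio *folio,
                                            struct hugetlb_cgroup *h_cg)
{
        __set_hugetlb_cgroup(folio, h_cg, false);       /* fault usage */
        __set_hugetlb_cgroup(folio, NULL, true);        /* reservation */
        WARN_ON(__hugetlb_cgroup_from_folio(folio, false) != h_cg);
}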
 
 static inline void set_hugetlb_cgroup(struct folio *folio,
 
                        atomic_t compound_pincount;
 #ifdef CONFIG_64BIT
                        unsigned int compound_nr; /* 1 << compound_order */
-                       unsigned long _private_1;
 #endif
                };
-               struct {        /* Second tail page of compound page */
+               struct {        /* Second tail page of transparent huge page */
                        unsigned long _compound_pad_1;  /* compound_head */
                        unsigned long _compound_pad_2;
                        /* For both global and memcg */
                        struct list_head deferred_list;
                };
+               struct {        /* Second tail page of hugetlb page */
+                       unsigned long _hugetlb_pad_1;   /* compound_head */
+                       void *hugetlb_subpool;
+                       void *hugetlb_cgroup;
+                       void *hugetlb_cgroup_rsvd;
+                       void *hugetlb_hwpoison;
+                       /* No more space on 32-bit: use third tail if more */
+               };
                struct {        /* Page table pages */
                        unsigned long _pt_pad_1;        /* compound_head */
                        pgtable_t pmd_huge_pte; /* protected by page->ptl */
  *    to find how many references there are to this folio.
  * @memcg_data: Memory Control Group data.
  * @_flags_1: For large folios, additional page flags.
- * @__head: Points to the folio.  Do not use.
+ * @_head_1: Points to the folio.  Do not use.
  * @_folio_dtor: Which destructor to use for this folio.
  * @_folio_order: Do not use directly, call folio_order().
  * @_total_mapcount: Do not use directly, call folio_entire_mapcount().
  * @_pincount: Do not use directly, call folio_maybe_dma_pinned().
  * @_folio_nr_pages: Do not use directly, call folio_nr_pages().
- * @_private_1: Do not use directly, call folio_get_private_1().
+ * @_flags_2: For alignment.  Do not use.
+ * @_head_2: Points to the folio.  Do not use.
+ * @_hugetlb_subpool: Do not use directly, use accessor in hugetlb.h.
+ * @_hugetlb_cgroup: Do not use directly, use accessor in hugetlb_cgroup.h.
+ * @_hugetlb_cgroup_rsvd: Do not use directly, use accessor in hugetlb_cgroup.h.
+ * @_hugetlb_hwpoison: Do not use directly, call raw_hwp_list_head().
  *
  * A folio is a physically, virtually and logically contiguous set
  * of bytes.  It is a power-of-two in size, and it is aligned to that
                };
                struct page page;
        };
-       unsigned long _flags_1;
-       unsigned long __head;
-       unsigned char _folio_dtor;
-       unsigned char _folio_order;
-       atomic_t _total_mapcount;
-       atomic_t _pincount;
+       union {
+               struct {
+                       unsigned long _flags_1;
+                       unsigned long _head_1;
+                       unsigned char _folio_dtor;
+                       unsigned char _folio_order;
+                       atomic_t _total_mapcount;
+                       atomic_t _pincount;
 #ifdef CONFIG_64BIT
-       unsigned int _folio_nr_pages;
+                       unsigned int _folio_nr_pages;
 #endif
-       unsigned long _private_1;
+               };
+               struct page __page_1;
+       };
+       union {
+               struct {
+                       unsigned long _flags_2;
+                       unsigned long _head_2;
+                       void *_hugetlb_subpool;
+                       void *_hugetlb_cgroup;
+                       void *_hugetlb_cgroup_rsvd;
+                       void *_hugetlb_hwpoison;
+               };
+               struct page __page_2;
+       };
 };
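
Because each of the two new unions also contains a full struct page, a folio now overlays exactly three struct pages, which is also why hpage[2].hugetlb_subpool and folio->_hugetlb_subpool name the same storage. A hedged sketch of both invariants (neither the assertion nor the helper is part of the patch):

static_assert(sizeof(struct folio) == 3 * sizeof(struct page));

/* Hypothetical: raw-page view of the slot hugetlb_folio_subpool() reads. */
static inline void *example_subpool_via_tail(struct page *hpage)
{
        return hpage[2].hugetlb_subpool;        /* second tail page */
}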
 
 #define FOLIO_MATCH(pg, fl)                                            \
        static_assert(offsetof(struct folio, fl) ==                     \
                        offsetof(struct page, pg) + sizeof(struct page))
 FOLIO_MATCH(flags, _flags_1);
-FOLIO_MATCH(compound_head, __head);
+FOLIO_MATCH(compound_head, _head_1);
 FOLIO_MATCH(compound_dtor, _folio_dtor);
 FOLIO_MATCH(compound_order, _folio_order);
 FOLIO_MATCH(compound_mapcount, _total_mapcount);
 FOLIO_MATCH(compound_pincount, _pincount);
 #ifdef CONFIG_64BIT
 FOLIO_MATCH(compound_nr, _folio_nr_pages);
-FOLIO_MATCH(_private_1, _private_1);
 #endif
 #undef FOLIO_MATCH
+#define FOLIO_MATCH(pg, fl)                                            \
+       static_assert(offsetof(struct folio, fl) ==                     \
+                       offsetof(struct page, pg) + 2 * sizeof(struct page))
+FOLIO_MATCH(flags, _flags_2);
+FOLIO_MATCH(compound_head, _head_2);
+FOLIO_MATCH(hugetlb_subpool, _hugetlb_subpool);
+FOLIO_MATCH(hugetlb_cgroup, _hugetlb_cgroup);
+FOLIO_MATCH(hugetlb_cgroup_rsvd, _hugetlb_cgroup_rsvd);
+FOLIO_MATCH(hugetlb_hwpoison, _hugetlb_hwpoison);
+#undef FOLIO_MATCH
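
For instance, the hugetlb_subpool line in the second batch expands to the assertion below, pinning the folio field onto the matching field of the second tail page:

static_assert(offsetof(struct folio, _hugetlb_subpool) ==
              offsetof(struct page, hugetlb_subpool) + 2 * sizeof(struct page));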
 
static inline void *folio_get_private(struct folio *folio)
 {
        return folio->private;
 }
 
-static inline void folio_set_private_1(struct folio *folio, unsigned long private)
-{
-       folio->_private_1 = private;
-}
-
-static inline unsigned long folio_get_private_1(struct folio *folio)
-{
-       return folio->_private_1;
-}
-
 struct page_frag_cache {
        void * va;
 #if (PAGE_SIZE < PAGE_FRAG_CACHE_MAX_SIZE)