mm, hugetlb: remove HUGETLB_CGROUP_MIN_ORDER
author		Frank van der Linden <fvdl@google.com>
		Wed, 4 Oct 2023 15:32:48 +0000 (15:32 +0000)
committer	Andrew Morton <akpm@linux-foundation.org>
		Wed, 18 Oct 2023 21:34:17 +0000 (14:34 -0700)
Originally, hugetlb_cgroup was the only hugetlb user of tail page
structure fields.  So the code defined HUGETLB_CGROUP_MIN_ORDER and
checked against it to make sure pages weren't too small to hold that
information.

However, by now, tail page #2 is also used to store hugetlb hwpoison
and subpool information.  In other words, without that tail page,
hugetlb doesn't work.

Acknowledge this fact by getting rid of HUGETLB_CGROUP_MIN_ORDER and
checks against it.  Instead, just check for the minimum viable page order
at hstate creation time.

Link: https://lkml.kernel.org/r/20231004153248.3842997-1-fvdl@google.com
Signed-off-by: Frank van der Linden <fvdl@google.com>
Reviewed-by: Mike Kravetz <mike.kravetz@oracle.com>
Cc: Muchun Song <muchun.song@linux.dev>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
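
For context, a minimal userspace sketch (not kernel code) of the arithmetic
behind the new BUG_ON. It assumes __NR_USED_SUBPAGE is 3, per the comment
removed from hugetlb_cgroup.h below ("At least 3 pages are necessary for all
the tracking information"), and models order_base_2() as the rounded-up
base-2 order, i.e. the order of the smallest power of two >= n:

/*
 * Userspace sketch of the minimum-order check, not kernel code.
 * order_base_2() here mimics the kernel helper: the rounded-up
 * base-2 order of n.  __NR_USED_SUBPAGE is taken to be 3 (head
 * page plus two tail pages carrying hugetlb metadata), as stated
 * by the comment removed in this patch.
 */
#include <assert.h>
#include <stdio.h>

static unsigned int order_base_2(unsigned int n)
{
	unsigned int order = 0;

	while ((1u << order) < n)
		order++;
	return order;
}

int main(void)
{
	unsigned int min_order = order_base_2(3);	/* __NR_USED_SUBPAGE */

	/* 2^2 = 4 >= 3: the smallest viable hstate has order 2 (4 base pages) */
	printf("minimum hstate order: %u\n", min_order);
	assert(min_order == 2);

	/* an order-0 or order-1 hstate would now trip the BUG_ON */
	return 0;
}

With __NR_USED_SUBPAGE == 3 the minimum order is 2 (four base pages).  Since
every existing hstate already depends on tail page #2, the new check should
be a pure sanity check rather than a behavioral change.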
include/linux/hugetlb_cgroup.h
mm/hugetlb.c
mm/hugetlb_cgroup.c

diff --git a/include/linux/hugetlb_cgroup.h b/include/linux/hugetlb_cgroup.h
index 3d82d91f49acb04c7c123d9210c77eb6d3c7916c..e5d64b8b59c20271c5e5d8b22fcb040758ea9637 100644
--- a/include/linux/hugetlb_cgroup.h
+++ b/include/linux/hugetlb_cgroup.h
@@ -22,13 +22,6 @@ struct resv_map;
 struct file_region;
 
 #ifdef CONFIG_CGROUP_HUGETLB
-/*
- * Minimum page order trackable by hugetlb cgroup.
- * At least 3 pages are necessary for all the tracking information.
- * The second tail page contains all of the hugetlb-specific fields.
- */
-#define HUGETLB_CGROUP_MIN_ORDER order_base_2(__NR_USED_SUBPAGE)
-
 enum hugetlb_memory_event {
        HUGETLB_MAX,
        HUGETLB_NR_MEMORY_EVENTS,
@@ -68,8 +61,6 @@ static inline struct hugetlb_cgroup *
 __hugetlb_cgroup_from_folio(struct folio *folio, bool rsvd)
 {
        VM_BUG_ON_FOLIO(!folio_test_hugetlb(folio), folio);
-       if (folio_order(folio) < HUGETLB_CGROUP_MIN_ORDER)
-               return NULL;
        if (rsvd)
                return folio->_hugetlb_cgroup_rsvd;
        else
@@ -91,8 +82,6 @@ static inline void __set_hugetlb_cgroup(struct folio *folio,
                                       struct hugetlb_cgroup *h_cg, bool rsvd)
 {
        VM_BUG_ON_FOLIO(!folio_test_hugetlb(folio), folio);
-       if (folio_order(folio) < HUGETLB_CGROUP_MIN_ORDER)
-               return;
        if (rsvd)
                folio->_hugetlb_cgroup_rsvd = h_cg;
        else
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 7ad9d2159da4260883cb8431ee184fb970dcd3a9..e2b1c417b90ae441099faba6ad2a3a77919e4a87 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -4361,7 +4361,7 @@ void __init hugetlb_add_hstate(unsigned int order)
                return;
        }
        BUG_ON(hugetlb_max_hstate >= HUGE_MAX_HSTATE);
-       BUG_ON(order == 0);
+       BUG_ON(order < order_base_2(__NR_USED_SUBPAGE));
        h = &hstates[hugetlb_max_hstate++];
        mutex_init(&h->resize_lock);
        h->order = order;
diff --git a/mm/hugetlb_cgroup.c b/mm/hugetlb_cgroup.c
index dedd2edb076ec211c2078651335bcc56066b516d..aa4486bd390493b4bfc45aa05ee0353785388191 100644
--- a/mm/hugetlb_cgroup.c
+++ b/mm/hugetlb_cgroup.c
@@ -262,12 +262,6 @@ static int __hugetlb_cgroup_charge_cgroup(int idx, unsigned long nr_pages,
 
        if (hugetlb_cgroup_disabled())
                goto done;
-       /*
-        * We don't charge any cgroup if the compound page have less
-        * than 3 pages.
-        */
-       if (huge_page_order(&hstates[idx]) < HUGETLB_CGROUP_MIN_ORDER)
-               goto done;
 again:
        rcu_read_lock();
        h_cg = hugetlb_cgroup_from_task(current);
@@ -397,9 +391,6 @@ static void __hugetlb_cgroup_uncharge_cgroup(int idx, unsigned long nr_pages,
        if (hugetlb_cgroup_disabled() || !h_cg)
                return;
 
-       if (huge_page_order(&hstates[idx]) < HUGETLB_CGROUP_MIN_ORDER)
-               return;
-
        page_counter_uncharge(__hugetlb_cgroup_counter_from_cgroup(h_cg, idx,
                                                                   rsvd),
                              nr_pages);
@@ -869,15 +860,8 @@ void __init hugetlb_cgroup_file_init(void)
 {
        struct hstate *h;
 
-       for_each_hstate(h) {
-               /*
-                * Add cgroup control files only if the huge page consists
-                * of more than two normal pages. This is because we use
-                * page[2].private for storing cgroup details.
-                */
-               if (huge_page_order(h) >= HUGETLB_CGROUP_MIN_ORDER)
-                       __hugetlb_cgroup_file_init(hstate_index(h));
-       }
+       for_each_hstate(h)
+               __hugetlb_cgroup_file_init(hstate_index(h));
 }
 
 /*