mm: rename huge_zero_page to huge_zero_folio
Author:     Pankaj Raghav <p.raghav@samsung.com>
AuthorDate: Mon, 11 Aug 2025 08:41:09 +0000 (10:41 +0200)
Commit:     Andrew Morton <akpm@linux-foundation.org>
CommitDate: Fri, 12 Sep 2025 00:24:48 +0000 (17:24 -0700)
Patch series "add persistent huge zero folio support", v3.

Many places in the kernel need to zero out larger chunks, but the maximum
segment we can zero out at a time using ZERO_PAGE is limited to PAGE_SIZE.

This concern was raised during the review of adding Large Block Size
support to XFS[1][2].

This is especially annoying in block devices and filesystems where multiple
ZERO_PAGEs are attached to the bio in different bvecs.  With multipage bvec
support in the block layer, it is much more efficient to send out larger
zero pages as part of a single bvec.
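
To make the contrast concrete, here is a rough sketch (not code from this
series; the helper name and bounds handling are invented for illustration,
assuming the usual block layer headers) of how a range of zeroes is built
up today, one PAGE_SIZE bvec at a time:

        /* Illustration only: each ZERO_PAGE addition ends up in its own bvec. */
        static void zero_range_with_zero_page(struct bio *bio, size_t len)
        {
                while (len) {
                        size_t this = min_t(size_t, len, PAGE_SIZE);

                        if (bio_add_page(bio, ZERO_PAGE(0), this, 0) != this)
                                break;
                        len -= this;
                }
        }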

Some examples of places in the kernel where this could be useful:
- blkdev_issue_zero_pages()
- iomap_dio_zero()
- vmalloc.c:zero_iter()
- rxperf_process_call()
- fscrypt_zeroout_range_inline_crypt()
- bch2_checksum_update()
...

Usually the huge_zero_folio is allocated on demand, and it is deallocated
by the shrinker once there are no users of it left.  At the moment, the
huge_zero_folio infrastructure's refcount is tied to the lifetime of the
process that created it.  This might not work for the bio layer, as
completions can be async and the process that created the huge_zero_folio
might no longer be alive.  And one of the main points that came up during
discussion is to have something bigger than the zero page as a drop-in
replacement.
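
For reference, this is the existing, mm-tied calling convention
(mm_get_huge_zero_folio() and mm_put_huge_zero_folio() are the real
helpers visible in the diff below; the wrapper around them is only a
sketch):

        /* Sketch: the reference is keyed to @mm, not to the caller. */
        static struct folio *grab_zero_folio(struct mm_struct *mm)
        {
                struct folio *zero_folio = mm_get_huge_zero_folio(mm);

                if (!zero_folio)
                        return NULL;    /* allocation failed, callers fall back to ZERO_PAGE */

                /*
                 * Only valid while @mm is alive; the reference is dropped
                 * via mm_put_huge_zero_folio() at mm teardown, which is
                 * why async bio completions cannot rely on it.
                 */
                return zero_folio;
        }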

Add a config option PERSISTENT_HUGE_ZERO_FOLIO that always allocates the
huge_zero_folio and disables the shrinker so that the huge_zero_folio is
never freed.  This makes it possible to use the huge_zero_folio without
having to pass any mm struct, and does not tie the lifetime of the zero
folio to anything, making it a drop-in replacement for ZERO_PAGE.
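
The option and its plumbing come later in the series; purely as a sketch
of the intended calling convention (the accessor name is hypothetical,
only huge_zero_folio and the config option name come from this cover
letter), a user would need neither an mm nor a matching put:

        /* Hypothetical accessor, valid only if CONFIG_PERSISTENT_HUGE_ZERO_FOLIO
         * guarantees the folio is allocated at init and never freed. */
        static inline struct folio *persistent_huge_zero_folio_sketch(void)
        {
                if (!IS_ENABLED(CONFIG_PERSISTENT_HUGE_ZERO_FOLIO))
                        return NULL;
                return READ_ONCE(huge_zero_folio);      /* no refcount, no mm needed */
        }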

I have converted blkdev_issue_zero_pages() as an example as part of this
series.  I also noticed close to a 4% performance improvement just by
replacing ZERO_PAGE with the persistent huge_zero_folio.
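
The conversion follows the obvious pattern; a simplified sketch (helper
name and bounds handling invented for illustration, assuming the
persistent folio is available) of attaching zeroes in folio-sized chunks
instead:

        /* Illustration only: cover the range in folio-sized bvecs. */
        static void zero_range_with_zero_folio(struct bio *bio, size_t len)
        {
                struct folio *zero = READ_ONCE(huge_zero_folio);

                while (len) {
                        size_t this = min_t(size_t, len, folio_size(zero));

                        if (!bio_add_folio(bio, zero, this, 0))
                                break;
                        len -= this;
                }
        }

With a 2 MiB huge zero folio and 4 KiB base pages this covers the same
range with up to 512 times fewer bvecs than the ZERO_PAGE loop above.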

I will send patches to individual subsystems using the huge_zero_folio
once this gets upstreamed.

[1] https://lore.kernel.org/linux-xfs/20231027051847.GA7885@lst.de/
[2] https://lore.kernel.org/linux-xfs/ZitIK5OnR7ZNY0IG@infradead.org/

As the transition from exposing huge_zero_page to huge_zero_folio has
already happened, rename the shrinker and the other helper functions to
reflect that.

No functional changes.

Link: https://lkml.kernel.org/r/20250811084113.647267-1-kernel@pankajraghav.com
Link: https://lkml.kernel.org/r/20250811084113.647267-2-kernel@pankajraghav.com
Signed-off-by: Pankaj Raghav <p.raghav@samsung.com>
Reviewed-by: Lorenzo Stoakes <lorenzo.stoakes@oracle.com>
Reviewed-by: Zi Yan <ziy@nvidia.com>
Suggested-by: David Hildenbrand <david@redhat.com>
Acked-by: David Hildenbrand <david@redhat.com>
Reviewed-by: Hannes Reinecke <hare@suse.de>
Cc: Baolin Wang <baolin.wang@linux.alibaba.com>
Cc: Christoph Hellwig <hch@lst.de>
Cc: "Darrick J. Wong" <djwong@kernel.org>
Cc: Dev Jain <dev.jain@arm.com>
Cc: Jens Axboe <axboe@kernel.dk>
Cc: Liam Howlett <liam.howlett@oracle.com>
Cc: Luis Chamberlain <mcgrof@kernel.org>
Cc: Mariano Pache <npache@redhat.com>
Cc: Matthew Wilcox (Oracle) <willy@infradead.org>
Cc: Michal Hocko <mhocko@suse.com>
Cc: Mike Rapoport <rppt@kernel.org>
Cc: "Ritesh Harjani (IBM)" <ritesh.list@gmail.com>
Cc: Ryan Roberts <ryan.roberts@arm.com>
Cc: Suren Baghdasaryan <surenb@google.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Vlastimil Babka <vbabka@suse.cz>
Cc: Kiryl Shutsemau <kirill@shutemov.name>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 58bac83e7fa31633aa9fbee0aa4f367c8ad65230..3f0c8c2856d3960f8dd9ff4da498407720f7a744 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -207,7 +207,7 @@ unsigned long __thp_vma_allowable_orders(struct vm_area_struct *vma,
        return orders;
 }
 
-static bool get_huge_zero_page(void)
+static bool get_huge_zero_folio(void)
 {
        struct folio *zero_folio;
 retry:
@@ -237,7 +237,7 @@ retry:
        return true;
 }
 
-static void put_huge_zero_page(void)
+static void put_huge_zero_folio(void)
 {
        /*
         * Counter should never go to zero here. Only shrinker can put
@@ -251,11 +251,11 @@ struct folio *mm_get_huge_zero_folio(struct mm_struct *mm)
        if (test_bit(MMF_HUGE_ZERO_PAGE, &mm->flags))
                return READ_ONCE(huge_zero_folio);
 
-       if (!get_huge_zero_page())
+       if (!get_huge_zero_folio())
                return NULL;
 
        if (test_and_set_bit(MMF_HUGE_ZERO_PAGE, &mm->flags))
-               put_huge_zero_page();
+               put_huge_zero_folio();
 
        return READ_ONCE(huge_zero_folio);
 }
@@ -263,18 +263,18 @@ struct folio *mm_get_huge_zero_folio(struct mm_struct *mm)
 void mm_put_huge_zero_folio(struct mm_struct *mm)
 {
        if (test_bit(MMF_HUGE_ZERO_PAGE, &mm->flags))
-               put_huge_zero_page();
+               put_huge_zero_folio();
 }
 
-static unsigned long shrink_huge_zero_page_count(struct shrinker *shrink,
-                                       struct shrink_control *sc)
+static unsigned long shrink_huge_zero_folio_count(struct shrinker *shrink,
+                                                 struct shrink_control *sc)
 {
        /* we can free zero page only if last reference remains */
        return atomic_read(&huge_zero_refcount) == 1 ? HPAGE_PMD_NR : 0;
 }
 
-static unsigned long shrink_huge_zero_page_scan(struct shrinker *shrink,
-                                      struct shrink_control *sc)
+static unsigned long shrink_huge_zero_folio_scan(struct shrinker *shrink,
+                                                struct shrink_control *sc)
 {
        if (atomic_cmpxchg(&huge_zero_refcount, 1, 0) == 1) {
                struct folio *zero_folio = xchg(&huge_zero_folio, NULL);
@@ -287,7 +287,7 @@ static unsigned long shrink_huge_zero_page_scan(struct shrinker *shrink,
        return 0;
 }
 
-static struct shrinker *huge_zero_page_shrinker;
+static struct shrinker *huge_zero_folio_shrinker;
 
 #ifdef CONFIG_SYSFS
 static ssize_t enabled_show(struct kobject *kobj,
@@ -849,8 +849,8 @@ static inline void hugepage_exit_sysfs(struct kobject *hugepage_kobj)
 
 static int __init thp_shrinker_init(void)
 {
-       huge_zero_page_shrinker = shrinker_alloc(0, "thp-zero");
-       if (!huge_zero_page_shrinker)
+       huge_zero_folio_shrinker = shrinker_alloc(0, "thp-zero");
+       if (!huge_zero_folio_shrinker)
                return -ENOMEM;
 
        deferred_split_shrinker = shrinker_alloc(SHRINKER_NUMA_AWARE |
@@ -858,13 +858,13 @@ static int __init thp_shrinker_init(void)
                                                 SHRINKER_NONSLAB,
                                                 "thp-deferred_split");
        if (!deferred_split_shrinker) {
-               shrinker_free(huge_zero_page_shrinker);
+               shrinker_free(huge_zero_folio_shrinker);
                return -ENOMEM;
        }
 
-       huge_zero_page_shrinker->count_objects = shrink_huge_zero_page_count;
-       huge_zero_page_shrinker->scan_objects = shrink_huge_zero_page_scan;
-       shrinker_register(huge_zero_page_shrinker);
+       huge_zero_folio_shrinker->count_objects = shrink_huge_zero_folio_count;
+       huge_zero_folio_shrinker->scan_objects = shrink_huge_zero_folio_scan;
+       shrinker_register(huge_zero_folio_shrinker);
 
        deferred_split_shrinker->count_objects = deferred_split_count;
        deferred_split_shrinker->scan_objects = deferred_split_scan;
@@ -875,7 +875,7 @@ static int __init thp_shrinker_init(void)
 
 static void __init thp_shrinker_exit(void)
 {
-       shrinker_free(huge_zero_page_shrinker);
+       shrinker_free(huge_zero_folio_shrinker);
        shrinker_free(deferred_split_shrinker);
 }