mm/rmap: do __folio_mod_stat() in __folio_add_rmap()
author Wei Yang <richard.weiyang@gmail.com>
Mon, 4 Aug 2025 06:41:06 +0000 (06:41 +0000)
committer Andrew Morton <akpm@linux-foundation.org>
Fri, 12 Sep 2025 00:24:41 +0000 (17:24 -0700)
Folio statistics must be updated after rmap changes, so it is reasonable
to do this in __folio_add_rmap(), as __folio_remove_rmap() and
folio_add_new_anon_rmap() already do.

Call __folio_mod_stat() in __folio_add_rmap(), so that the whole rmap
adjustment family shares the same pattern.
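
A minimal user-space sketch of the resulting pattern (the struct, the
counters, and folio_add_rmap()/folio_mod_stat() below are simplified
stand-ins for the kernel machinery, not the real interfaces):

#include <stdio.h>

/* Simplified stand-ins for struct folio and the vmstat counters. */
struct folio {
	int anon;		/* folio_test_anon() stand-in */
	long nr_mapped;		/* NR_ANON_MAPPED / NR_FILE_MAPPED stand-in */
	long nr_pmdmapped;	/* NR_*_PMDMAPPED stand-in */
};

static void folio_mod_stat(struct folio *folio, int nr, int nr_pmdmapped)
{
	folio->nr_mapped += nr;
	folio->nr_pmdmapped += nr_pmdmapped;
}

/*
 * After the patch, the add path adjusts the statistics itself, mirroring
 * __folio_remove_rmap(), so wrappers such as the anon/file variants no
 * longer have to carry nr/nr_pmdmapped out and call the stat helper.
 */
static void folio_add_rmap(struct folio *folio, int nr_pages, int pmd_level)
{
	int nr_pmdmapped = pmd_level ? nr_pages : 0;

	folio_mod_stat(folio, nr_pages, nr_pmdmapped);
}

int main(void)
{
	struct folio f = { .anon = 1 };

	folio_add_rmap(&f, 512, 1);	/* e.g. one PMD-mapped THP */
	printf("mapped=%ld pmdmapped=%ld\n", f.nr_mapped, f.nr_pmdmapped);
	return 0;
}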

Link: https://lkml.kernel.org/r/20250804064106.21269-1-richard.weiyang@gmail.com
Signed-off-by: Wei Yang <richard.weiyang@gmail.com>
Acked-by: David Hildenbrand <david@redhat.com>
Reviewed-by: Lorenzo Stoakes <lorenzo.stoakes@oracle.com>
Cc: Rik van Riel <riel@surriel.com>
Cc: Liam R. Howlett <Liam.Howlett@oracle.com>
Cc: Vlastimil Babka <vbabka@suse.cz>
Cc: Harry Yoo <harry.yoo@oracle.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
mm/rmap.c

index 568198e9efc2927f540518f88e9ada9025248154..84a8d8b02ef77e14f2d5fac5a6b8f70a675615da 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -1241,13 +1241,35 @@ int pfn_mkclean_range(unsigned long pfn, unsigned long nr_pages, pgoff_t pgoff,
        return page_vma_mkclean_one(&pvmw);
 }
 
-static __always_inline unsigned int __folio_add_rmap(struct folio *folio,
+static void __folio_mod_stat(struct folio *folio, int nr, int nr_pmdmapped)
+{
+       int idx;
+
+       if (nr) {
+               idx = folio_test_anon(folio) ? NR_ANON_MAPPED : NR_FILE_MAPPED;
+               __lruvec_stat_mod_folio(folio, idx, nr);
+       }
+       if (nr_pmdmapped) {
+               if (folio_test_anon(folio)) {
+                       idx = NR_ANON_THPS;
+                       __lruvec_stat_mod_folio(folio, idx, nr_pmdmapped);
+               } else {
+                       /* NR_*_PMDMAPPED are not maintained per-memcg */
+                       idx = folio_test_swapbacked(folio) ?
+                               NR_SHMEM_PMDMAPPED : NR_FILE_PMDMAPPED;
+                       __mod_node_page_state(folio_pgdat(folio), idx,
+                                             nr_pmdmapped);
+               }
+       }
+}
+
+static __always_inline void __folio_add_rmap(struct folio *folio,
                struct page *page, int nr_pages, struct vm_area_struct *vma,
-               enum rmap_level level, int *nr_pmdmapped)
+               enum rmap_level level)
 {
        atomic_t *mapped = &folio->_nr_pages_mapped;
        const int orig_nr_pages = nr_pages;
-       int first = 0, nr = 0;
+       int first = 0, nr = 0, nr_pmdmapped = 0;
 
        __folio_rmap_sanity_checks(folio, page, nr_pages, level);
 
@@ -1283,7 +1305,7 @@ static __always_inline unsigned int __folio_add_rmap(struct folio *folio,
                first = atomic_inc_and_test(&folio->_entire_mapcount);
                if (IS_ENABLED(CONFIG_NO_PAGE_MAPCOUNT)) {
                        if (level == RMAP_LEVEL_PMD && first)
-                               *nr_pmdmapped = folio_large_nr_pages(folio);
+                               nr_pmdmapped = folio_large_nr_pages(folio);
                        nr = folio_inc_return_large_mapcount(folio, vma);
                        if (nr == 1)
                                /* Was completely unmapped. */
@@ -1302,7 +1324,7 @@ static __always_inline unsigned int __folio_add_rmap(struct folio *folio,
                                 * folios separately.
                                 */
                                if (level == RMAP_LEVEL_PMD)
-                                       *nr_pmdmapped = nr_pages;
+                                       nr_pmdmapped = nr_pages;
                                nr = nr_pages - (nr & FOLIO_PAGES_MAPPED);
                                /* Raced ahead of a remove and another add? */
                                if (unlikely(nr < 0))
@@ -1315,7 +1337,7 @@ static __always_inline unsigned int __folio_add_rmap(struct folio *folio,
                folio_inc_large_mapcount(folio, vma);
                break;
        }
-       return nr;
+       __folio_mod_stat(folio, nr, nr_pmdmapped);
 }
 
 /**
@@ -1403,43 +1425,19 @@ static void __page_check_anon_rmap(const struct folio *folio,
                       page);
 }
 
-static void __folio_mod_stat(struct folio *folio, int nr, int nr_pmdmapped)
-{
-       int idx;
-
-       if (nr) {
-               idx = folio_test_anon(folio) ? NR_ANON_MAPPED : NR_FILE_MAPPED;
-               __lruvec_stat_mod_folio(folio, idx, nr);
-       }
-       if (nr_pmdmapped) {
-               if (folio_test_anon(folio)) {
-                       idx = NR_ANON_THPS;
-                       __lruvec_stat_mod_folio(folio, idx, nr_pmdmapped);
-               } else {
-                       /* NR_*_PMDMAPPED are not maintained per-memcg */
-                       idx = folio_test_swapbacked(folio) ?
-                               NR_SHMEM_PMDMAPPED : NR_FILE_PMDMAPPED;
-                       __mod_node_page_state(folio_pgdat(folio), idx,
-                                             nr_pmdmapped);
-               }
-       }
-}
-
 static __always_inline void __folio_add_anon_rmap(struct folio *folio,
                struct page *page, int nr_pages, struct vm_area_struct *vma,
                unsigned long address, rmap_t flags, enum rmap_level level)
 {
-       int i, nr, nr_pmdmapped = 0;
+       int i;
 
        VM_WARN_ON_FOLIO(!folio_test_anon(folio), folio);
 
-       nr = __folio_add_rmap(folio, page, nr_pages, vma, level, &nr_pmdmapped);
+       __folio_add_rmap(folio, page, nr_pages, vma, level);
 
        if (likely(!folio_test_ksm(folio)))
                __page_check_anon_rmap(folio, page, vma, address);
 
-       __folio_mod_stat(folio, nr, nr_pmdmapped);
-
        if (flags & RMAP_EXCLUSIVE) {
                switch (level) {
                case RMAP_LEVEL_PTE:
@@ -1613,12 +1611,9 @@ static __always_inline void __folio_add_file_rmap(struct folio *folio,
                struct page *page, int nr_pages, struct vm_area_struct *vma,
                enum rmap_level level)
 {
-       int nr, nr_pmdmapped = 0;
-
        VM_WARN_ON_FOLIO(folio_test_anon(folio), folio);
 
-       nr = __folio_add_rmap(folio, page, nr_pages, vma, level, &nr_pmdmapped);
-       __folio_mod_stat(folio, nr, nr_pmdmapped);
+       __folio_add_rmap(folio, page, nr_pages, vma, level);
 
        /* See comments in folio_add_anon_rmap_*() */
        if (!folio_test_large(folio))