www.infradead.org Git - users/jedix/linux-maple.git/commitdiff
mm: cleanup count_mthp_stat() definition
author: Ryan Roberts <ryan.roberts@arm.com>
Thu, 8 Aug 2024 11:18:46 +0000 (12:18 +0100)
committer: Andrew Morton <akpm@linux-foundation.org>
Sat, 17 Aug 2024 00:53:03 +0000 (17:53 -0700)
Patch series "Shmem mTHP controls and stats improvements", v3.

This is a small series to tidy up the way the shmem controls and stats are
exposed.  These patches were previously part of the series at [2], but I
decided to split them out since they can go in independently.

This patch (of 2):

Let's move count_mthp_stat() so that it's always defined, even when THP is
disabled.  Previously uses of the function in files such as shmem.c, which
are compiled even when THP is disabled, required ugly THP ifdeferry.  With
this cleanup, we can remove those ifdefs and the function resolves to a
nop when THP is disabled.

I shortly plan to call count_mthp_stat() from more THP-invariant source
files.

Link: https://lkml.kernel.org/r/20240808111849.651867-1-ryan.roberts@arm.com
Link: https://lkml.kernel.org/r/20240808111849.651867-2-ryan.roberts@arm.com
Signed-off-by: Ryan Roberts <ryan.roberts@arm.com>
Acked-by: Barry Song <baohua@kernel.org>
Reviewed-by: Baolin Wang <baolin.wang@linux.alibaba.com>
Reviewed-by: Lance Yang <ioworker0@gmail.com>
Acked-by: David Hildenbrand <david@redhat.com>
Cc: Gavin Shan <gshan@redhat.com>
Cc: Hugh Dickins <hughd@google.com>
Cc: Matthew Wilcox (Oracle) <willy@infradead.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
include/linux/huge_mm.h
mm/memory.c
mm/shmem.c

index ce44caa40eed56947d48c12ea29593e1b90e2b08..34910a7d631e955ca9add280074f0d12200ff63c 100644 (file)
@@ -114,6 +114,41 @@ extern struct kobj_attribute thpsize_shmem_enabled_attr;
 #define HPAGE_PUD_MASK (~(HPAGE_PUD_SIZE - 1))
 #define HPAGE_PUD_SIZE ((1UL) << HPAGE_PUD_SHIFT)
 
+enum mthp_stat_item {
+       MTHP_STAT_ANON_FAULT_ALLOC,
+       MTHP_STAT_ANON_FAULT_FALLBACK,
+       MTHP_STAT_ANON_FAULT_FALLBACK_CHARGE,
+       MTHP_STAT_SWPOUT,
+       MTHP_STAT_SWPOUT_FALLBACK,
+       MTHP_STAT_SHMEM_ALLOC,
+       MTHP_STAT_SHMEM_FALLBACK,
+       MTHP_STAT_SHMEM_FALLBACK_CHARGE,
+       MTHP_STAT_SPLIT,
+       MTHP_STAT_SPLIT_FAILED,
+       MTHP_STAT_SPLIT_DEFERRED,
+       __MTHP_STAT_COUNT
+};
+
+#if defined(CONFIG_TRANSPARENT_HUGEPAGE) && defined(CONFIG_SYSFS)
+struct mthp_stat {
+       unsigned long stats[ilog2(MAX_PTRS_PER_PTE) + 1][__MTHP_STAT_COUNT];
+};
+
+DECLARE_PER_CPU(struct mthp_stat, mthp_stats);
+
+static inline void count_mthp_stat(int order, enum mthp_stat_item item)
+{
+       if (order <= 0 || order > PMD_ORDER)
+               return;
+
+       this_cpu_inc(mthp_stats.stats[order][item]);
+}
+#else
+static inline void count_mthp_stat(int order, enum mthp_stat_item item)
+{
+}
+#endif
+
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
 
 extern unsigned long transparent_hugepage_flags;
@@ -269,41 +304,6 @@ struct thpsize {
 
 #define to_thpsize(kobj) container_of(kobj, struct thpsize, kobj)
 
-enum mthp_stat_item {
-       MTHP_STAT_ANON_FAULT_ALLOC,
-       MTHP_STAT_ANON_FAULT_FALLBACK,
-       MTHP_STAT_ANON_FAULT_FALLBACK_CHARGE,
-       MTHP_STAT_SWPOUT,
-       MTHP_STAT_SWPOUT_FALLBACK,
-       MTHP_STAT_SHMEM_ALLOC,
-       MTHP_STAT_SHMEM_FALLBACK,
-       MTHP_STAT_SHMEM_FALLBACK_CHARGE,
-       MTHP_STAT_SPLIT,
-       MTHP_STAT_SPLIT_FAILED,
-       MTHP_STAT_SPLIT_DEFERRED,
-       __MTHP_STAT_COUNT
-};
-
-struct mthp_stat {
-       unsigned long stats[ilog2(MAX_PTRS_PER_PTE) + 1][__MTHP_STAT_COUNT];
-};
-
-#ifdef CONFIG_SYSFS
-DECLARE_PER_CPU(struct mthp_stat, mthp_stats);
-
-static inline void count_mthp_stat(int order, enum mthp_stat_item item)
-{
-       if (order <= 0 || order > PMD_ORDER)
-               return;
-
-       this_cpu_inc(mthp_stats.stats[order][item]);
-}
-#else
-static inline void count_mthp_stat(int order, enum mthp_stat_item item)
-{
-}
-#endif
-
 #define transparent_hugepage_use_zero_page()                           \
        (transparent_hugepage_flags &                                   \
         (1<<TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG))
index 2ca87ceafede2d181a484e8de82d7d3aa548c895..b4e384b261c952388652a1ede9ec892474d8728a 100644 (file)
@@ -4595,9 +4595,7 @@ static vm_fault_t do_anonymous_page(struct vm_fault *vmf)
 
        folio_ref_add(folio, nr_pages - 1);
        add_mm_counter(vma->vm_mm, MM_ANONPAGES, nr_pages);
-#ifdef CONFIG_TRANSPARENT_HUGEPAGE
        count_mthp_stat(folio_order(folio), MTHP_STAT_ANON_FAULT_ALLOC);
-#endif
        folio_add_new_anon_rmap(folio, vma, addr, RMAP_EXCLUSIVE);
        folio_add_lru_vma(folio, vma);
 setpte:
index 4a5254bfd61061e2a1be0ca78f69576e1c1198e7..06fcf82b61c83bd5afe9a101174f1e904c0f799c 100644 (file)
@@ -1783,9 +1783,7 @@ static struct folio *shmem_alloc_and_add_folio(struct vm_fault *vmf,
 
                        if (pages == HPAGE_PMD_NR)
                                count_vm_event(THP_FILE_FALLBACK);
-#ifdef CONFIG_TRANSPARENT_HUGEPAGE
                        count_mthp_stat(order, MTHP_STAT_SHMEM_FALLBACK);
-#endif
                        order = next_order(&suitable_orders, order);
                }
        } else {
@@ -1810,10 +1808,8 @@ allocated:
                                count_vm_event(THP_FILE_FALLBACK);
                                count_vm_event(THP_FILE_FALLBACK_CHARGE);
                        }
-#ifdef CONFIG_TRANSPARENT_HUGEPAGE
                        count_mthp_stat(folio_order(folio), MTHP_STAT_SHMEM_FALLBACK);
                        count_mthp_stat(folio_order(folio), MTHP_STAT_SHMEM_FALLBACK_CHARGE);
-#endif
                }
                goto unlock;
        }
@@ -2181,9 +2177,7 @@ repeat:
                if (!IS_ERR(folio)) {
                        if (folio_test_pmd_mappable(folio))
                                count_vm_event(THP_FILE_ALLOC);
-#ifdef CONFIG_TRANSPARENT_HUGEPAGE
                        count_mthp_stat(folio_order(folio), MTHP_STAT_SHMEM_ALLOC);
-#endif
                        goto alloced;
                }
                if (PTR_ERR(folio) == -EEXIST)