mm: cleanup count_mthp_stat() definition
author	Ryan Roberts <ryan.roberts@arm.com>
	Thu, 8 Aug 2024 11:18:46 +0000 (12:18 +0100)
committer	Andrew Morton <akpm@linux-foundation.org>
	Mon, 9 Sep 2024 23:38:57 +0000 (16:38 -0700)
Patch series "Shmem mTHP controls and stats improvements", v3.

This is a small series to tidy up the way the shmem controls and stats are
exposed.  These patches were previously part of the series at [2], but I
decided to split them out since they can go in independently.

This patch (of 2):

Let's move count_mthp_stat() so that it's always defined, even when THP is
disabled.  Previously, uses of the function in files such as shmem.c, which
are compiled even when THP is disabled, required ugly THP ifdefery.  With
this cleanup, we can remove those ifdefs and the function resolves to a nop
when THP is disabled.

I shortly plan to call count_mthp_stat() from more THP-invariant source
files.
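
For illustration, here is a minimal sketch (not part of this patch) of what a
call site in a THP-invariant file looks like after the change, modeled on the
mm/shmem.c hunks below; the wrapper function is made up for the example:

	#include <linux/huge_mm.h>

	static void note_shmem_fallback(int order)
	{
		/*
		 * No #ifdef CONFIG_TRANSPARENT_HUGEPAGE needed around the
		 * call: with THP disabled, count_mthp_stat() is the empty
		 * static inline stub and the call compiles away.
		 */
		count_mthp_stat(order, MTHP_STAT_SHMEM_FALLBACK);
	}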

Link: https://lkml.kernel.org/r/20240808111849.651867-1-ryan.roberts@arm.com
Link: https://lkml.kernel.org/r/20240808111849.651867-2-ryan.roberts@arm.com
Signed-off-by: Ryan Roberts <ryan.roberts@arm.com>
Acked-by: Barry Song <baohua@kernel.org>
Reviewed-by: Baolin Wang <baolin.wang@linux.alibaba.com>
Reviewed-by: Lance Yang <ioworker0@gmail.com>
Acked-by: David Hildenbrand <david@redhat.com>
Cc: Gavin Shan <gshan@redhat.com>
Cc: Hugh Dickins <hughd@google.com>
Cc: Matthew Wilcox (Oracle) <willy@infradead.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
include/linux/huge_mm.h
mm/memory.c
mm/shmem.c

diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h
index 6370026689e0b3a5d6741b9970d254730b9627d9..4c32058cacfec940a12eee16263e168790397e46 100644
@@ -114,6 +114,41 @@ extern struct kobj_attribute thpsize_shmem_enabled_attr;
 #define HPAGE_PUD_MASK (~(HPAGE_PUD_SIZE - 1))
 #define HPAGE_PUD_SIZE ((1UL) << HPAGE_PUD_SHIFT)
 
+enum mthp_stat_item {
+       MTHP_STAT_ANON_FAULT_ALLOC,
+       MTHP_STAT_ANON_FAULT_FALLBACK,
+       MTHP_STAT_ANON_FAULT_FALLBACK_CHARGE,
+       MTHP_STAT_SWPOUT,
+       MTHP_STAT_SWPOUT_FALLBACK,
+       MTHP_STAT_SHMEM_ALLOC,
+       MTHP_STAT_SHMEM_FALLBACK,
+       MTHP_STAT_SHMEM_FALLBACK_CHARGE,
+       MTHP_STAT_SPLIT,
+       MTHP_STAT_SPLIT_FAILED,
+       MTHP_STAT_SPLIT_DEFERRED,
+       __MTHP_STAT_COUNT
+};
+
+#if defined(CONFIG_TRANSPARENT_HUGEPAGE) && defined(CONFIG_SYSFS)
+struct mthp_stat {
+       unsigned long stats[ilog2(MAX_PTRS_PER_PTE) + 1][__MTHP_STAT_COUNT];
+};
+
+DECLARE_PER_CPU(struct mthp_stat, mthp_stats);
+
+static inline void count_mthp_stat(int order, enum mthp_stat_item item)
+{
+       if (order <= 0 || order > PMD_ORDER)
+               return;
+
+       this_cpu_inc(mthp_stats.stats[order][item]);
+}
+#else
+static inline void count_mthp_stat(int order, enum mthp_stat_item item)
+{
+}
+#endif
+
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
 
 extern unsigned long transparent_hugepage_flags;
@@ -269,41 +304,6 @@ struct thpsize {
 
 #define to_thpsize(kobj) container_of(kobj, struct thpsize, kobj)
 
-enum mthp_stat_item {
-       MTHP_STAT_ANON_FAULT_ALLOC,
-       MTHP_STAT_ANON_FAULT_FALLBACK,
-       MTHP_STAT_ANON_FAULT_FALLBACK_CHARGE,
-       MTHP_STAT_SWPOUT,
-       MTHP_STAT_SWPOUT_FALLBACK,
-       MTHP_STAT_SHMEM_ALLOC,
-       MTHP_STAT_SHMEM_FALLBACK,
-       MTHP_STAT_SHMEM_FALLBACK_CHARGE,
-       MTHP_STAT_SPLIT,
-       MTHP_STAT_SPLIT_FAILED,
-       MTHP_STAT_SPLIT_DEFERRED,
-       __MTHP_STAT_COUNT
-};
-
-struct mthp_stat {
-       unsigned long stats[ilog2(MAX_PTRS_PER_PTE) + 1][__MTHP_STAT_COUNT];
-};
-
-#ifdef CONFIG_SYSFS
-DECLARE_PER_CPU(struct mthp_stat, mthp_stats);
-
-static inline void count_mthp_stat(int order, enum mthp_stat_item item)
-{
-       if (order <= 0 || order > PMD_ORDER)
-               return;
-
-       this_cpu_inc(mthp_stats.stats[order][item]);
-}
-#else
-static inline void count_mthp_stat(int order, enum mthp_stat_item item)
-{
-}
-#endif
-
 #define transparent_hugepage_use_zero_page()                           \
        (transparent_hugepage_flags &                                   \
         (1<<TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG))
diff --git a/mm/memory.c b/mm/memory.c
index c31ea300cdf6d6c2c63c6a2db87c73bebaab5dab..93c0c25433d021d5ec6caa9c60ed9f6e4bde9d8e 100644
@@ -4595,9 +4595,7 @@ static vm_fault_t do_anonymous_page(struct vm_fault *vmf)
 
        folio_ref_add(folio, nr_pages - 1);
        add_mm_counter(vma->vm_mm, MM_ANONPAGES, nr_pages);
-#ifdef CONFIG_TRANSPARENT_HUGEPAGE
        count_mthp_stat(folio_order(folio), MTHP_STAT_ANON_FAULT_ALLOC);
-#endif
        folio_add_new_anon_rmap(folio, vma, addr, RMAP_EXCLUSIVE);
        folio_add_lru_vma(folio, vma);
 setpte:
diff --git a/mm/shmem.c b/mm/shmem.c
index 866d46d0c43dcbdd6b8d0717f456ace9b74902b8..800cec9dc534e515d32469ada52744d1adefe306 100644
@@ -1808,9 +1808,7 @@ static struct folio *shmem_alloc_and_add_folio(struct vm_fault *vmf,
 
                        if (pages == HPAGE_PMD_NR)
                                count_vm_event(THP_FILE_FALLBACK);
-#ifdef CONFIG_TRANSPARENT_HUGEPAGE
                        count_mthp_stat(order, MTHP_STAT_SHMEM_FALLBACK);
-#endif
                        order = next_order(&suitable_orders, order);
                }
        } else {
@@ -1835,10 +1833,8 @@ allocated:
                                count_vm_event(THP_FILE_FALLBACK);
                                count_vm_event(THP_FILE_FALLBACK_CHARGE);
                        }
-#ifdef CONFIG_TRANSPARENT_HUGEPAGE
                        count_mthp_stat(folio_order(folio), MTHP_STAT_SHMEM_FALLBACK);
                        count_mthp_stat(folio_order(folio), MTHP_STAT_SHMEM_FALLBACK_CHARGE);
-#endif
                }
                goto unlock;
        }
@@ -2332,9 +2328,7 @@ repeat:
                if (!IS_ERR(folio)) {
                        if (folio_test_pmd_mappable(folio))
                                count_vm_event(THP_FILE_ALLOC);
-#ifdef CONFIG_TRANSPARENT_HUGEPAGE
                        count_mthp_stat(folio_order(folio), MTHP_STAT_SHMEM_ALLOC);
-#endif
                        goto alloced;
                }
                if (PTR_ERR(folio) == -EEXIST)