mm: add per-order mTHP split counters
author Lance Yang <ioworker0@gmail.com>
Fri, 28 Jun 2024 13:07:49 +0000 (21:07 +0800)
committer Andrew Morton <akpm@linux-foundation.org>
Fri, 12 Jul 2024 22:52:13 +0000 (15:52 -0700)
Patch series "mm: introduce per-order mTHP split counters", v3.

At present, the split counters in THP statistics do not cover PTE-mapped
mTHP.  Therefore, we want to introduce per-order mTHP split counters to
monitor the frequency of mTHP splits.  This will assist developers in
better analyzing and optimizing system performance.

/sys/kernel/mm/transparent_hugepage/hugepages-<size>/stats
        split
        split_failed
        split_deferred

This patch (of 2):

Currently, the split counters in THP statistics do not cover PTE-mapped
mTHP.  Therefore, we propose introducing per-order mTHP split counters to
monitor the frequency of mTHP splits.  This will help developers better
analyze and optimize system performance.

/sys/kernel/mm/transparent_hugepage/hugepages-<size>/stats
        split
        split_failed
        split_deferred
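
Each counter is a plain sysfs file holding a single value.  As a rough
illustration (not part of this patch), the sketch below reads one of the
new counters from userspace; the hugepages-64kB directory name is only an
example, since the sizes actually present depend on the architecture,
base page size and enabled mTHP orders:

#include <stdio.h>

int main(void)
{
	/* Example path; pick whichever hugepages-<size> directory exists. */
	const char *path =
		"/sys/kernel/mm/transparent_hugepage/hugepages-64kB/stats/split";
	unsigned long long splits;
	FILE *f = fopen(path, "r");

	if (!f) {
		perror(path);
		return 1;
	}
	if (fscanf(f, "%llu", &splits) != 1) {
		fclose(f);
		return 1;
	}
	fclose(f);
	printf("64kB mTHP splits: %llu\n", splits);
	return 0;
}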

[ioworker0@gmail.com: make things more readable, per Barry and Baolin]
Link: https://lkml.kernel.org/r/20240704012905.42971-2-ioworker0@gmail.com
[ioworker0@gmail.com: use == for `order' test, per David]
Link: https://lkml.kernel.org/r/20240705113119.82210-1-ioworker0@gmail.com
Link: https://lkml.kernel.org/r/20240704012905.42971-1-ioworker0@gmail.com
Link: https://lkml.kernel.org/r/20240628130750.73097-1-ioworker0@gmail.com
Link: https://lkml.kernel.org/r/20240628130750.73097-2-ioworker0@gmail.com
Signed-off-by: Mingzhe Yang <mingzhe.yang@ly.com>
Signed-off-by: Lance Yang <ioworker0@gmail.com>
Reviewed-by: Ryan Roberts <ryan.roberts@arm.com>
Acked-by: Barry Song <baohua@kernel.org>
Reviewed-by: Baolin Wang <baolin.wang@linux.alibaba.com>
Acked-by: David Hildenbrand <david@redhat.com>
Cc: Bang Li <libang.li@antgroup.com>
Cc: Yang Shi <shy828301@gmail.com>
Cc: Zi Yan <ziy@nvidia.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
include/linux/huge_mm.h
mm/huge_memory.c

include/linux/huge_mm.h
index 212cca384d7e4328363e06979a814d14462d49ff..cee3c5da8f0ed416343450f0e48364214f4b643e 100644
--- a/include/linux/huge_mm.h
+++ b/include/linux/huge_mm.h
@@ -284,6 +284,9 @@ enum mthp_stat_item {
        MTHP_STAT_FILE_ALLOC,
        MTHP_STAT_FILE_FALLBACK,
        MTHP_STAT_FILE_FALLBACK_CHARGE,
+       MTHP_STAT_SPLIT,
+       MTHP_STAT_SPLIT_FAILED,
+       MTHP_STAT_SPLIT_DEFERRED,
        __MTHP_STAT_COUNT
 };
 
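
For context, the three new enum entries are consumed by count_mthp_stat(),
which already exists in include/linux/huge_mm.h and is what the
mm/huge_memory.c hunks below call.  Roughly (a paraphrased sketch, not part
of this diff; details such as the bounds check may differ), it bumps a
per-CPU counter indexed by folio order and stat item:

/* Sketch of the pre-existing helper in huge_mm.h; shown for context only. */
struct mthp_stat {
	unsigned long stats[ilog2(MAX_PTRS_PER_PTE) + 1][__MTHP_STAT_COUNT];
};

DECLARE_PER_CPU(struct mthp_stat, mthp_stats);

static inline void count_mthp_stat(int order, enum mthp_stat_item item)
{
	if (order <= 0 || order > PMD_ORDER)
		return;

	this_cpu_inc(mthp_stats.stats[order][item]);
}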
mm/huge_memory.c
index 954c635759174700c7e2ee334d9677123acc79b8..17fb072a0ca16957ba86e4d1294508e3a5a20d54 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -559,6 +559,9 @@ DEFINE_MTHP_STAT_ATTR(swpout_fallback, MTHP_STAT_SWPOUT_FALLBACK);
 DEFINE_MTHP_STAT_ATTR(file_alloc, MTHP_STAT_FILE_ALLOC);
 DEFINE_MTHP_STAT_ATTR(file_fallback, MTHP_STAT_FILE_FALLBACK);
 DEFINE_MTHP_STAT_ATTR(file_fallback_charge, MTHP_STAT_FILE_FALLBACK_CHARGE);
+DEFINE_MTHP_STAT_ATTR(split, MTHP_STAT_SPLIT);
+DEFINE_MTHP_STAT_ATTR(split_failed, MTHP_STAT_SPLIT_FAILED);
+DEFINE_MTHP_STAT_ATTR(split_deferred, MTHP_STAT_SPLIT_DEFERRED);
 
 static struct attribute *stats_attrs[] = {
        &anon_fault_alloc_attr.attr,
@@ -569,6 +572,9 @@ static struct attribute *stats_attrs[] = {
        &file_alloc_attr.attr,
        &file_fallback_attr.attr,
        &file_fallback_charge_attr.attr,
+       &split_attr.attr,
+       &split_failed_attr.attr,
+       &split_deferred_attr.attr,
        NULL,
 };
 
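
Each DEFINE_MTHP_STAT_ATTR() instance above generates a read-only sysfs
attribute whose show() method sums the per-CPU counters for the order of
the containing hugepages-<size> directory.  A paraphrased sketch of the
pre-existing macro and helper in mm/huge_memory.c follows; sum_mthp_stat()
and to_thpsize() are the names used there at the time of this patch, but
treat the details as illustrative:

/* Sketch only: sum one per-order counter across all possible CPUs. */
static unsigned long sum_mthp_stat(int order, enum mthp_stat_item item)
{
	unsigned long sum = 0;
	int cpu;

	for_each_possible_cpu(cpu)
		sum += per_cpu(mthp_stats, cpu).stats[order][item];

	return sum;
}

/* Sketch only: expose one counter as a read-only attribute named _name. */
#define DEFINE_MTHP_STAT_ATTR(_name, _index)				\
static ssize_t _name##_show(struct kobject *kobj,			\
			struct kobj_attribute *attr, char *buf)		\
{									\
	int order = to_thpsize(kobj)->order;				\
									\
	return sysfs_emit(buf, "%lu\n", sum_mthp_stat(order, _index));	\
}									\
static struct kobj_attribute _name##_attr = __ATTR_RO(_name)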
@@ -3068,7 +3074,7 @@ int split_huge_page_to_list_to_order(struct page *page, struct list_head *list,
        XA_STATE_ORDER(xas, &folio->mapping->i_pages, folio->index, new_order);
        struct anon_vma *anon_vma = NULL;
        struct address_space *mapping = NULL;
-       bool is_thp = folio_test_pmd_mappable(folio);
+       int order = folio_order(folio);
        int extra_pins, ret;
        pgoff_t end;
        bool is_hzp;
@@ -3253,8 +3259,9 @@ out_unlock:
                i_mmap_unlock_read(mapping);
 out:
        xas_destroy(&xas);
-       if (is_thp)
+       if (order == HPAGE_PMD_ORDER)
                count_vm_event(!ret ? THP_SPLIT_PAGE : THP_SPLIT_PAGE_FAILED);
+       count_mthp_stat(order, !ret ? MTHP_STAT_SPLIT : MTHP_STAT_SPLIT_FAILED);
        return ret;
 }
 
@@ -3307,6 +3314,7 @@ void deferred_split_folio(struct folio *folio)
        if (list_empty(&folio->_deferred_list)) {
                if (folio_test_pmd_mappable(folio))
                        count_vm_event(THP_DEFERRED_SPLIT_PAGE);
+               count_mthp_stat(folio_order(folio), MTHP_STAT_SPLIT_DEFERRED);
                list_add_tail(&folio->_deferred_list, &ds_queue->split_queue);
                ds_queue->split_queue_len++;
 #ifdef CONFIG_MEMCG