www.infradead.org Git - users/dwmw2/linux.git/commitdiff
mm: memcg: add THP swap out info for anonymous reclaim
authorXin Hao <vernhao@tencent.com>
Wed, 13 Sep 2023 16:49:37 +0000 (00:49 +0800)
committerAndrew Morton <akpm@linux-foundation.org>
Wed, 4 Oct 2023 17:32:27 +0000 (10:32 -0700)
At present, we support a per-memcg reclaim strategy; however, we do not know
the number of transparent huge pages being reclaimed. As we know, transparent
huge pages need to be split before they are reclaimed, and this can introduce
a performance bottleneck.  For example, when two memcgs (A & B) are reclaiming
anonymous pages at the same time, and memcg 'A' is reclaiming a large number
of transparent huge pages, we can better determine that the performance
bottleneck is caused by memcg 'A'.  Therefore, in order to better analyze
such problems, add per-memcg THP swap-out info.

[akpm@linux-foundation.org: fix swap_writepage_fs(), per Johannes]
Link: https://lkml.kernel.org/r/20230913213343.GB48476@cmpxchg.org
Link: https://lkml.kernel.org/r/20230913164938.16918-1-vernhao@tencent.com
Signed-off-by: Xin Hao <vernhao@tencent.com>
Suggested-by: Johannes Weiner <hannes@cmpxchg.org>
Acked-by: Johannes Weiner <hannes@cmpxchg.org>
Cc: Michal Hocko <mhocko@suse.com>
Cc: Roman Gushchin <roman.gushchin@linux.dev>
Cc: Shakeel Butt <shakeelb@google.com>
Cc: Muchun Song <songmuchun@bytedance.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Documentation/admin-guide/cgroup-v2.rst
mm/memcontrol.c
mm/page_io.c
mm/vmscan.c

index b26b5274eaaf140ed8ccb617df2eca53a166e8bd..622a7f28db1fd54327c6cd02c489e889937390d2 100644 (file)
@@ -1532,6 +1532,15 @@ PAGE_SIZE multiple when read back.
                collapsing an existing range of pages. This counter is not
                present when CONFIG_TRANSPARENT_HUGEPAGE is not set.
 
+         thp_swpout (npn)
+               Number of transparent hugepages which are swapout in one piece
+               without splitting.
+
+         thp_swpout_fallback (npn)
+               Number of transparent hugepages which were split before swapout.
+               Usually because failed to allocate some continuous swap space
+               for the huge page.
+
   memory.numa_stat
        A read-only nested-keyed file which exists on non-root cgroups.
 
index 5b009b233ab8921110148de54b6b96e69d5ab638..68313331971c65165e0143396a9f16841781f869 100644 (file)
@@ -704,6 +704,8 @@ static const unsigned int memcg_vm_event_stat[] = {
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
        THP_FAULT_ALLOC,
        THP_COLLAPSE_ALLOC,
+       THP_SWPOUT,
+       THP_SWPOUT_FALLBACK,
 #endif
 };
 
index fe4c21af23f269a6bdc913e967f855007f8ccada..cb559ae324c6723e286701d7fa026644f62bc749 100644 (file)
@@ -208,8 +208,10 @@ int swap_writepage(struct page *page, struct writeback_control *wbc)
 static inline void count_swpout_vm_event(struct folio *folio)
 {
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
-       if (unlikely(folio_test_pmd_mappable(folio)))
+       if (unlikely(folio_test_pmd_mappable(folio))) {
+               count_memcg_folio_events(folio, THP_SWPOUT, 1);
                count_vm_event(THP_SWPOUT);
+       }
 #endif
        count_vm_events(PSWPOUT, folio_nr_pages(folio));
 }
@@ -278,9 +280,6 @@ static void sio_write_complete(struct kiocb *iocb, long ret)
                        set_page_dirty(page);
                        ClearPageReclaim(page);
                }
-       } else {
-               for (p = 0; p < sio->pages; p++)
-                       count_swpout_vm_event(page_folio(sio->bvec[p].bv_page));
        }
 
        for (p = 0; p < sio->pages; p++)
@@ -296,6 +295,7 @@ static void swap_writepage_fs(struct page *page, struct writeback_control *wbc)
        struct file *swap_file = sis->swap_file;
        loff_t pos = page_file_offset(page);
 
+       count_swpout_vm_event(page_folio(page));
        set_page_writeback(page);
        unlock_page(page);
        if (wbc->swap_plug)
index 8a3f83e0231e727ebd6d469a32a3b5cc186c491c..acf115468bf8a0382f58260ea6eb3b3c7047b16f 100644 (file)
@@ -1214,6 +1214,7 @@ retry:
                                                                folio_list))
                                                goto activate_locked;
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
+                                       count_memcg_folio_events(folio, THP_SWPOUT_FALLBACK, 1);
                                        count_vm_event(THP_SWPOUT_FALLBACK);
 #endif
                                        if (!add_to_swap(folio))