mm: khugepaged: move some khugepaged_* functions to khugepaged.c
author Yang Shi <shy828301@gmail.com>
Thu, 14 Apr 2022 19:16:46 +0000 (12:16 -0700)
committer Liam R. Howlett <Liam.Howlett@oracle.com>
Thu, 14 Apr 2022 21:49:50 +0000 (17:49 -0400)
We want to reuse hugepage_vma_check() for khugepaged_enter() so that some
duplicate code can be removed.  But moving hugepage_vma_check() to
khugepaged.h would require including huge_mm.h in it, and it seems
suboptimal to bloat khugepaged.h that way.

The khugepaged_* functions are really just wrappers around non-inline
functions, so there is little benefit in keeping them inline.

So move the khugepaged_* functions to khugepaged.c; any caller then only
needs to include khugepaged.h, which is quite small.  For example, the
following patches will call khugepaged_enter() in the filemap page fault
path for regular filesystems to make read-only FS THP collapse more
consistent, and filemap.c just needs to include khugepaged.h.
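
As an illustration only, here is a minimal sketch of what such a caller
could look like once the wrappers are out of line.  The function name
example_filemap_fault() and its call site are assumptions made up for the
example; they are not part of this patch or of the follow-up patches
referenced above.

/*
 * Hypothetical caller sketch (not from this patch): with the wrappers
 * moved out of line, a file such as mm/filemap.c only needs the small
 * khugepaged.h header to register an mm with khugepaged.
 */
#include <linux/mm.h>
#include <linux/khugepaged.h>

vm_fault_t example_filemap_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;

	/*
	 * Ask khugepaged to consider this mm.  The MMF_VM_HUGEPAGE and
	 * hugepage_vma_check() gating now happens inside khugepaged.c,
	 * so no THP headers are needed here.
	 */
	khugepaged_enter(vma, vma->vm_flags);

	/* ... regular page cache fault handling would follow ... */
	return VM_FAULT_NOPAGE;
}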

Link: https://lkml.kernel.org/r/20220404200250.321455-7-shy828301@gmail.com
Signed-off-by: Yang Shi <shy828301@gmail.com>
Acked-by: Song Liu <song@kernel.org>
Cc: "Kirill A. Shutemov" <kirill.shutemov@linux.intel.com>
Cc: Matthew Wilcox <willy@infradead.org>
Cc: Miaohe Lin <linmiaohe@huawei.com>
Cc: Rik van Riel <riel@surriel.com>
Cc: Theodore Ts'o <tytso@mit.edu>
Cc: Vlastimil Babka <vbabka@suse.cz>
Cc: Zi Yan <ziy@nvidia.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
include/linux/khugepaged.h
mm/khugepaged.c

index 0423d3619f26303b974a09ff12bb82173ed46e78..6acf9701151e72cdb0258edfc118f1067d09a10d 100644
--- a/include/linux/khugepaged.h
+++ b/include/linux/khugepaged.h
@@ -2,10 +2,6 @@
 #ifndef _LINUX_KHUGEPAGED_H
 #define _LINUX_KHUGEPAGED_H
 
-#include <linux/sched/coredump.h> /* MMF_VM_HUGEPAGE */
-#include <linux/shmem_fs.h>
-
-
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
 extern struct attribute_group khugepaged_attr_group;
 
@@ -16,6 +12,12 @@ extern void __khugepaged_enter(struct mm_struct *mm);
 extern void __khugepaged_exit(struct mm_struct *mm);
 extern void khugepaged_enter_vma_merge(struct vm_area_struct *vma,
                                       unsigned long vm_flags);
+extern void khugepaged_fork(struct mm_struct *mm,
+                           struct mm_struct *oldmm);
+extern void khugepaged_exit(struct mm_struct *mm);
+extern void khugepaged_enter(struct vm_area_struct *vma,
+                            unsigned long vm_flags);
+
 extern void khugepaged_min_free_kbytes_update(void);
 #ifdef CONFIG_SHMEM
 extern void collapse_pte_mapped_thp(struct mm_struct *mm, unsigned long addr);
@@ -33,36 +35,9 @@ static inline void collapse_pte_mapped_thp(struct mm_struct *mm,
 #define khugepaged_always()                            \
        (transparent_hugepage_flags &                   \
         (1<<TRANSPARENT_HUGEPAGE_FLAG))
-#define khugepaged_req_madv()                                  \
-       (transparent_hugepage_flags &                           \
-        (1<<TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG))
 #define khugepaged_defrag()                                    \
        (transparent_hugepage_flags &                           \
         (1<<TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG))
-
-static inline void khugepaged_fork(struct mm_struct *mm, struct mm_struct *oldmm)
-{
-       if (test_bit(MMF_VM_HUGEPAGE, &oldmm->flags))
-               __khugepaged_enter(mm);
-}
-
-static inline void khugepaged_exit(struct mm_struct *mm)
-{
-       if (test_bit(MMF_VM_HUGEPAGE, &mm->flags))
-               __khugepaged_exit(mm);
-}
-
-static inline void khugepaged_enter(struct vm_area_struct *vma,
-                                  unsigned long vm_flags)
-{
-       if (!test_bit(MMF_VM_HUGEPAGE, &vma->vm_mm->flags))
-               if ((khugepaged_always() ||
-                    (shmem_file(vma->vm_file) && shmem_huge_enabled(vma)) ||
-                    (khugepaged_req_madv() && (vm_flags & VM_HUGEPAGE))) &&
-                   !(vm_flags & VM_NOHUGEPAGE) &&
-                   !test_bit(MMF_DISABLE_THP, &vma->vm_mm->flags))
-                       __khugepaged_enter(vma->vm_mm);
-}
 #else /* CONFIG_TRANSPARENT_HUGEPAGE */
 static inline void khugepaged_fork(struct mm_struct *mm, struct mm_struct *oldmm)
 {
index 821950422dccc1591496454ac87dc92ee7a0fd1f..99275721af1899e532c49e788f94efaf271f98bc 100644
--- a/mm/khugepaged.c
+++ b/mm/khugepaged.c
@@ -556,6 +556,26 @@ void __khugepaged_exit(struct mm_struct *mm)
        }
 }
 
+void khugepaged_fork(struct mm_struct *mm, struct mm_struct *oldmm)
+{
+       if (test_bit(MMF_VM_HUGEPAGE, &oldmm->flags))
+               __khugepaged_enter(mm);
+}
+
+void khugepaged_exit(struct mm_struct *mm)
+{
+       if (test_bit(MMF_VM_HUGEPAGE, &mm->flags))
+               __khugepaged_exit(mm);
+}
+
+void khugepaged_enter(struct vm_area_struct *vma, unsigned long vm_flags)
+{
+       if (!test_bit(MMF_VM_HUGEPAGE, &vma->vm_mm->flags) &&
+           khugepaged_enabled())
+               if (hugepage_vma_check(vma, vm_flags))
+                       __khugepaged_enter(vma->vm_mm);
+}
+
 static void release_pte_page(struct page *page)
 {
        mod_node_page_state(page_pgdat(page),