mm: khugepaged: introduce khugepaged_enter_vma() helper
author		Yang Shi <shy828301@gmail.com>
		Thu, 14 Apr 2022 19:16:46 +0000 (12:16 -0700)
committer	Liam R. Howlett <Liam.Howlett@oracle.com>
		Thu, 14 Apr 2022 21:49:50 +0000 (17:49 -0400)
khugepaged_enter_vma_merge() does the same thing as the khugepaged_enter()
section called by shmem_mmap(), so consolidate the two into one helper and
rename it to khugepaged_enter_vma().
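For illustration, a caller such as a file system's ->mmap() handler now
needs only a single call; a minimal sketch (example_mmap() and
example_vm_ops are hypothetical names, not part of this patch):

	static int example_mmap(struct file *file, struct vm_area_struct *vma)
	{
		file_accessed(file);
		vma->vm_ops = &example_vm_ops;
		/*
		 * khugepaged_enter_vma() now performs the khugepaged_enabled(),
		 * MMF_VM_HUGEPAGE and PMD-alignment checks itself, so callers
		 * no longer open-code the HPAGE_PMD_MASK arithmetic.
		 */
		khugepaged_enter_vma(vma, vma->vm_flags);
		return 0;
	}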

Link: https://lkml.kernel.org/r/20220404200250.321455-8-shy828301@gmail.com
Signed-off-by: Yang Shi <shy828301@gmail.com>
Cc: "Kirill A. Shutemov" <kirill.shutemov@linux.intel.com>
Cc: Matthew Wilcox <willy@infradead.org>
Cc: Miaohe Lin <linmiaohe@huawei.com>
Cc: Rik van Riel <riel@surriel.com>
Cc: Song Liu <song@kernel.org>
Cc: Theodore Ts'o <tytso@mit.edu>
Cc: Vlastimil Babka <vbabka@suse.cz>
Cc: Zi Yan <ziy@nvidia.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
include/linux/khugepaged.h
mm/khugepaged.c
mm/mmap.c
mm/shmem.c

diff --git a/include/linux/khugepaged.h b/include/linux/khugepaged.h
index 6acf9701151e72cdb0258edfc118f1067d09a10d..f4b12be155ab594d3c545817dd4f8e370c7c7f77 100644
--- a/include/linux/khugepaged.h
+++ b/include/linux/khugepaged.h
@@ -10,8 +10,8 @@ extern void khugepaged_destroy(void);
 extern int start_stop_khugepaged(void);
 extern void __khugepaged_enter(struct mm_struct *mm);
 extern void __khugepaged_exit(struct mm_struct *mm);
-extern void khugepaged_enter_vma_merge(struct vm_area_struct *vma,
-                                      unsigned long vm_flags);
+extern void khugepaged_enter_vma(struct vm_area_struct *vma,
+                                unsigned long vm_flags);
 extern void khugepaged_fork(struct mm_struct *mm,
                            struct mm_struct *oldmm);
 extern void khugepaged_exit(struct mm_struct *mm);
@@ -49,8 +49,8 @@ static inline void khugepaged_enter(struct vm_area_struct *vma,
                                    unsigned long vm_flags)
 {
 }
-static inline void khugepaged_enter_vma_merge(struct vm_area_struct *vma,
-                                             unsigned long vm_flags)
+static inline void khugepaged_enter_vma(struct vm_area_struct *vma,
+                                       unsigned long vm_flags)
 {
 }
 static inline void collapse_pte_mapped_thp(struct mm_struct *mm,
diff --git a/mm/khugepaged.c b/mm/khugepaged.c
index 99275721af1899e532c49e788f94efaf271f98bc..f02e8a53e54c5218050fb0ba543f2d1bf7607b77 100644
--- a/mm/khugepaged.c
+++ b/mm/khugepaged.c
@@ -365,7 +365,7 @@ int hugepage_madvise(struct vm_area_struct *vma,
                 * register it here without waiting a page fault that
                 * may not happen any time soon.
                 */
-               khugepaged_enter_vma_merge(vma, *vm_flags);
+               khugepaged_enter_vma(vma, *vm_flags);
                break;
        case MADV_NOHUGEPAGE:
                *vm_flags &= ~VM_HUGEPAGE;
@@ -505,23 +505,15 @@ void __khugepaged_enter(struct mm_struct *mm)
                wake_up_interruptible(&khugepaged_wait);
 }
 
-void khugepaged_enter_vma_merge(struct vm_area_struct *vma,
-                              unsigned long vm_flags)
+void khugepaged_enter_vma(struct vm_area_struct *vma,
+                         unsigned long vm_flags)
 {
-       unsigned long hstart, hend;
-
-       /*
-        * khugepaged only supports read-only files for non-shmem files.
-        * khugepaged does not yet work on special mappings. And
-        * file-private shmem THP is not supported.
-        */
-       if (!hugepage_vma_check(vma, vm_flags))
-               return;
-
-       hstart = (vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK;
-       hend = vma->vm_end & HPAGE_PMD_MASK;
-       if (hstart < hend)
-               khugepaged_enter(vma, vm_flags);
+       if (!test_bit(MMF_VM_HUGEPAGE, &vma->vm_mm->flags) &&
+           khugepaged_enabled() &&
+           (((vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK) <
+            (vma->vm_end & HPAGE_PMD_MASK)))
+               if (hugepage_vma_check(vma, vm_flags))
+                       __khugepaged_enter(vma->vm_mm);
 }
 
 void __khugepaged_exit(struct mm_struct *mm)
diff --git a/mm/mmap.c b/mm/mmap.c
index f3b8ee95811a8be528b1f0f65a043ce6d5113b00..c335a34b58d863822f0c54f92e842176cedcfa5c 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -1086,7 +1086,7 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
                                         end, prev->vm_pgoff, NULL, prev);
                if (err)
                        return NULL;
-               khugepaged_enter_vma_merge(prev, vm_flags);
+               khugepaged_enter_vma(prev, vm_flags);
                return prev;
        }
 
@@ -1113,7 +1113,7 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
                }
                if (err)
                        return NULL;
-               khugepaged_enter_vma_merge(area, vm_flags);
+               khugepaged_enter_vma(area, vm_flags);
                return area;
        }
 
@@ -2012,7 +2012,7 @@ int expand_upwards(struct vm_area_struct *vma, unsigned long address)
                }
        }
        anon_vma_unlock_write(vma->anon_vma);
-       khugepaged_enter_vma_merge(vma, vma->vm_flags);
+       khugepaged_enter_vma(vma, vma->vm_flags);
        return error;
 }
 #endif /* CONFIG_STACK_GROWSUP || CONFIG_IA64 */
@@ -2088,7 +2088,7 @@ int expand_downwards(struct vm_area_struct *vma, unsigned long address)
                }
        }
        anon_vma_unlock_write(vma->anon_vma);
-       khugepaged_enter_vma_merge(vma, vma->vm_flags);
+       khugepaged_enter_vma(vma, vma->vm_flags);
        return error;
 }
 
diff --git a/mm/shmem.c b/mm/shmem.c
index 913c0b039333757e473c3858a5903e6134c7ba77..6163ef5acbcd95c152aeccfa88209c1cca3f9abd 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -2240,11 +2240,7 @@ static int shmem_mmap(struct file *file, struct vm_area_struct *vma)
 
        file_accessed(file);
        vma->vm_ops = &shmem_vm_ops;
-       if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) &&
-                       ((vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK) <
-                       (vma->vm_end & HPAGE_PMD_MASK)) {
-               khugepaged_enter(vma, vma->vm_flags);
-       }
+       khugepaged_enter_vma(vma, vma->vm_flags);
        return 0;
 }
 
@@ -4143,11 +4139,7 @@ int shmem_zero_setup(struct vm_area_struct *vma)
        vma->vm_file = file;
        vma->vm_ops = &shmem_vm_ops;
 
-       if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) &&
-                       ((vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK) <
-                       (vma->vm_end & HPAGE_PMD_MASK)) {
-               khugepaged_enter(vma, vma->vm_flags);
-       }
+       khugepaged_enter_vma(vma, vma->vm_flags);
 
        return 0;
 }