mm/khugepaged: add flag to predicate khugepaged-only behavior
author Zach O'Keefe <zokeefe@google.com>
Wed, 6 Jul 2022 23:59:24 +0000 (16:59 -0700)
committer Liam R. Howlett <Liam.Howlett@oracle.com>
Wed, 3 Aug 2022 18:17:10 +0000 (14:17 -0400)
Add an .is_khugepaged flag to struct collapse_control so khugepaged-specific
behavior can be elided in the MADV_COLLAPSE context.

Start by protecting khugepaged-specific heuristics behind this flag.  In
MADV_COLLAPSE, the user presumably has reason to believe the collapse will
be beneficial, and khugepaged's heuristics shouldn't prevent the user from
doing so (a simplified sketch of the gating pattern follows the list):

1) the sysfs-controlled knobs khugepaged_max_ptes_[none|swap|shared]

2) the requirement that some pages in the region being collapsed be young
   or referenced

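The shape of the change is uniform across the hunks below: each
khugepaged-only heuristic is now conditioned on cc->is_khugepaged.  A
minimal sketch of that shape, using a hypothetical helper that is not part
of this patch (the real checks stay open-coded in the diff):

	/*
	 * Illustration only -- not from the patch.  MADV_COLLAPSE callers
	 * (cc->is_khugepaged == false) skip the sysfs-tunable limit, while
	 * khugepaged keeps enforcing it.
	 */
	static bool cc_exceeds_max_ptes_none(struct collapse_control *cc,
					     unsigned int none_or_zero)
	{
		return cc->is_khugepaged &&
		       none_or_zero > khugepaged_max_ptes_none;
	}

MADV_COLLAPSE (added elsewhere in this series) reaches these paths from
userspace via madvise(addr, len, MADV_COLLAPSE), using a collapse_control
whose .is_khugepaged is false; khugepaged itself uses
khugepaged_collapse_control with .is_khugepaged = true, as set below.
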
Link: https://lkml.kernel.org/r/20220706235936.2197195-7-zokeefe@google.com
Signed-off-by: Zach O'Keefe <zokeefe@google.com>
Reviewed-by: Yang Shi <shy828301@gmail.com>
Cc: Alex Shi <alex.shi@linux.alibaba.com>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: Arnd Bergmann <arnd@arndb.de>
Cc: Axel Rasmussen <axelrasmussen@google.com>
Cc: Chris Kennelly <ckennelly@google.com>
Cc: Chris Zankel <chris@zankel.net>
Cc: David Hildenbrand <david@redhat.com>
Cc: David Rientjes <rientjes@google.com>
Cc: Helge Deller <deller@gmx.de>
Cc: Hugh Dickins <hughd@google.com>
Cc: Ivan Kokshaysky <ink@jurassic.park.msu.ru>
Cc: James Bottomley <James.Bottomley@HansenPartnership.com>
Cc: Jens Axboe <axboe@kernel.dk>
Cc: "Kirill A. Shutemov" <kirill.shutemov@linux.intel.com>
Cc: Matthew Wilcox <willy@infradead.org>
Cc: Matt Turner <mattst88@gmail.com>
Cc: Max Filippov <jcmvbkbc@gmail.com>
Cc: Miaohe Lin <linmiaohe@huawei.com>
Cc: Michal Hocko <mhocko@suse.com>
Cc: Minchan Kim <minchan@kernel.org>
Cc: Pasha Tatashin <pasha.tatashin@soleen.com>
Cc: Pavel Begunkov <asml.silence@gmail.com>
Cc: Peter Xu <peterx@redhat.com>
Cc: Rongwei Wang <rongwei.wang@linux.alibaba.com>
Cc: SeongJae Park <sj@kernel.org>
Cc: Song Liu <songliubraving@fb.com>
Cc: Thomas Bogendoerfer <tsbogend@alpha.franken.de>
Cc: Vlastimil Babka <vbabka@suse.cz>
Cc: Zi Yan <ziy@nvidia.com>
Cc: Dan Carpenter <dan.carpenter@oracle.com>
Cc: "Souptick Joarder (HPE)" <jrdr.linux@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
mm/khugepaged.c

index ca5d013a2a8d0808d757cdbabe3242a1dd02a68a..3128c7fe974257f0699fcfaee48e8043968b5f58 100644
@@ -73,6 +73,8 @@ static DECLARE_WAIT_QUEUE_HEAD(khugepaged_wait);
  * default collapse hugepages if there is at least one pte mapped like
  * it would have happened if the vma was large enough during page
  * fault.
+ *
+ * Note that these are only respected if collapse was initiated by khugepaged.
  */
 static unsigned int khugepaged_max_ptes_none __read_mostly;
 static unsigned int khugepaged_max_ptes_swap __read_mostly;
@@ -86,6 +88,8 @@ static struct kmem_cache *mm_slot_cache __read_mostly;
 #define MAX_PTE_MAPPED_THP 8
 
 struct collapse_control {
+       bool is_khugepaged;
+
        /* Num pages scanned per node */
 #if HPAGE_PMD_ORDER < 16
        u16 node_load[MAX_NUMNODES];
@@ -557,6 +561,7 @@ static bool is_refcount_suitable(struct page *page)
 static int __collapse_huge_page_isolate(struct vm_area_struct *vma,
                                        unsigned long address,
                                        pte_t *pte,
+                                       struct collapse_control *cc,
                                        struct list_head *compound_pagelist)
 {
        struct page *page = NULL;
@@ -570,7 +575,8 @@ static int __collapse_huge_page_isolate(struct vm_area_struct *vma,
                if (pte_none(pteval) || (pte_present(pteval) &&
                                is_zero_pfn(pte_pfn(pteval)))) {
                        if (!userfaultfd_armed(vma) &&
-                           ++none_or_zero <= khugepaged_max_ptes_none) {
+                           (++none_or_zero <= khugepaged_max_ptes_none ||
+                            !cc->is_khugepaged)) {
                                continue;
                        } else {
                                result = SCAN_EXCEED_NONE_PTE;
@@ -590,8 +596,8 @@ static int __collapse_huge_page_isolate(struct vm_area_struct *vma,
 
                VM_BUG_ON_PAGE(!PageAnon(page), page);
 
-               if (page_mapcount(page) > 1 &&
-                               ++shared > khugepaged_max_ptes_shared) {
+               if (cc->is_khugepaged && page_mapcount(page) > 1 &&
+                   ++shared > khugepaged_max_ptes_shared) {
                        result = SCAN_EXCEED_SHARED_PTE;
                        count_vm_event(THP_SCAN_EXCEED_SHARED_PTE);
                        goto out;
@@ -657,10 +663,14 @@ static int __collapse_huge_page_isolate(struct vm_area_struct *vma,
                if (PageCompound(page))
                        list_add_tail(&page->lru, compound_pagelist);
 next:
-               /* There should be enough young pte to collapse the page */
-               if (pte_young(pteval) ||
-                   page_is_young(page) || PageReferenced(page) ||
-                   mmu_notifier_test_young(vma->vm_mm, address))
+               /*
+                * If collapse was initiated by khugepaged, check that there is
+                * enough young pte to justify collapsing the page
+                */
+               if (cc->is_khugepaged &&
+                   (pte_young(pteval) || page_is_young(page) ||
+                    PageReferenced(page) || mmu_notifier_test_young(vma->vm_mm,
+                                                                    address)))
                        referenced++;
 
                if (pte_write(pteval))
@@ -669,7 +679,7 @@ next:
 
        if (unlikely(!writable)) {
                result = SCAN_PAGE_RO;
-       } else if (unlikely(!referenced)) {
+       } else if (unlikely(cc->is_khugepaged && !referenced)) {
                result = SCAN_LACK_REFERENCED_PAGE;
        } else {
                result = SCAN_SUCCEED;
@@ -748,6 +758,7 @@ static void khugepaged_alloc_sleep(void)
 
 
 struct collapse_control khugepaged_collapse_control = {
+       .is_khugepaged = true,
        .last_target_node = NUMA_NO_NODE,
 };
 
@@ -1028,7 +1039,7 @@ static int collapse_huge_page(struct mm_struct *mm, unsigned long address,
        mmu_notifier_invalidate_range_end(&range);
 
        spin_lock(pte_ptl);
-       result =  __collapse_huge_page_isolate(vma, address, pte,
+       result =  __collapse_huge_page_isolate(vma, address, pte, cc,
                                               &compound_pagelist);
        spin_unlock(pte_ptl);
 
@@ -1119,7 +1130,8 @@ static int khugepaged_scan_pmd(struct mm_struct *mm, struct vm_area_struct *vma,
             _pte++, _address += PAGE_SIZE) {
                pte_t pteval = *_pte;
                if (is_swap_pte(pteval)) {
-                       if (++unmapped <= khugepaged_max_ptes_swap) {
+                       if (++unmapped <= khugepaged_max_ptes_swap ||
+                           !cc->is_khugepaged) {
                                /*
                                 * Always be strict with uffd-wp
                                 * enabled swap entries.  Please see
@@ -1138,7 +1150,8 @@ static int khugepaged_scan_pmd(struct mm_struct *mm, struct vm_area_struct *vma,
                }
                if (pte_none(pteval) || is_zero_pfn(pte_pfn(pteval))) {
                        if (!userfaultfd_armed(vma) &&
-                           ++none_or_zero <= khugepaged_max_ptes_none) {
+                           (++none_or_zero <= khugepaged_max_ptes_none ||
+                            !cc->is_khugepaged)) {
                                continue;
                        } else {
                                result = SCAN_EXCEED_NONE_PTE;
@@ -1168,8 +1181,9 @@ static int khugepaged_scan_pmd(struct mm_struct *mm, struct vm_area_struct *vma,
                        goto out_unmap;
                }
 
-               if (page_mapcount(page) > 1 &&
-                               ++shared > khugepaged_max_ptes_shared) {
+               if (cc->is_khugepaged &&
+                   page_mapcount(page) > 1 &&
+                   ++shared > khugepaged_max_ptes_shared) {
                        result = SCAN_EXCEED_SHARED_PTE;
                        count_vm_event(THP_SCAN_EXCEED_SHARED_PTE);
                        goto out_unmap;
@@ -1223,14 +1237,22 @@ static int khugepaged_scan_pmd(struct mm_struct *mm, struct vm_area_struct *vma,
                        result = SCAN_PAGE_COUNT;
                        goto out_unmap;
                }
-               if (pte_young(pteval) ||
-                   page_is_young(page) || PageReferenced(page) ||
-                   mmu_notifier_test_young(vma->vm_mm, address))
+
+               /*
+                * If collapse was initiated by khugepaged, check that there is
+                * enough young pte to justify collapsing the page
+                */
+               if (cc->is_khugepaged &&
+                   (pte_young(pteval) || page_is_young(page) ||
+                    PageReferenced(page) || mmu_notifier_test_young(vma->vm_mm,
+                                                                    address)))
                        referenced++;
        }
        if (!writable) {
                result = SCAN_PAGE_RO;
-       } else if (!referenced || (unmapped && referenced < HPAGE_PMD_NR/2)) {
+       } else if (cc->is_khugepaged &&
+                  (!referenced ||
+                   (unmapped && referenced < HPAGE_PMD_NR / 2))) {
                result = SCAN_LACK_REFERENCED_PAGE;
        } else {
                result = SCAN_SUCCEED;
@@ -1899,7 +1921,8 @@ static int khugepaged_scan_file(struct mm_struct *mm, struct file *file,
                        continue;
 
                if (xa_is_value(page)) {
-                       if (++swap > khugepaged_max_ptes_swap) {
+                       if (cc->is_khugepaged &&
+                           ++swap > khugepaged_max_ptes_swap) {
                                result = SCAN_EXCEED_SWAP_PTE;
                                count_vm_event(THP_SCAN_EXCEED_SWAP_PTE);
                                break;
@@ -1950,7 +1973,8 @@ static int khugepaged_scan_file(struct mm_struct *mm, struct file *file,
        rcu_read_unlock();
 
        if (result == SCAN_SUCCEED) {
-               if (present < HPAGE_PMD_NR - khugepaged_max_ptes_none) {
+               if (present < HPAGE_PMD_NR - khugepaged_max_ptes_none &&
+                   cc->is_khugepaged) {
                        result = SCAN_EXCEED_NONE_PTE;
                        count_vm_event(THP_SCAN_EXCEED_NONE_PTE);
                } else {