From: Matthew Wilcox (Oracle)
Date: Wed, 20 Jul 2022 02:18:00 +0000 (+0000)
Subject: mm/khugepaged: stop using vma linked list
X-Git-Tag: howlett/maple/20220816~17
X-Git-Url: https://www.infradead.org/git/?a=commitdiff_plain;h=9bd89c6c8f250a2c7b283b128d436d89bf1d15d7;p=users%2Fjedix%2Flinux-maple.git

mm/khugepaged: stop using vma linked list

Use vma iterator & find_vma() instead of vma linked list.

Link: https://lkml.kernel.org/r/20220504011345.662299-37-Liam.Howlett@oracle.com
Link: https://lkml.kernel.org/r/20220621204632.3370049-53-Liam.Howlett@oracle.com
Link: https://lkml.kernel.org/r/20220720021727.17018-53-Liam.Howlett@oracle.com
Signed-off-by: Matthew Wilcox (Oracle)
Signed-off-by: Liam R. Howlett
Cc: Catalin Marinas
Cc: David Howells
Cc: SeongJae Park
Cc: Vlastimil Babka
Cc: Will Deacon
Cc: Davidlohr Bueso
Cc: David Hildenbrand
Cc: Hulk Robot
Cc: Lukas Bulwahn
Cc: Sven Schnelle
Cc: Yang Yingliang
Signed-off-by: Andrew Morton
---

diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 8a7c1b344abef..c10f2faf1bc28 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -2312,11 +2312,11 @@ void vma_adjust_trans_huge(struct vm_area_struct *vma,
 	split_huge_pmd_if_needed(vma, end);
 
 	/*
-	 * If we're also updating the vma->vm_next->vm_start,
+	 * If we're also updating the next vma vm_start,
 	 * check if we need to split it.
 	 */
 	if (adjust_next > 0) {
-		struct vm_area_struct *next = vma->vm_next;
+		struct vm_area_struct *next = find_vma(vma->vm_mm, vma->vm_end);
 		unsigned long nstart = next->vm_start;
 		nstart += adjust_next;
 		split_huge_pmd_if_needed(next, nstart);
diff --git a/mm/khugepaged.c b/mm/khugepaged.c
index c4f03a2ad6024..cfe231c5958f7 100644
--- a/mm/khugepaged.c
+++ b/mm/khugepaged.c
@@ -2056,10 +2056,12 @@ static unsigned int khugepaged_scan_mm_slot(unsigned int pages,
 	__releases(&khugepaged_mm_lock)
 	__acquires(&khugepaged_mm_lock)
 {
+	struct vma_iterator vmi;
 	struct mm_slot *mm_slot;
 	struct mm_struct *mm;
 	struct vm_area_struct *vma;
 	int progress = 0;
+	unsigned long address;
 
 	VM_BUG_ON(!pages);
 	lockdep_assert_held(&khugepaged_mm_lock);
@@ -2083,11 +2085,14 @@ static unsigned int khugepaged_scan_mm_slot(unsigned int pages,
 	vma = NULL;
 	if (unlikely(!mmap_read_trylock(mm)))
 		goto breakouterloop_mmap_lock;
-	if (likely(!khugepaged_test_exit(mm)))
-		vma = find_vma(mm, khugepaged_scan.address);
 
 	progress++;
-	for (; vma; vma = vma->vm_next) {
+	if (unlikely(khugepaged_test_exit(mm)))
+		goto breakouterloop;
+
+	address = khugepaged_scan.address;
+	vma_iter_init(&vmi, mm, address);
+	for_each_vma(vmi, vma) {
 		unsigned long hstart, hend;
 
 		cond_resched();
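
The conversion pattern used in the hunks above looks roughly like the sketch
below: a vm_next linked-list walk seeded by find_vma() becomes a maple tree
VMA iterator seeded at an address. This is an illustrative sketch only, not
part of the patch; scan_vmas_sketch() is a hypothetical helper and
cond_resched() merely stands in for the real per-VMA work.

/*
 * Illustrative sketch (not part of this patch): general shape of converting
 * a vm_next linked-list walk to the maple tree VMA iterator.
 * scan_vmas_sketch() is a hypothetical helper; cond_resched() stands in
 * for the real per-VMA work.
 */
#include <linux/mm.h>
#include <linux/sched.h>

static void scan_vmas_sketch(struct mm_struct *mm, unsigned long address)
{
	struct vm_area_struct *vma;
	VMA_ITERATOR(vmi, mm, address);

	/*
	 * Old pattern, removed by this series:
	 *
	 *	for (vma = find_vma(mm, address); vma; vma = vma->vm_next)
	 *		cond_resched();
	 *
	 * New pattern: walk the maple tree starting at 'address'.
	 */
	for_each_vma(vmi, vma)
		cond_resched();
}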