mm/khugepaged: stop using vma linked list
Author:     Matthew Wilcox (Oracle) <willy@infradead.org>
AuthorDate: Mon, 22 Aug 2022 15:06:28 +0000 (15:06 +0000)
Commit:     Andrew Morton <akpm@linux-foundation.org>
CommitDate: Fri, 26 Aug 2022 05:03:18 +0000 (22:03 -0700)
Use vma iterator & find_vma() instead of vma linked list.
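
For context, a minimal before/after sketch of the walk this series
converts (the examine() body is a placeholder, not from the patch;
VMA_ITERATOR() and for_each_vma() are the helpers the maple tree
series provides):

	struct vm_area_struct *vma;
	VMA_ITERATOR(vmi, mm, 0);

	/* Before: open-coded walk of the vma linked list. */
	for (vma = mm->mmap; vma; vma = vma->vm_next)
		examine(vma);

	/* After: walk the maple tree via the VMA iterator. */
	for_each_vma(vmi, vma)
		examine(vma);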

Link: https://lkml.kernel.org/r/20220822150128.1562046-53-Liam.Howlett@oracle.com
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Signed-off-by: Liam R. Howlett <Liam.Howlett@oracle.com>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: David Hildenbrand <david@redhat.com>
Cc: David Howells <dhowells@redhat.com>
Cc: SeongJae Park <sj@kernel.org>
Cc: Sven Schnelle <svens@linux.ibm.com>
Cc: Vlastimil Babka <vbabka@suse.cz>
Cc: Will Deacon <will@kernel.org>
Cc: Yu Zhao <yuzhao@google.com>
Cc: Davidlohr Bueso <dave@stgolabs.net>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
mm/huge_memory.c
mm/khugepaged.c

diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 66bf4c7f716be1239ec92e2e96c637b140033168..428c42bfd4c4d73c5062b3046c49dcaa6222128e 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -2339,11 +2339,11 @@ void vma_adjust_trans_huge(struct vm_area_struct *vma,
        split_huge_pmd_if_needed(vma, end);
 
        /*
-        * If we're also updating the vma->vm_next->vm_start,
+        * If we're also updating the next vma vm_start,
         * check if we need to split it.
         */
        if (adjust_next > 0) {
-               struct vm_area_struct *next = vma->vm_next;
+               struct vm_area_struct *next = find_vma(vma->vm_mm, vma->vm_end);
                unsigned long nstart = next->vm_start;
                nstart += adjust_next;
                split_huge_pmd_if_needed(next, nstart);
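
The find_vma() call above acts as a "next VMA" lookup: find_vma(mm, addr)
returns the first VMA whose vm_end is greater than addr, so passing
vma->vm_end yields the VMA immediately after @vma. As a sketch (the
next_vma() helper name is hypothetical, not part of this patch):

	/*
	 * find_vma() returns the first VMA with vm_end > addr, so
	 * vma->vm_end selects the successor of @vma, replacing the
	 * removed vma->vm_next link.
	 */
	static inline struct vm_area_struct *next_vma(struct vm_area_struct *vma)
	{
		return find_vma(vma->vm_mm, vma->vm_end);
	}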
diff --git a/mm/khugepaged.c b/mm/khugepaged.c
index d3313b7a8fe5cb643e05530e2f6b902e3bd847ae..d8e3881063225a162f2fc7e4dc0501fc29a4d3ca 100644
--- a/mm/khugepaged.c
+++ b/mm/khugepaged.c
@@ -2053,10 +2053,12 @@ static unsigned int khugepaged_scan_mm_slot(unsigned int pages, int *result,
        __releases(&khugepaged_mm_lock)
        __acquires(&khugepaged_mm_lock)
 {
+       struct vma_iterator vmi;
        struct mm_slot *mm_slot;
        struct mm_struct *mm;
        struct vm_area_struct *vma;
        int progress = 0;
+       unsigned long address;
 
        VM_BUG_ON(!pages);
        lockdep_assert_held(&khugepaged_mm_lock);
@@ -2081,11 +2083,14 @@ static unsigned int khugepaged_scan_mm_slot(unsigned int pages, int *result,
        vma = NULL;
        if (unlikely(!mmap_read_trylock(mm)))
                goto breakouterloop_mmap_lock;
-       if (likely(!hpage_collapse_test_exit(mm)))
-               vma = find_vma(mm, khugepaged_scan.address);
 
        progress++;
-       for (; vma; vma = vma->vm_next) {
+       if (unlikely(hpage_collapse_test_exit(mm)))
+               goto breakouterloop;
+
+       address = khugepaged_scan.address;
+       vma_iter_init(&vmi, mm, address);
+       for_each_vma(vmi, vma) {
                unsigned long hstart, hend;
 
                cond_resched();
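
For reference, for_each_vma() as used above is a thin wrapper around the
iterator's vma_next(); its definition in include/linux/mm.h is roughly:

	#define for_each_vma(__vmi, __vma)				\
		while (((__vma) = vma_next(&(__vmi))) != NULL)

Initializing the iterator at khugepaged_scan.address therefore resumes the
scan at the first VMA ending above the saved address, matching the old
find_vma() plus vm_next behaviour.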