www.infradead.org Git - users/jedix/linux-maple.git/commitdiff
mm/vmscan: Use VMA_ITERATOR in get_next_vma()
author: Andrew Morton <akpm@linux-foundation.org>
Thu, 14 Apr 2022 19:16:57 +0000 (12:16 -0700)
committer: Liam R. Howlett <Liam.Howlett@oracle.com>
Tue, 19 Apr 2022 17:23:29 +0000 (13:23 -0400)
The next vma may actually be many VMAs away, so use the VMA_ITERATOR to
continue searching from vm_end onwards.

Signed-off-by: Liam R. Howlett <Liam.Howlett@oracle.com>
mm/vmscan.c

index d4a7d2bd276da5dd26d1569dd94bb7d6b9151a1e..0f5c53996365f6c76ac84c3d640672c384869d17 100644 (file)
@@ -3697,24 +3697,21 @@ static bool get_next_vma(struct mm_walk *walk, unsigned long mask, unsigned long
                         unsigned long *start, unsigned long *end)
 {
        unsigned long next = round_up(*end, size);
+       VMA_ITERATOR(vmi, walk->mm, walk->vma->vm_end)
 
        VM_BUG_ON(mask & size);
        VM_BUG_ON(*start >= *end);
        VM_BUG_ON((next & mask) != (*start & mask));
 
-       while (walk->vma) {
-               if (next >= walk->vma->vm_end) {
-                       walk->vma = walk->vma->vm_next;
+       for_each_mte_vma(vmi, walk->vma) {
+               if (next >= walk->vma->vm_end)
                        continue;
-               }
 
                if ((next & mask) != (walk->vma->vm_start & mask))
                        return false;
 
-               if (should_skip_vma(walk->vma->vm_start, walk->vma->vm_end, walk)) {
-                       walk->vma = walk->vma->vm_next;
+               if (should_skip_vma(walk->vma->vm_start, walk->vma->vm_end, walk))
                        continue;
-               }
 
                *start = max(next, walk->vma->vm_start);
                next = (next | ~mask) + 1;
@@ -4062,7 +4059,7 @@ static void walk_mm(struct lruvec *lruvec, struct mm_struct *mm, struct lru_gen_
                /* the caller might be holding the lock for write */
                if (mmap_read_trylock(mm)) {
                        unsigned long start = walk->next_addr;
-                       unsigned long end = mm->highest_vm_end;
+                       unsigned long end = ULONG_MAX;
 
                        err = walk_page_range(mm, start, end, &mm_walk_ops, walk);