www.infradead.org Git - users/willy/linux.git/commitdiff
mm/mlock: Use maple tree iterators instead of vma linked list
author: Liam R. Howlett <Liam.Howlett@Oracle.com>
Mon, 4 Jan 2021 20:00:14 +0000 (15:00 -0500)
committer: Matthew Wilcox (Oracle) <willy@infradead.org>
Wed, 20 Oct 2021 20:00:34 +0000 (16:00 -0400)
Signed-off-by: Liam R. Howlett <Liam.Howlett@Oracle.com>
mm/mlock.c

index 16d2ee160d43c572d5a19ed989b7a8d70df8e9ca..1e102fa52ef6d7d9ccf1dcd77831b9b4b0774008 100644 (file)
@@ -562,6 +562,7 @@ static int apply_vma_lock_flags(unsigned long start, size_t len,
        unsigned long nstart, end, tmp;
        struct vm_area_struct *vma, *prev;
        int error;
+       MA_STATE(mas, &current->mm->mm_mt, start, start);
 
        VM_BUG_ON(offset_in_page(start));
        VM_BUG_ON(len != PAGE_ALIGN(len));
@@ -570,11 +571,11 @@ static int apply_vma_lock_flags(unsigned long start, size_t len,
                return -EINVAL;
        if (end == start)
                return 0;
-       vma = find_vma(current->mm, start);
-       if (!vma || vma->vm_start > start)
+       vma = mas_walk(&mas);
+       if (!vma)
                return -ENOMEM;
 
-       prev = vma->vm_prev;
+       prev = mas_prev(&mas, 0);
        if (start > vma->vm_start)
                prev = vma;
 
@@ -596,7 +597,7 @@ static int apply_vma_lock_flags(unsigned long start, size_t len,
                if (nstart >= end)
                        break;
 
-               vma = prev->vm_next;
+               vma = vma_next(prev->vm_mm, prev);
                if (!vma || vma->vm_start != nstart) {
                        error = -ENOMEM;
                        break;
@@ -617,15 +618,13 @@ static unsigned long count_mm_mlocked_page_nr(struct mm_struct *mm,
 {
        struct vm_area_struct *vma;
        unsigned long count = 0;
+       MA_STATE(mas, &mm->mm_mt, start, start);
 
        if (mm == NULL)
                mm = current->mm;
 
-       vma = find_vma(mm, start);
-       if (vma == NULL)
-               return 0;
-
-       for (; vma ; vma = vma->vm_next) {
+       rcu_read_lock();
+       mas_for_each(&mas, vma, start + len) {
                if (start >= vma->vm_end)
                        continue;
                if (start + len <=  vma->vm_start)
@@ -640,6 +639,7 @@ static unsigned long count_mm_mlocked_page_nr(struct mm_struct *mm,
                        count += vma->vm_end - vma->vm_start;
                }
        }
+       rcu_read_unlock();
 
        return count >> PAGE_SHIFT;
 }
@@ -740,6 +740,7 @@ static int apply_mlockall_flags(int flags)
 {
        struct vm_area_struct *vma, *prev = NULL;
        vm_flags_t to_add = 0;
+       unsigned long addr = 0;
 
        current->mm->def_flags &= VM_LOCKED_CLEAR_MASK;
        if (flags & MCL_FUTURE) {
@@ -758,7 +759,7 @@ static int apply_mlockall_flags(int flags)
                        to_add |= VM_LOCKONFAULT;
        }
 
-       for (vma = current->mm->mmap; vma ; vma = prev->vm_next) {
+       mt_for_each(&current->mm->mm_mt, vma, addr, ULONG_MAX) {
                vm_flags_t newflags;
 
                newflags = vma->vm_flags & VM_LOCKED_CLEAR_MASK;