www.infradead.org Git - users/jedix/linux-maple.git/commitdiff
mm: Pass vma iterator through to __vma_adjust()
author: Liam R. Howlett <Liam.Howlett@Oracle.com>
Mon, 14 Nov 2022 16:10:05 +0000 (11:10 -0500)
committer: Liam R. Howlett <Liam.Howlett@oracle.com>
Tue, 13 Dec 2022 21:03:39 +0000 (16:03 -0500)
Pass the iterator through to be used in __vma_adjust().  The state of
the iterator needs to be correct for the operation that will occur so
make the adjustments.

Signed-off-by: Liam R. Howlett <Liam.Howlett@oracle.com>
mm/mmap.c

index 5ba9b8a4e6402f60ca3dbe67a825d1c2ad5c8040..bd1e17a50708373fd3b582d7aacd47927afef97d 100644 (file)
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -587,6 +587,10 @@ inline int vma_expand(struct vma_iterator *vmi, struct vm_area_struct *vma,
                vma_interval_tree_remove(vma, root);
        }
 
+       /* VMA iterator points to previous, so set to start if necessary */
+       if (vma_iter_addr(vmi) != start)
+               vma_iter_set(vmi, start);
+
        vma->vm_start = start;
        vma->vm_end = end;
        vma->vm_pgoff = pgoff;
@@ -2222,13 +2226,13 @@ static void unmap_region(struct mm_struct *mm, struct maple_tree *mt,
 /*
  * __split_vma() bypasses sysctl_max_map_count checking.  We use this where it
  * has already been checked or doesn't make sense to fail.
+ * VMA Iterator will point to the end VMA.
  */
 int __split_vma(struct vma_iterator *vmi, struct vm_area_struct *vma,
                unsigned long addr, int new_below)
 {
        struct vm_area_struct *new;
        int err;
-       unsigned long end = vma->vm_end;
 
        validate_mm_mt(vma->vm_mm);
 
@@ -2264,14 +2268,17 @@ int __split_vma(struct vma_iterator *vmi, struct vm_area_struct *vma,
                new->vm_ops->open(new);
 
        if (new_below)
-               err = vma_adjust(vma, addr, vma->vm_end, vma->vm_pgoff +
-                       ((addr - new->vm_start) >> PAGE_SHIFT), new);
+               err = __vma_adjust(vmi, vma, addr, vma->vm_end,
+                  vma->vm_pgoff + ((addr - new->vm_start) >> PAGE_SHIFT),
+                  new, NULL);
        else
-               err = vma_adjust(vma, vma->vm_start, addr, vma->vm_pgoff, new);
+               err = __vma_adjust(vmi, vma, vma->vm_start, addr, vma->vm_pgoff,
+                                new, NULL);
 
        /* Success. */
        if (!err) {
-               vma_iter_set(vmi, end);
+               if (new_below)
+                       vma_next(vmi);
                return 0;
        }
 
@@ -2366,8 +2373,7 @@ do_vmi_align_munmap(struct vma_iterator *vmi, struct vm_area_struct *vma,
                if (error)
                        goto start_split_failed;
 
-               vma_iter_set(vmi, start);
-               vma = vma_find(vmi, end);
+               vma = vma_iter_load(vmi);
        }
 
        prev = vma_prev(vmi);
@@ -2387,7 +2393,6 @@ do_vmi_align_munmap(struct vma_iterator *vmi, struct vm_area_struct *vma,
                        if (error)
                                goto end_split_failed;
 
-                       vma_iter_set(vmi, end);
                        split = vma_prev(vmi);
                        error = munmap_sidetree(split, &mas_detach);
                        if (error)
@@ -2631,6 +2636,7 @@ cannot_expand:
                goto unacct_error;
        }
 
+       vma_iter_set(&vmi, addr);
        vma->vm_start = addr;
        vma->vm_end = end;
        vma->vm_flags = vm_flags;