www.infradead.org Git - users/jedix/linux-maple.git/commitdiff
mm: Pass through vma iterator to __vma_adjust()
author: Liam Howlett <Liam.Howlett@oracle.com>
Fri, 11 Nov 2022 20:41:07 +0000 (15:41 -0500)
committer: Liam Howlett <Liam.Howlett@oracle.com>
Mon, 14 Nov 2022 19:57:07 +0000 (14:57 -0500)
Pass the vma iterator through to __vma_adjust() so the state can be
updated.

Signed-off-by: Liam Howlett <Liam.Howlett@oracle.com>
include/linux/mm.h
mm/mmap.c

index 28b0947c2953d435aabf79ba93c75ecb2b981321..82014c1cd5c031aadeccca6f5209a66b9f2c09af 100644 (file)
@@ -2749,13 +2749,15 @@ void anon_vma_interval_tree_verify(struct anon_vma_chain *node);
 
 /* mmap.c */
 extern int __vm_enough_memory(struct mm_struct *mm, long pages, int cap_sys_admin);
-extern int __vma_adjust(struct vm_area_struct *vma, unsigned long start,
+extern int __vma_adjust(struct vma_iterator *vmi, struct vm_area_struct *vma, unsigned long start,
        unsigned long end, pgoff_t pgoff, struct vm_area_struct *insert,
        struct vm_area_struct *expand);
 static inline int vma_adjust(struct vm_area_struct *vma, unsigned long start,
        unsigned long end, pgoff_t pgoff, struct vm_area_struct *insert)
 {
-       return __vma_adjust(vma, start, end, pgoff, insert, NULL);
+       VMA_ITERATOR(vmi, vma->vm_mm, start);
+
+       return __vma_adjust(&vmi, vma, start, end, pgoff, insert, NULL);
 }
 extern struct vm_area_struct *vma_merge(struct vma_iterator *vmi,
        struct mm_struct *, struct vm_area_struct *prev, unsigned long addr,
index 23add3eb0094642c6046983789d162181373b6fb..0e6f0705d81a55e6c59c3a9bcbcdaea254869e73 100644 (file)
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -562,9 +562,9 @@ nomem:
  * are necessary.  The "insert" vma (if any) is to be inserted
  * before we drop the necessary locks.
  */
-int __vma_adjust(struct vm_area_struct *vma, unsigned long start,
-       unsigned long end, pgoff_t pgoff, struct vm_area_struct *insert,
-       struct vm_area_struct *expand)
+int __vma_adjust(struct vma_iterator *vmi, struct vm_area_struct *vma,
+       unsigned long start, unsigned long end, pgoff_t pgoff,
+       struct vm_area_struct *insert, struct vm_area_struct *expand)
 {
        struct mm_struct *mm = vma->vm_mm;
        struct vm_area_struct *next_next = NULL;        /* uninit var warning */
@@ -577,7 +577,6 @@ int __vma_adjust(struct vm_area_struct *vma, unsigned long start,
        bool vma_changed = false;
        long adjust_next = 0;
        int remove_next = 0;
-       VMA_ITERATOR(vmi, mm, 0);
        struct vm_area_struct *exporter = NULL, *importer = NULL;
 
        if (next && !insert) {
@@ -662,7 +661,7 @@ int __vma_adjust(struct vm_area_struct *vma, unsigned long start,
                }
        }
 
-       if (vma_iter_prealloc(&vmi, vma))
+       if (vma_iter_prealloc(vmi, vma))
                return -ENOMEM;
 
        vma_adjust_trans_huge(orig_vma, start, end, adjust_next);
@@ -708,7 +707,7 @@ int __vma_adjust(struct vm_area_struct *vma, unsigned long start,
        if (start != vma->vm_start) {
                if ((vma->vm_start < start) &&
                    (!insert || (insert->vm_end != start))) {
-                       vma_iter_clear(&vmi, vma->vm_start, start);
+                       vma_iter_clear(vmi, vma->vm_start, start);
                        VM_WARN_ON(insert && insert->vm_start > vma->vm_start);
                } else {
                        vma_changed = true;
@@ -718,8 +717,8 @@ int __vma_adjust(struct vm_area_struct *vma, unsigned long start,
        if (end != vma->vm_end) {
                if (vma->vm_end > end) {
                        if (!insert || (insert->vm_start != end)) {
-                               vma_iter_clear(&vmi, end, vma->vm_end);
-                               vma_iter_set(&vmi, vma->vm_end);
+                               vma_iter_clear(vmi, end, vma->vm_end);
+                               vma_iter_set(vmi, vma->vm_end);
                                VM_WARN_ON(insert &&
                                           insert->vm_end < vma->vm_end);
                        }
@@ -730,13 +729,13 @@ int __vma_adjust(struct vm_area_struct *vma, unsigned long start,
        }
 
        if (vma_changed)
-               vma_iter_store(&vmi, vma);
+               vma_iter_store(vmi, vma);
 
        vma->vm_pgoff = pgoff;
        if (adjust_next) {
                next->vm_start += adjust_next;
                next->vm_pgoff += adjust_next >> PAGE_SHIFT;
-               vma_iter_store(&vmi, next);
+               vma_iter_store(vmi, next);
        }
 
        if (file) {
@@ -756,7 +755,7 @@ int __vma_adjust(struct vm_area_struct *vma, unsigned long start,
                 * us to insert it before dropping the locks
                 * (it may either follow vma or precede it).
                 */
-               vma_iter_store(&vmi, insert);
+               vma_iter_store(vmi, insert);
                mm->map_count++;
        }
 
@@ -802,7 +801,7 @@ again:
        if (insert && file)
                uprobe_mmap(insert);
 
-       vma_iter_free(&vmi);
+       vma_iter_free(vmi);
        validate_mm(mm);
 
        return 0;
@@ -996,20 +995,20 @@ struct vm_area_struct *vma_merge(struct vma_iterator *vmi, struct mm_struct *mm,
        if (merge_prev && merge_next &&
                        is_mergeable_anon_vma(prev->anon_vma,
                                next->anon_vma, NULL)) {         /* cases 1, 6 */
-               err = __vma_adjust(prev, prev->vm_start,
+               err = __vma_adjust(vmi, prev, prev->vm_start,
                                        next->vm_end, prev->vm_pgoff, NULL,
                                        prev);
                res = prev;
        } else if (merge_prev) {                        /* cases 2, 5, 7 */
-               err = __vma_adjust(prev, prev->vm_start,
+               err = __vma_adjust(vmi, prev, prev->vm_start,
                                        end, prev->vm_pgoff, NULL, prev);
                res = prev;
        } else if (merge_next) {
                if (prev && addr < prev->vm_end)        /* case 4 */
-                       err = __vma_adjust(prev, prev->vm_start,
+                       err = __vma_adjust(vmi, prev, prev->vm_start,
                                        addr, prev->vm_pgoff, NULL, next);
                else                                    /* cases 3, 8 */
-                       err = __vma_adjust(mid, addr, next->vm_end,
+                       err = __vma_adjust(vmi, mid, addr, next->vm_end,
                                        next->vm_pgoff - pglen, NULL, next);
                res = next;
        }