mm/mmap: Remove __vma_adjust()
author Liam R. Howlett <Liam.Howlett@oracle.com>
Thu, 11 Aug 2022 20:19:43 +0000 (16:19 -0400)
committer Liam R. Howlett <Liam.Howlett@oracle.com>
Fri, 12 Aug 2022 13:56:54 +0000 (09:56 -0400)
Inline the work of __vma_adjust() into vma_merge().  This reduces code
size and has the added benefit of keeping the comments for each merge
case next to the code that handles it.
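
For context, callers of vma_merge() are unchanged by this patch; only the
internals grow to absorb __vma_adjust().  Below is a minimal sketch of a
typical call site, modelled loosely on mprotect_fixup(); the variable names
and surrounding setup are illustrative assumptions, and only the vma_merge()
argument list follows the signature used in this tree:

	/* Hypothetical caller, in the style of mprotect_fixup() */
	pgoff_t pgoff = vma->vm_pgoff +
			((start - vma->vm_start) >> PAGE_SHIFT);
	struct vm_area_struct *merged;

	merged = vma_merge(mm, prev, start, end, newflags,
			   vma->anon_vma, vma->vm_file, pgoff,
			   vma_policy(vma), vma->vm_userfaultfd_ctx,
			   anon_vma_name(vma));
	if (merged)
		vma = merged;	/* prev/next were adjusted inside vma_merge() */
	/* on NULL the caller splits/adjusts the vma itself, as before */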

Signed-off-by: Liam R. Howlett <Liam.Howlett@oracle.com>
kernel/events/uprobes.c
mm/filemap.c
mm/mmap.c
mm/rmap.c

index 401bc2d24ce062ac349bb89fdaa1fc7766188060..0f79859bc9d9e523ff18b90ee8ea4bc81cba600f 100644
--- a/kernel/events/uprobes.c
+++ b/kernel/events/uprobes.c
@@ -1350,7 +1350,7 @@ static int delayed_ref_ctr_inc(struct vm_area_struct *vma)
 }
 
 /*
- * Called from mmap_region/vma_adjust with mm->mmap_lock acquired.
+ * Called from mmap_region/vma_merge with mm->mmap_lock acquired.
  *
  * Currently we ignore all errors and always return 0, the callers
  * can't handle the failure anyway.
index cd59f055e29d50306610e16a81fa4b6b0c1d1e1f..33d4dce47f3fd82939fe4521cb9c682d41c24bf5 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -97,7 +97,7 @@
  *    ->i_pages lock           (__sync_single_inode)
  *
  *  ->i_mmap_rwsem
- *    ->anon_vma.lock          (vma_adjust)
+ *    ->anon_vma.lock          (vma_merge)
  *
  *  ->anon_vma.lock
  *    ->page_table_lock or pte_lock    (anon_vma_prepare and various)
index b5a212e54b4e870a2843ba61c6d6c5ce69b12aaf..843815afb0b65f07c277c98c2e7067cc144efe19 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -768,135 +768,6 @@ int vma_shrink(struct ma_state *mas, struct vm_area_struct *vma,
        return 0;
 }
 
-/*
- * We cannot adjust vm_start, vm_end, vm_pgoff fields of a vma that
- * is already present in an i_mmap tree without adjusting the tree.
- * The following helper function should be used when such adjustments
- * are necessary.  The "insert" vma (if any) is to be inserted
- * before we drop the necessary locks.
- */
-int __vma_adjust(struct vm_area_struct *vma, unsigned long start,
-       unsigned long end, pgoff_t pgoff, struct vm_area_struct *expand)
-{
-       struct mm_struct *mm = vma->vm_mm;
-       struct vm_area_struct *remove2 = NULL;
-       struct vm_area_struct *remove = NULL;
-       struct vm_area_struct *next = find_vma(mm, vma->vm_end);
-       struct vm_area_struct *orig_vma = vma;
-       struct file *file = vma->vm_file;
-       bool vma_changed = false;
-       long adjust_next = 0;
-       MA_STATE(mas, &mm->mm_mt, 0, 0);
-       struct vma_locking vma_lock;
-
-       if (next) {
-               int error = 0;
-
-               if (end >= next->vm_end) {
-                       /*
-                        * vma expands, overlapping all the next, and
-                        * perhaps the one after too (mprotect case 6).
-                        * The only other cases that gets here are
-                        * case 1, case 7 and case 8.
-                        */
-                       if (next == expand) {
-                               /*
-                                * The only case where we don't expand "vma"
-                                * and we expand "next" instead is case 8.
-                                */
-                               VM_WARN_ON(end != next->vm_end);
-                               /*
-                                * remove_next == 3 means we're
-                                * removing "vma" and that to do so we
-                                * swapped "vma" and "next".
-                                */
-                               VM_WARN_ON(file != next->vm_file);
-                               swap(vma, next);
-                               remove = next;
-                       } else {
-                               VM_WARN_ON(expand != vma);
-                               /*
-                                * case 1, 6, 7, remove_next == 2 is case 6,
-                                * remove_next == 1 is case 1 or 7.
-                                */
-                               remove = next;
-                               if (end > next->vm_end)
-                                       remove2 = find_vma(mm, next->vm_end);
-
-                               VM_WARN_ON(remove2 != NULL &&
-                                          end != remove2->vm_end);
-                       }
-
-                       /*
-                        * If next doesn't have anon_vma, import from vma after
-                        * next, if the vma overlaps with it.
-                        */
-                       if (remove != NULL && !next->anon_vma)
-                               error = dup_anon_vma(vma, remove2);
-                       else
-                               error = dup_anon_vma(vma, remove);
-
-               } else if (end > next->vm_start) {
-                       /*
-                        * vma expands, overlapping part of the next:
-                        * mprotect case 5 shifting the boundary up.
-                        */
-                       adjust_next = (end - next->vm_start);
-                       VM_WARN_ON(expand != vma);
-                       error = dup_anon_vma(vma, next);
-               } else if (end < vma->vm_end) {
-                       /*
-                        * vma shrinks, and !insert tells it's not
-                        * split_vma inserting another: so it must be
-                        * mprotect case 4 shifting the boundary down.
-                        */
-                       adjust_next = -(vma->vm_end - end);
-                       VM_WARN_ON(expand != next);
-                       error = dup_anon_vma(next, vma);
-               }
-               if (error)
-                       return error;
-       }
-
-       if (mas_preallocate(&mas, vma, GFP_KERNEL))
-               return -ENOMEM;
-
-       vma_adjust_trans_huge(orig_vma, start, end, adjust_next);
-
-       init_multi_vma_lock(&vma_lock, vma, adjust_next ? next : NULL, remove,
-                           remove2);
-
-       VM_WARN_ON(vma_lock.anon_vma && adjust_next && next->anon_vma &&
-                  vma_lock.anon_vma != next->anon_vma);
-
-       lock_vma(&vma_lock);
-
-       if (start < vma->vm_start || end > vma->vm_end)
-               vma_changed = true;
-
-       vma->vm_start = start;
-       vma->vm_end = end;
-       vma->vm_pgoff = pgoff;
-
-       if (vma_changed)
-               vma_mas_store(vma, &mas);
-
-       if (adjust_next) {
-               next->vm_start += adjust_next;
-               next->vm_pgoff += adjust_next >> PAGE_SHIFT;
-               if (adjust_next < 0) {
-                       BUG_ON(vma_changed);
-                       vma_mas_store(next, &mas);
-               }
-       }
-
-       unlock_vma(&vma_lock, &mas, mm);
-       mas_destroy(&mas);
-       validate_mm(mm);
-
-       return 0;
-}
-
 /*
  * If the vma has a ->close operation then the driver probably needs to release
  * per-vma resources, so we don't attempt to merge those.
@@ -1023,7 +894,7 @@ can_vma_merge_after(struct vm_area_struct *vma, unsigned long vm_flags,
  * It is important for case 8 that the vma NNNN overlapping the
  * region AAAA is never going to extended over XXXX. Instead XXXX must
  * be extended in region AAAA and NNNN must be removed. This way in
- * all cases where vma_merge succeeds, the moment vma_adjust drops the
+ * all cases where vma_merge succeeds, the moment vma_merge drops the
  * rmap_locks, the properties of the merged vma will be already
  * correct for the whole merged range. Some of those properties like
  * vm_page_prot/vm_flags may be accessed by rmap_walks and they must
@@ -1043,10 +914,17 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
                        struct anon_vma_name *anon_name)
 {
        pgoff_t pglen = (end - addr) >> PAGE_SHIFT;
-       struct vm_area_struct *mid, *next, *res;
+       pgoff_t vma_pgoff;
+       struct vm_area_struct *mid, *next, *vma, *res, *adj, *remove, *remove2;
        int err = -1;
        bool merge_prev = false;
        bool merge_next = false;
+       bool vma_expanded = false;
+       struct vma_locking vma_lock;
+       unsigned long vma_end = end;
+       long adj_next = 0;
+       unsigned long vma_start = addr;
+       MA_STATE(mas, &mm->mm_mt, 0, 0);
 
        /*
         * We later require that vma->vm_flags == vm_flags,
@@ -1065,13 +943,17 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
        VM_WARN_ON(mid && end > mid->vm_end);
        VM_WARN_ON(addr >= end);
 
-       /* Can we merge the predecessor? */
-       if (prev && prev->vm_end == addr &&
-                       mpol_equal(vma_policy(prev), policy) &&
-                       can_vma_merge_after(prev, vm_flags,
-                                           anon_vma, file, pgoff,
-                                           vm_userfaultfd_ctx, anon_name)) {
-               merge_prev = true;
+       if (prev) {
+               res = prev;
+               vma = prev;
+               vma_start = prev->vm_start;
+               vma_pgoff = prev->vm_pgoff;
+               /* Can we merge the predecessor? */
+               if (prev->vm_end == addr && mpol_equal(vma_policy(prev), policy)
+                   && can_vma_merge_after(prev, vm_flags, anon_vma, file,
+                                  pgoff, vm_userfaultfd_ctx, anon_name)) {
+                       merge_prev = true;
+               }
        }
        /* Can we merge the successor? */
        if (next && end == next->vm_start &&
@@ -1081,32 +963,89 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
                                             vm_userfaultfd_ctx, anon_name)) {
                merge_next = true;
        }
+
+       remove = remove2 = adj = NULL;
        /* Can we merge both the predecessor and the successor? */
        if (merge_prev && merge_next &&
-                       is_mergeable_anon_vma(prev->anon_vma,
-                               next->anon_vma, NULL)) {         /* cases 1, 6 */
-               err = __vma_adjust(prev, prev->vm_start, next->vm_end,
-                                  prev->vm_pgoff, prev);
-               res = prev;
-       } else if (merge_prev) {                        /* cases 2, 5, 7 */
-               err = __vma_adjust(prev, prev->vm_start, end, prev->vm_pgoff,
-                                  prev);
-               res = prev;
+           is_mergeable_anon_vma(prev->anon_vma, next->anon_vma, NULL)) {
+               /* cases 1, 6 */
+               remove = mid;
+               vma_end = next->vm_end;
+               err = dup_anon_vma(res, remove);
+               if (mid != next) {                      /* case 6 */
+                       remove2 = next;
+                       if (!remove->anon_vma)
+                               err = dup_anon_vma(res, remove2);
+               }
+       } else if (merge_prev) {
+               /* cases 2, 5, 7.  Case 2 does not clone anon_vma */
+               err = 0;
+               if (end > next->vm_start) {
+                       err = dup_anon_vma(res, next);
+                       if (end == next->vm_end) {      /* case 7 */
+                               remove = next;
+                       } else {                        /* case 5 */
+                               adj = next;
+                               adj_next = (end - next->vm_start);
+                       }
+               }
        } else if (merge_next) {
-               if (prev && addr < prev->vm_end)        /* case 4 */
-                       err = __vma_adjust(prev, prev->vm_start, addr,
-                                          prev->vm_pgoff, next);
-               else                                    /* cases 3, 8 */
-                       err = __vma_adjust(mid, addr, next->vm_end,
-                                          next->vm_pgoff - pglen, next);
+               /* cases 3, 4, 8.  Case 3 does not clone anon_vma */
                res = next;
+               if (prev && addr < prev->vm_end) {      /* case 4 */
+                       adj = next;
+                       vma_end = addr;
+                       adj_next = -(vma->vm_end - addr);
+                       err = dup_anon_vma(res, adj);
+               } else {                                /* case 3, 8 */
+                       vma_pgoff = next->vm_pgoff;
+                       vma = next;
+                       vma_start = addr;
+                       vma_end = next->vm_end;
+                       err = 0;
+                       if (mid != next) {              /* case 8 */
+                               remove = mid;
+                               err = dup_anon_vma(res, remove);
+                       }
+               }
        }
 
-       /*
-        * Cannot merge with predecessor or successor or error in __vma_adjust?
-        */
+       /* Cannot merge or error in anon_vma clone */
        if (err)
                return NULL;
+
+       if (mas_preallocate(&mas, res, GFP_KERNEL))
+               return NULL;
+
+       vma_adjust_trans_huge(vma, vma_start, vma_end, adj_next);
+
+       init_multi_vma_lock(&vma_lock, vma, adj, remove, remove2);
+       VM_WARN_ON(vma_lock.anon_vma && adj_next && next->anon_vma &&
+                  vma_lock.anon_vma != next->anon_vma);
+
+       lock_vma(&vma_lock);
+       if (vma_start < vma->vm_start || vma_end > vma->vm_end)
+               vma_expanded = true;
+
+       vma->vm_start = vma_start;
+       vma->vm_end = vma_end;
+       vma->vm_pgoff = vma_pgoff;
+
+       if (vma_expanded)
+               vma_mas_store(vma, &mas);
+
+       if (adj_next) {
+               next->vm_start += adj_next;
+               next->vm_pgoff += adj_next >> PAGE_SHIFT;
+               if (adj_next < 0) {
+                       WARN_ON(vma_expanded);
+                       vma_mas_store(next, &mas);
+               }
+       }
+
+       unlock_vma(&vma_lock, &mas, mm);
+       mas_destroy(&mas);
+       validate_mm(mm);
        khugepaged_enter_vma(res, vm_flags);
        return res;
 }
index af775855e58f04f79405adf92c60e1a9e8a11e86..068ee4901ec3ad17d208bc944b1c0a3dae4c2afa 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -1228,7 +1228,7 @@ void page_add_anon_rmap(struct page *page,
        if (unlikely(PageKsm(page)))
                unlock_page_memcg(page);
 
-       /* address might be in next vma when migration races vma_adjust */
+       /* address might be in next vma when migration races vma_merge */
        else if (first)
                __page_set_anon_rmap(page, vma, address,
                                     !!(flags & RMAP_EXCLUSIVE));
@@ -2520,7 +2520,7 @@ void hugepage_add_anon_rmap(struct page *page, struct vm_area_struct *vma,
 
        BUG_ON(!PageLocked(page));
        BUG_ON(!anon_vma);
-       /* address might be in next vma when migration races vma_adjust */
+       /* address might be in next vma when migration races vma_merge */
        first = atomic_inc_and_test(compound_mapcount_ptr(page));
        VM_BUG_ON_PAGE(!first && (flags & RMAP_EXCLUSIVE), page);
        VM_BUG_ON_PAGE(!first && PageAnonExclusive(page), page);