From 1be243094cd007eb790aa23a8be8415742c9821f Mon Sep 17 00:00:00 2001
From: "Liam R. Howlett" <Liam.Howlett@Oracle.com>
Date: Wed, 10 Aug 2022 16:09:15 -0400
Subject: [PATCH] mm: Don't use __vma_adjust() in __split_vma()

Use the abstracted locking and maple tree operations.  Since
__split_vma() is the only user of __vma_adjust() that uses the insert
argument, drop that argument.  Remove the NULL passed through from
fs/exec's shift_arg_pages() at the same time.

Signed-off-by: Liam R. Howlett <Liam.Howlett@Oracle.com>
---
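Note for reviewers: below is a minimal userspace sketch of the offset
arithmetic __split_vma() applies to the two halves of the split; it is
an illustration only. The struct, the PAGE_SHIFT value, and
split_fixup() are stand-ins invented for the sketch, not the kernel
definitions -- only the pgoff math mirrors the patch.

#include <assert.h>
#include <stdio.h>

#define PAGE_SHIFT 12                   /* assume 4K pages for the sketch */

/* Stand-in for the few vm_area_struct fields the split touches. */
struct vma {
        unsigned long vm_start;         /* inclusive */
        unsigned long vm_end;           /* exclusive */
        unsigned long vm_pgoff;         /* offset into the file, in pages */
};

/*
 * Split @vma at @addr, filling @new with the other half.  With
 * new_below, @new covers [vm_start, addr) and keeps the old pgoff while
 * @vma is trimmed to [addr, vm_end) and its pgoff advances; otherwise
 * @new takes the upper half and the advanced pgoff.
 */
static void split_fixup(struct vma *vma, struct vma *new,
                        unsigned long addr, int new_below)
{
        *new = *vma;                    /* vm_area_dup() stand-in */

        if (new_below) {
                new->vm_end = addr;
                vma->vm_start = addr;
                vma->vm_pgoff += (addr - new->vm_start) >> PAGE_SHIFT;
        } else {
                new->vm_start = addr;
                new->vm_pgoff += (addr - vma->vm_start) >> PAGE_SHIFT;
                vma->vm_end = addr;
        }
}

int main(void)
{
        struct vma vma = { 0x1000, 0x9000, 10 }, new;

        /* Split 4 pages in: the upper half maps from pgoff 10 + 4. */
        split_fixup(&vma, &new, 0x5000, 0);
        assert(vma.vm_end == 0x5000 && new.vm_pgoff == 14);
        printf("vma [%#lx, %#lx) pgoff %lu, new [%#lx, %#lx) pgoff %lu\n",
               vma.vm_start, vma.vm_end, vma.vm_pgoff,
               new.vm_start, new.vm_end, new.vm_pgoff);
        return 0;
}

Either way, the half that starts at addr has its page offset advanced by
(addr - old vm_start) >> PAGE_SHIFT, which is exactly the adjustment in
the two hunks below that touch vm_pgoff.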
 fs/exec.c          |   4 +-
 include/linux/mm.h |   7 ++-
 mm/mmap.c          | 109 +++++++++++++++++++++------------------------
 3 files changed, 56 insertions(+), 64 deletions(-)

diff --git a/fs/exec.c b/fs/exec.c
index 9843cecd031a..a52175a559fd 100644
--- a/fs/exec.c
+++ b/fs/exec.c
@@ -702,7 +702,7 @@ static int shift_arg_pages(struct vm_area_struct *vma, unsigned long shift)
 	/*
 	 * cover the whole range: [new_start, old_end)
 	 */
-	if (vma_adjust(vma, new_start, old_end, vma->vm_pgoff, NULL))
+	if (vma_adjust(vma, new_start, old_end, vma->vm_pgoff))
 		return -ENOMEM;
 
 	/*
@@ -737,7 +737,7 @@ static int shift_arg_pages(struct vm_area_struct *vma, unsigned long shift)
 	/*
 	 * Shrink the vma to just the new range.  Always succeeds.
 	 */
-	vma_adjust(vma, new_start, new_end, vma->vm_pgoff, NULL);
+	vma_adjust(vma, new_start, new_end, vma->vm_pgoff);
 
 	return 0;
 }
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 6dee7cd93f53..0a827fae6f62 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -2599,12 +2599,11 @@ void anon_vma_interval_tree_verify(struct anon_vma_chain *node);
 /* mmap.c */
 extern int __vm_enough_memory(struct mm_struct *mm, long pages, int cap_sys_admin);
 extern int __vma_adjust(struct vm_area_struct *vma, unsigned long start,
-	unsigned long end, pgoff_t pgoff, struct vm_area_struct *insert,
-	struct vm_area_struct *expand);
+	unsigned long end, pgoff_t pgoff, struct vm_area_struct *expand);
 static inline int vma_adjust(struct vm_area_struct *vma, unsigned long start,
-	unsigned long end, pgoff_t pgoff, struct vm_area_struct *insert)
+	unsigned long end, pgoff_t pgoff)
 {
-	return __vma_adjust(vma, start, end, pgoff, insert, NULL);
+	return __vma_adjust(vma, start, end, pgoff, NULL);
 }
 extern struct vm_area_struct *vma_merge(struct mm_struct *,
 	struct vm_area_struct *prev, unsigned long addr, unsigned long end,
diff --git a/mm/mmap.c b/mm/mmap.c
index 5e4075d86b93..449c0167995e 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -598,7 +598,6 @@ static inline void unlock_vma(struct vma_locking *vl, struct ma_state *mas,
 		 * us to insert it before dropping the locks
 		 * (it may either follow vma or precede it).
 		 */
-		mas_reset(mas);
 		vma_mas_store(vl->insert, mas);
 		mm->map_count++;
 	}
@@ -720,8 +719,7 @@ nomem:
  * before we drop the necessary locks.
  */
 int __vma_adjust(struct vm_area_struct *vma, unsigned long start,
-	unsigned long end, pgoff_t pgoff, struct vm_area_struct *insert,
-	struct vm_area_struct *expand)
+	unsigned long end, pgoff_t pgoff, struct vm_area_struct *expand)
 {
 	struct mm_struct *mm = vma->vm_mm;
 	struct vm_area_struct *remove2 = NULL;
@@ -735,7 +733,7 @@ int __vma_adjust(struct vm_area_struct *vma, unsigned long start,
 	struct vm_area_struct *exporter = NULL, *importer = NULL;
 	struct vma_locking vma_lock;
 
-	if (next && !insert) {
+	if (next) {
 		if (end >= next->vm_end) {
 			/*
 			 * vma expands, overlapping all the next, and
@@ -827,37 +825,28 @@ int __vma_adjust(struct vm_area_struct *vma, unsigned long start,
 	VM_WARN_ON(vma_lock.anon_vma && adjust_next && next->anon_vma &&
 		   vma_lock.anon_vma != next->anon_vma);
 
-	vma_lock.insert = insert;
+
 	lock_vma(&vma_lock);
 
-	if (start != vma->vm_start) {
-		if ((vma->vm_start < start) &&
-		    (!insert || (insert->vm_end != start))) {
-			vma_mas_szero(&mas, vma->vm_start, start);
-			VM_WARN_ON(insert && insert->vm_start > vma->vm_start);
-		} else {
-			vma_changed = true;
-		}
-		vma->vm_start = start;
-	}
-	if (end != vma->vm_end) {
-		if (vma->vm_end > end) {
-			if (!insert || (insert->vm_start != end)) {
-				vma_mas_szero(&mas, end, vma->vm_end);
-				mas_reset(&mas);
-				VM_WARN_ON(insert &&
-					   insert->vm_end < vma->vm_end);
-			}
-		} else {
-			vma_changed = true;
-		}
-		vma->vm_end = end;
+	if (vma->vm_start < start) {
+		vma_mas_szero(&mas, vma->vm_start, start);
+	} else if (start != vma->vm_start) {
+		vma_changed = true;
+	}
+
+	if (vma->vm_end > end) {
+		vma_mas_szero(&mas, end, vma->vm_end);
+	} else if (end != vma->vm_end) {
+		vma_changed = true;
 	}
 
+	vma->vm_start = start;
+	vma->vm_end = end;
+	vma->vm_pgoff = pgoff;
+
 	if (vma_changed)
 		vma_mas_store(vma, &mas);
 
-	vma->vm_pgoff = pgoff;
 	if (adjust_next) {
 		next->vm_start += adjust_next;
 		next->vm_pgoff += adjust_next >> PAGE_SHIFT;
@@ -1059,21 +1048,20 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
 	if (merge_prev && merge_next &&
 	    is_mergeable_anon_vma(prev->anon_vma, next->anon_vma, NULL)) {
 							/* cases 1, 6 */
-		err = __vma_adjust(prev, prev->vm_start,
-				   next->vm_end, prev->vm_pgoff, NULL,
-				   prev);
+		err = __vma_adjust(prev, prev->vm_start, next->vm_end,
+				   prev->vm_pgoff, prev);
 		res = prev;
 	} else if (merge_prev) {			/* cases 2, 5, 7 */
-		err = __vma_adjust(prev, prev->vm_start,
-				   end, prev->vm_pgoff, NULL, prev);
+		err = __vma_adjust(prev, prev->vm_start, end, prev->vm_pgoff,
+				   prev);
 		res = prev;
 	} else if (merge_next) {
 		if (prev && addr < prev->vm_end)	/* case 4 */
-			err = __vma_adjust(prev, prev->vm_start,
-					   addr, prev->vm_pgoff, NULL, next);
+			err = __vma_adjust(prev, prev->vm_start, addr,
+					   prev->vm_pgoff, next);
 		else					/* cases 3, 8 */
 			err = __vma_adjust(mid, addr, next->vm_end,
-					   next->vm_pgoff - pglen, NULL, next);
+					   next->vm_pgoff - pglen, next);
 		res = next;
 	}
 
@@ -2203,6 +2191,8 @@ static void unmap_region(struct mm_struct *mm, struct maple_tree *mt,
 int __split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
 		unsigned long addr, int new_below)
 {
+	MA_STATE(mas, &mm->mm_mt, 0, 0);
+	struct vma_locking vma_lock;
 	struct vm_area_struct *new;
 	int err;
 	validate_mm_mt(mm);
@@ -2217,16 +2207,20 @@ int __split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
 	if (!new)
 		return -ENOMEM;
 
-	if (new_below)
+	err = -ENOMEM;
+	if (mas_preallocate(&mas, vma, GFP_KERNEL))
+		goto out_free_vma;
+
+	if (new_below) {
 		new->vm_end = addr;
-	else {
+	} else {
 		new->vm_start = addr;
 		new->vm_pgoff += ((addr - vma->vm_start) >> PAGE_SHIFT);
 	}
 
 	err = vma_dup_policy(vma, new);
 	if (err)
-		goto out_free_vma;
+		goto out_free_mas;
 
 	err = anon_vma_clone(new, vma);
 	if (err)
@@ -2238,28 +2232,27 @@ int __split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
 	if (new->vm_ops && new->vm_ops->open)
 		new->vm_ops->open(new);
 
-	if (new_below)
-		err = vma_adjust(vma, addr, vma->vm_end, vma->vm_pgoff +
-			((addr - new->vm_start) >> PAGE_SHIFT), new);
-	else
-		err = vma_adjust(vma, vma->vm_start, addr, vma->vm_pgoff, new);
+	vma_adjust_trans_huge(vma, vma->vm_start, addr, 0);
+	init_vma_lock(&vma_lock, vma);
+	vma_lock.insert = new;
+	lock_vma(&vma_lock);
 
-	/* Success. */
-	if (!err)
-		return 0;
+	if (new_below) {
+		vma->vm_start = addr;
+		vma->vm_pgoff += (addr - new->vm_start) >> PAGE_SHIFT;
+	} else {
+		vma->vm_end = addr;
+	}
 
-	/* Avoid vm accounting in close() operation */
-	new->vm_start = new->vm_end;
-	new->vm_pgoff = 0;
-	/* Clean everything up if vma_adjust failed. */
-	if (new->vm_ops && new->vm_ops->close)
-		new->vm_ops->close(new);
-	if (new->vm_file)
-		fput(new->vm_file);
-	unlink_anon_vmas(new);
- out_free_mpol:
+	/* unlock_vma stores the new vma */
+	unlock_vma(&vma_lock, &mas, mm);
+	return 0;
+
+out_free_mpol:
 	mpol_put(vma_policy(new));
- out_free_vma:
+out_free_mas:
+	mas_destroy(&mas);
+out_free_vma:
 	vm_area_free(new);
 	validate_mm_mt(mm);
 	return err;
-- 
2.50.1
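Postscript for reviewers: the reworked exit path above follows the
usual acquire-in-order, unwind-in-reverse goto ladder, with the new
out_free_mas label slotted between the mempolicy and vma cleanups so a
mas_preallocate() failure frees only what was already acquired. A
standalone illustration of the shape (the names and resources below
are invented for the sketch, not kernel API):

#include <stdlib.h>

struct res { int x; };

static struct res *acquire(void) { return malloc(sizeof(struct res)); }
static void release(struct res *r) { free(r); }

/*
 * Acquire a then b; each failure unwinds only what is already held, in
 * reverse order of acquisition -- the same shape as the
 * out_free_mpol/out_free_mas/out_free_vma ladder in __split_vma().
 */
static int do_setup(void)
{
        struct res *a, *b;

        a = acquire();
        if (!a)
                return -1;

        b = acquire();
        if (!b)
                goto out_free_a;

        /* ... work that can no longer fail ... */
        release(b);
        release(a);
        return 0;

out_free_a:
        release(a);
        return -1;
}

int main(void) { return do_setup() ? 1 : 0; }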