From 79a46fab832212778d6a3196da7d78cdb3e12c29 Mon Sep 17 00:00:00 2001
From: "Liam R. Howlett"
Date: Wed, 10 Aug 2022 17:24:05 -0400
Subject: [PATCH] mm/mmap: Don't use __vma_adjust() in shift_arg_pages()

Introduce vma_shrink(), which uses the lock_vma() and unlock_vma()
helpers to reduce the coverage of a VMA.

Convert shift_arg_pages() to use vma_expand() and the new vma_shrink()
function.  Remove the VMA-shrinking support from __vma_adjust(), since
shift_arg_pages() is the only user that shrinks a VMA in this way.

Signed-off-by: Liam R. Howlett
---
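For context, a minimal usage sketch (not part of the patch itself): a caller
that moves a mapping down by "shift" bytes pairs the two helpers the same way
shift_arg_pages() does below.  The function name move_vma_down() is
illustrative only, and the page-table copying, TLB teardown and error
unwinding a real caller needs are elided.

static int move_vma_down(struct vm_area_struct *vma, unsigned long shift)
{
	unsigned long old_end = vma->vm_end;
	unsigned long new_start = vma->vm_start - shift;
	unsigned long new_end = old_end - shift;
	MA_STATE(mas, &vma->vm_mm->mm_mt, 0, 0);

	/* Grow the VMA first so it covers the whole range [new_start, old_end). */
	if (vma_expand(&mas, vma, new_start, old_end, vma->vm_pgoff, NULL))
		return -ENOMEM;

	/* ... move_page_tables() and page-table/TLB teardown would go here ... */

	/* Then trim the tail so only [new_start, new_end) remains. */
	if (vma_shrink(&mas, vma, new_start, new_end, vma->vm_pgoff))
		return -ENOMEM;

	return 0;
}
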
 fs/exec.c          |  8 ++++---
 include/linux/mm.h | 12 ++++------
 mm/mmap.c          | 59 +++++++++++++++++++++++++++++++++++-----------
 3 files changed, 55 insertions(+), 24 deletions(-)

diff --git a/fs/exec.c b/fs/exec.c
index a52175a559fd..978aeac287dc 100644
--- a/fs/exec.c
+++ b/fs/exec.c
@@ -689,6 +689,7 @@ static int shift_arg_pages(struct vm_area_struct *vma, unsigned long shift)
 	VMA_ITERATOR(vmi, mm, new_start);
 	struct vm_area_struct *next;
 	struct mmu_gather tlb;
+	MA_STATE(mas, &vma->vm_mm->mm_mt, 0, 0);
 
 	BUG_ON(new_start > new_end);
 
@@ -702,7 +703,7 @@ static int shift_arg_pages(struct vm_area_struct *vma, unsigned long shift)
 	/*
 	 * cover the whole range: [new_start, old_end)
 	 */
-	if (vma_adjust(vma, new_start, old_end, vma->vm_pgoff))
+	if (vma_expand(&mas, vma, new_start, old_end, vma->vm_pgoff, NULL))
 		return -ENOMEM;
 
 	/*
@@ -735,9 +736,10 @@ static int shift_arg_pages(struct vm_area_struct *vma, unsigned long shift)
 	tlb_finish_mmu(&tlb);
 
 	/*
-	 * Shrink the vma to just the new range.  Always succeeds.
+	 * Shrink the vma to just the new range.
 	 */
-	vma_adjust(vma, new_start, new_end, vma->vm_pgoff);
+	if (vma_shrink(&mas, vma, new_start, new_end, vma->vm_pgoff))
+		return -ENOMEM;
 
 	return 0;
 }
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 0a827fae6f62..23172fe7b631 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -2598,13 +2598,11 @@ void anon_vma_interval_tree_verify(struct anon_vma_chain *node);
 
 /* mmap.c */
 extern int __vm_enough_memory(struct mm_struct *mm, long pages, int cap_sys_admin);
-extern int __vma_adjust(struct vm_area_struct *vma, unsigned long start,
-	unsigned long end, pgoff_t pgoff, struct vm_area_struct *expand);
-static inline int vma_adjust(struct vm_area_struct *vma, unsigned long start,
-	unsigned long end, pgoff_t pgoff)
-{
-	return __vma_adjust(vma, start, end, pgoff, NULL);
-}
+extern int vma_expand(struct ma_state *mas, struct vm_area_struct *vma,
+	unsigned long start, unsigned long end, pgoff_t pgoff,
+	struct vm_area_struct *next);
+extern int vma_shrink(struct ma_state *mas, struct vm_area_struct *vma,
+	unsigned long start, unsigned long end, pgoff_t pgoff);
 extern struct vm_area_struct *vma_merge(struct mm_struct *,
 	struct vm_area_struct *prev, unsigned long addr, unsigned long end,
 	unsigned long vm_flags, struct anon_vma *, struct file *, pgoff_t,
diff --git a/mm/mmap.c b/mm/mmap.c
index 449c0167995e..a6e0dcbe7d2e 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -663,9 +663,9 @@ again:
  *
  * Returns: 0 on success
  */
-inline int vma_expand(struct ma_state *mas, struct vm_area_struct *vma,
-	unsigned long start, unsigned long end, pgoff_t pgoff,
-	struct vm_area_struct *next)
+int vma_expand(struct ma_state *mas, struct vm_area_struct *vma,
+	unsigned long start, unsigned long end, pgoff_t pgoff,
+	struct vm_area_struct *next)
 {
 	bool remove_next = false;
 
@@ -711,6 +711,43 @@ nomem:
 	return -ENOMEM;
 }
 
+/*
+ * vma_shrink() - Reduce an existing VMA's memory area
+ * @mas: The maple state
+ * @vma: The VMA to modify
+ * @start: The new start
+ * @end: The new end
+ *
+ * Returns: 0 on success, -ENOMEM otherwise
+ */
+int vma_shrink(struct ma_state *mas, struct vm_area_struct *vma,
+	unsigned long start, unsigned long end, pgoff_t pgoff)
+{
+	struct vma_locking vma_lock;
+
+	WARN_ON((vma->vm_start != start) && (vma->vm_end != end));
+
+	if (mas_preallocate(mas, vma, GFP_KERNEL))
+		return -ENOMEM;
+
+	init_vma_lock(&vma_lock, vma);
+	vma_adjust_trans_huge(vma, start, end, 0);
+	lock_vma(&vma_lock);
+
+	if (vma->vm_start < start)
+		vma_mas_szero(mas, vma->vm_start, start);
+
+	if (vma->vm_end > end)
+		vma_mas_szero(mas, end, vma->vm_end);
+
+	vma->vm_start = start;
+	vma->vm_end = end;
+	vma->vm_pgoff = pgoff;
+	unlock_vma(&vma_lock, mas, vma->vm_mm);
+	validate_mm(vma->vm_mm);
+	return 0;
+}
+
 /*
  * We cannot adjust vm_start, vm_end, vm_pgoff fields of a vma that
  * is already present in an i_mmap tree without adjusting the tree.
@@ -828,17 +865,8 @@ int __vma_adjust(struct vm_area_struct *vma, unsigned long start,
 
 	lock_vma(&vma_lock);
 
-	if (vma->vm_start < start) {
-		vma_mas_szero(&mas, vma->vm_start, start);
-	} else if (start != vma->vm_start) {
+	if (start < vma->vm_start || end > vma->vm_end)
 		vma_changed = true;
-	}
-
-	if (vma->vm_end > end) {
-		vma_mas_szero(&mas, end, vma->vm_end);
-	} else if (end != vma->vm_end) {
-		vma_changed = true;
-	}
 
 	vma->vm_start = start;
 	vma->vm_end = end;
@@ -850,7 +878,10 @@ int __vma_adjust(struct vm_area_struct *vma, unsigned long start,
 	if (adjust_next) {
 		next->vm_start += adjust_next;
 		next->vm_pgoff += adjust_next >> PAGE_SHIFT;
-		vma_mas_store(next, &mas);
+		if (adjust_next < 0) {
+			BUG_ON(vma_changed);
+			vma_mas_store(next, &mas);
+		}
 	}
 
 	unlock_vma(&vma_lock, &mas, mm);
-- 
2.50.1