mm/mmap: Don't use __vma_adjust() in shift_arg_pages()
author Liam R. Howlett <Liam.Howlett@oracle.com>
Wed, 10 Aug 2022 21:24:05 +0000 (17:24 -0400)
committer Liam R. Howlett <Liam.Howlett@oracle.com>
Fri, 26 Aug 2022 17:57:55 +0000 (13:57 -0400)
Introduce vma_shrink(), which uses the lock_vma() and unlock_vma()
functions to reduce the range a VMA covers.

Convert shift_arg_pages() to use vma_expand() and the new vma_shrink()
function.  Remove the shrinking support from __vma_adjust() since
shift_arg_pages() is the only user that shrinks a VMA in this way.
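
For reference, the resulting flow in shift_arg_pages() looks roughly like
the condensed sketch below (illustration only; the page table move, TLB
handling, and their error paths are elided):

    MA_STATE(mas, &vma->vm_mm->mm_mt, 0, 0);

    /* Grow the VMA to cover the whole range [new_start, old_end). */
    if (vma_expand(&mas, vma, new_start, old_end, vma->vm_pgoff, NULL))
            return -ENOMEM;

    /* ... move the page tables down by 'shift' and free the old tail ... */

    /* Trim the VMA back to just [new_start, new_end); this can now fail. */
    if (vma_shrink(&mas, vma, new_start, new_end, vma->vm_pgoff))
            return -ENOMEM;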

Signed-off-by: Liam R. Howlett <Liam.Howlett@oracle.com>
fs/exec.c
include/linux/mm.h
mm/mmap.c

index f3a5304344d31f5d2f84d5331628de1c9b7c00db..50e83652f75afa30efd821b351a3213a1c7190c7 100644
--- a/fs/exec.c
+++ b/fs/exec.c
@@ -686,6 +686,7 @@ static int shift_arg_pages(struct vm_area_struct *vma, unsigned long shift)
        VMA_ITERATOR(vmi, mm, new_start);
        struct vm_area_struct *next;
        struct mmu_gather tlb;
+       MA_STATE(mas, &vma->vm_mm->mm_mt, 0, 0);
 
        BUG_ON(new_start > new_end);
 
@@ -699,7 +700,7 @@ static int shift_arg_pages(struct vm_area_struct *vma, unsigned long shift)
        /*
         * cover the whole range: [new_start, old_end)
         */
-       if (vma_adjust(vma, new_start, old_end, vma->vm_pgoff))
+       if (vma_expand(&mas, vma, new_start, old_end, vma->vm_pgoff, NULL))
                return -ENOMEM;
 
        /*
@@ -732,9 +733,10 @@ static int shift_arg_pages(struct vm_area_struct *vma, unsigned long shift)
        tlb_finish_mmu(&tlb);
 
        /*
-        * Shrink the vma to just the new range.  Always succeeds.
+        * Shrink the vma to just the new range.
         */
-       vma_adjust(vma, new_start, new_end, vma->vm_pgoff);
+       if (vma_shrink(&mas, vma, new_start, new_end, vma->vm_pgoff))
+               return -ENOMEM;
 
        return 0;
 }
index 81b477cbdbf31c956d474b9ca9db1b7811853706..6e1a66901ad799b7db3e16f6204ae5f36c5a7f5f 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -2641,13 +2641,11 @@ void anon_vma_interval_tree_verify(struct anon_vma_chain *node);
 
 /* mmap.c */
 extern int __vm_enough_memory(struct mm_struct *mm, long pages, int cap_sys_admin);
-extern int __vma_adjust(struct vm_area_struct *vma, unsigned long start,
-       unsigned long end, pgoff_t pgoff, struct vm_area_struct *expand);
-static inline int vma_adjust(struct vm_area_struct *vma, unsigned long start,
-       unsigned long end, pgoff_t pgoff)
-{
-       return __vma_adjust(vma, start, end, pgoff, NULL);
-}
+extern int vma_expand(struct ma_state *mas, struct vm_area_struct *vma,
+                     unsigned long start, unsigned long end, pgoff_t pgoff,
+                     struct vm_area_struct *next);
+extern int vma_shrink(struct ma_state *mas, struct vm_area_struct *vma,
+                     unsigned long start, unsigned long end, pgoff_t pgoff);
 extern struct vm_area_struct *vma_merge(struct mm_struct *,
        struct vm_area_struct *prev, unsigned long addr, unsigned long end,
        unsigned long vm_flags, struct anon_vma *, struct file *, pgoff_t,
index 301bc593f78b8442d033648cc498a85d21203c82..69c69a5b2a59226fb6da4d8fffbe04168fe7abe6 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -663,9 +663,9 @@ again:
  *
  * Returns: 0 on success
  */
-inline int vma_expand(struct ma_state *mas, struct vm_area_struct *vma,
-                     unsigned long start, unsigned long end, pgoff_t pgoff,
-                     struct vm_area_struct *next)
+int vma_expand(struct ma_state *mas, struct vm_area_struct *vma,
+              unsigned long start, unsigned long end, pgoff_t pgoff,
+              struct vm_area_struct *next)
 
 {
        bool remove_next = false;
@@ -711,6 +711,43 @@ nomem:
        return -ENOMEM;
 }
 
+/*
+ * vma_shrink() - Reduce an existing VMA's memory area
+ * @mas: The maple state
+ * @vma: The VMA to modify
+ * @start: The new start
+ * @end: The new end
+ *
+ * Returns: 0 on success, -ENOMEM otherwise
+ */
+int vma_shrink(struct ma_state *mas, struct vm_area_struct *vma,
+              unsigned long start, unsigned long end, pgoff_t pgoff)
+{
+       struct vma_locking vma_lock;
+
+       WARN_ON((vma->vm_start != start) && (vma->vm_end != end));
+
+       if (mas_preallocate(mas, vma, GFP_KERNEL))
+               return -ENOMEM;
+
+       init_vma_lock(&vma_lock, vma);
+       vma_adjust_trans_huge(vma, start, end, 0);
+       lock_vma(&vma_lock);
+
+       if (vma->vm_start < start)
+               vma_mas_szero(mas, vma->vm_start, start);
+
+       if (vma->vm_end > end)
+               vma_mas_szero(mas, end, vma->vm_end);
+
+       vma->vm_start = start;
+       vma->vm_end = end;
+       vma->vm_pgoff = pgoff;
+       unlock_vma(&vma_lock, mas, vma->vm_mm);
+       validate_mm(vma->vm_mm);
+       return 0;
+}
+
 /*
  * We cannot adjust vm_start, vm_end, vm_pgoff fields of a vma that
  * is already present in an i_mmap tree without adjusting the tree.
@@ -828,17 +865,8 @@ int __vma_adjust(struct vm_area_struct *vma, unsigned long start,
 
        lock_vma(&vma_lock);
 
-       if (vma->vm_start < start) {
-               vma_mas_szero(&mas, vma->vm_start, start);
-       } else if (start != vma->vm_start) {
+       if (start < vma->vm_start || end > vma->vm_end)
                vma_changed = true;
-       }
-
-       if (vma->vm_end > end) {
-               vma_mas_szero(&mas, end, vma->vm_end);
-       } else if (end != vma->vm_end) {
-               vma_changed = true;
-       }
 
        vma->vm_start = start;
        vma->vm_end = end;
@@ -850,7 +878,10 @@ int __vma_adjust(struct vm_area_struct *vma, unsigned long start,
        if (adjust_next) {
                next->vm_start += adjust_next;
                next->vm_pgoff += adjust_next >> PAGE_SHIFT;
-               vma_mas_store(next, &mas);
+               if (adjust_next < 0) {
+                       BUG_ON(vma_changed);
+                       vma_mas_store(next, &mas);
+               }
        }
 
        unlock_vma(&vma_lock, &mas, mm);