www.infradead.org Git - users/jedix/linux-maple.git/commitdiff
mm: Add vma iterator to vma_adjust() arguments (tag: v6.1-rc4_vmi)
author: Liam Howlett <Liam.Howlett@oracle.com>
Mon, 14 Nov 2022 19:54:13 +0000 (14:54 -0500)
committer: Liam Howlett <Liam.Howlett@oracle.com>
Mon, 14 Nov 2022 19:57:07 +0000 (14:57 -0500)
Change the vma_adjust() function definition to accept the vma iterator
and pass it through to __vma_adjust().

Update fs/exec to use the new vma_adjust() function parameters.

Revert the __split_vma() calls back from __vma_adjust() to vma_adjust()
and pass through the vma iterator.

Signed-off-by: Liam Howlett <Liam.Howlett@oracle.com>
fs/exec.c
include/linux/mm.h
mm/mmap.c

index de9eaf5926bc5bfac1e9291394759a2504dc4cf7..8d2b47e436b01919ad61c997938cfb9627a50aa9 100644 (file)
--- a/fs/exec.c
+++ b/fs/exec.c
@@ -698,7 +698,7 @@ static int shift_arg_pages(struct vm_area_struct *vma, unsigned long shift)
        /*
         * cover the whole range: [new_start, old_end)
         */
-       if (vma_adjust(vma, new_start, old_end, vma->vm_pgoff, NULL))
+       if (vma_adjust(&vmi, vma, new_start, old_end, vma->vm_pgoff, NULL))
                return -ENOMEM;
 
        /*
@@ -730,12 +730,9 @@ static int shift_arg_pages(struct vm_area_struct *vma, unsigned long shift)
        }
        tlb_finish_mmu(&tlb);
 
-       /*
-        * Shrink the vma to just the new range.  Always succeeds.
-        */
-       vma_adjust(vma, new_start, new_end, vma->vm_pgoff, NULL);
-
-       return 0;
+       vma_prev(&vmi);
+       /* Shrink the vma to just the new range */
+       return vma_adjust(&vmi, vma, new_start, new_end, vma->vm_pgoff, NULL);
 }
 
 /*
index 82014c1cd5c031aadeccca6f5209a66b9f2c09af..11a1985806a92dd29880cd7ef59d83df41a1c72b 100644 (file)
@@ -2752,12 +2752,11 @@ extern int __vm_enough_memory(struct mm_struct *mm, long pages, int cap_sys_admi
 extern int __vma_adjust(struct vma_iterator *vmi, struct vm_area_struct *vma, unsigned long start,
        unsigned long end, pgoff_t pgoff, struct vm_area_struct *insert,
        struct vm_area_struct *expand);
-static inline int vma_adjust(struct vm_area_struct *vma, unsigned long start,
-       unsigned long end, pgoff_t pgoff, struct vm_area_struct *insert)
+static inline int vma_adjust(struct vma_iterator *vmi,
+       struct vm_area_struct *vma, unsigned long start, unsigned long end,
+       pgoff_t pgoff, struct vm_area_struct *insert)
 {
-       VMA_ITERATOR(vmi, vma->vm_mm, start);
-
-       return __vma_adjust(&vmi, vma, start, end, pgoff, insert, NULL);
+       return __vma_adjust(vmi, vma, start, end, pgoff, insert, NULL);
 }
 extern struct vm_area_struct *vma_merge(struct vma_iterator *vmi,
        struct mm_struct *, struct vm_area_struct *prev, unsigned long addr,
index e8da16e5054ae63685a009fc46d8629c79873803..22e8331add67edf5d3d7bb5c9e70f6866d2ace77 100644 (file)
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -2194,12 +2194,12 @@ int __split_vma(struct vma_iterator *vmi, struct vm_area_struct *vma,
                new->vm_ops->open(new);
 
        if (new_below)
-               err = __vma_adjust(vmi, vma, addr, vma->vm_end,
-                  vma->vm_pgoff + ((addr - new->vm_start) >> PAGE_SHIFT),
-                  new, NULL);
+               err = vma_adjust(vmi, vma, addr, vma->vm_end,
+                       vma->vm_pgoff + ((addr - new->vm_start) >> PAGE_SHIFT),
+                       new);
        else
-               err = __vma_adjust(vmi, vma, vma->vm_start, addr, vma->vm_pgoff,
-                                new, NULL);
+               err = vma_adjust(vmi, vma, vma->vm_start, addr, vma->vm_pgoff,
+                                new);
 
        /* Success. */
        if (!err) {