vma_replace fix
author     Liam R. Howlett <Liam.Howlett@oracle.com>
           Thu, 28 Apr 2022 16:18:54 +0000 (12:18 -0400)
committer  Liam R. Howlett <Liam.Howlett@oracle.com>
           Wed, 11 May 2022 14:46:53 +0000 (10:46 -0400)
Signed-off-by: Liam R. Howlett <Liam.Howlett@oracle.com>
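
This patch folds __vma_adjust() back into vma_adjust(): the "insert"
argument (previously used by split_vma() to hand in a newly split vma) is
dropped together with all of its special-case handling, the old inline
vma_adjust() wrapper disappears, and vma_replace() is fixed to commit its
second maple tree write with mas_store() instead of mas_store_prealloc().
At the call sites, every __vma_adjust(vma, start, end, pgoff, NULL, expand)
becomes vma_adjust(vma, start, end, pgoff, expand); a sketch of the
vma_merge() case 1/6 call site:

	/* Before: the insert slot was always NULL at these call sites. */
	err = __vma_adjust(prev, prev->vm_start, next->vm_end,
			   prev->vm_pgoff, NULL, prev);

	/* After: vma_adjust() takes expand directly; no insert argument. */
	err = vma_adjust(prev, prev->vm_start, next->vm_end,
			 prev->vm_pgoff, prev);
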
include/linux/mm.h
mm/mmap.c

index 5097b25a14adfcebd6b44369d30fdcc7b3ad0516..d0422f62b876e9b90437249cbd01e77c651049f2 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -2643,14 +2643,8 @@ void anon_vma_interval_tree_verify(struct anon_vma_chain *node);
 
 /* mmap.c */
 extern int __vm_enough_memory(struct mm_struct *mm, long pages, int cap_sys_admin);
-extern int __vma_adjust(struct vm_area_struct *vma, unsigned long start,
-       unsigned long end, pgoff_t pgoff, struct vm_area_struct *insert,
-       struct vm_area_struct *expand);
-static inline int vma_adjust(struct vm_area_struct *vma, unsigned long start,
-       unsigned long end, pgoff_t pgoff, struct vm_area_struct *insert)
-{
-       return __vma_adjust(vma, start, end, pgoff, insert, NULL);
-}
+extern int vma_adjust(struct vm_area_struct *vma, unsigned long start,
+             unsigned long end, pgoff_t pgoff, struct vm_area_struct *expand);
 extern struct vm_area_struct *vma_merge(struct mm_struct *,
        struct vm_area_struct *prev, unsigned long addr, unsigned long end,
        unsigned long vm_flags, struct anon_vma *, struct file *, pgoff_t,
index dbd33af835d66beb3a6339bd3367409b2c7e1eae..2dc1de1cbd99861287abd2624ecebed143e18203 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -658,9 +658,8 @@ nomem:
  * are necessary.  The "insert" vma (if any) is to be inserted
  * before we drop the necessary locks.
  */
-int __vma_adjust(struct vm_area_struct *vma, unsigned long start,
-       unsigned long end, pgoff_t pgoff, struct vm_area_struct *insert,
-       struct vm_area_struct *expand)
+int vma_adjust(struct vm_area_struct *vma, unsigned long start,
+              unsigned long end, pgoff_t pgoff, struct vm_area_struct *expand)
 {
        struct mm_struct *mm = vma->vm_mm;
        struct vm_area_struct *next_next, *next = find_vma(mm, vma->vm_end);
@@ -675,7 +674,7 @@ int __vma_adjust(struct vm_area_struct *vma, unsigned long start,
        MA_STATE(mas, &mm->mm_mt, 0, 0);
        struct vm_area_struct *exporter = NULL, *importer = NULL;
 
-       if (next && !insert) {
+       if (next) {
                if (end >= next->vm_end) {
                        /*
                         * vma expands, overlapping all the next, and
@@ -732,9 +731,8 @@ int __vma_adjust(struct vm_area_struct *vma, unsigned long start,
                        VM_WARN_ON(expand != importer);
                } else if (end < vma->vm_end) {
                        /*
-                        * vma shrinks, and !insert tells it's not
-                        * split_vma inserting another: so it must be
-                        * mprotect case 4 shifting the boundary down.
+                        * vma shrinks so it must be mprotect case 4 shifting
+                        * the boundary down.
                         */
                        adjust_next = -(vma->vm_end - end);
                        exporter = vma;
@@ -774,15 +772,6 @@ again:
                        uprobe_munmap(next, next->vm_start, next->vm_end);
 
                i_mmap_lock_write(mapping);
-               if (insert && insert->vm_file) {
-                       /*
-                        * Put into interval tree now, so instantiated pages
-                        * are visible to arm/parisc __flush_dcache_page
-                        * throughout; but we cannot insert into address
-                        * space until vma start or end is updated.
-                        */
-                       __vma_link_file(insert, insert->vm_file->f_mapping);
-               }
        }
 
        anon_vma = vma->anon_vma;
@@ -805,10 +794,8 @@ again:
        }
 
        if (start != vma->vm_start) {
-               if ((vma->vm_start < start) &&
-                   (!insert || (insert->vm_end != start))) {
+               if (vma->vm_start < start) {
                        vma_mas_szero(&mas, vma->vm_start, start);
-                       VM_WARN_ON(insert && insert->vm_start > vma->vm_start);
                } else {
                        vma_changed = true;
                }
@@ -816,12 +803,8 @@ again:
        }
        if (end != vma->vm_end) {
                if (vma->vm_end > end) {
-                       if (!insert || (insert->vm_start != end)) {
-                               vma_mas_szero(&mas, end, vma->vm_end);
-                               mas_reset(&mas);
-                               VM_WARN_ON(insert &&
-                                          insert->vm_end < vma->vm_end);
-                       }
+                       vma_mas_szero(&mas, end, vma->vm_end);
+                       mas_reset(&mas);
                } else {
                        vma_changed = true;
                }
@@ -845,18 +828,8 @@ again:
                flush_dcache_mmap_unlock(mapping);
        }
 
-       if (remove_next && file) {
+       if (remove_next && file)
                __remove_shared_vm_struct(next, file, mapping);
-       } else if (insert) {
-               /*
-                * split_vma has split insert from vma, and needs
-                * us to insert it before dropping the locks
-                * (it may either follow vma or precede it).
-                */
-               mas_reset(&mas);
-               vma_mas_store(insert, &mas);
-               mm->map_count++;
-       }
 
        if (anon_vma) {
                anon_vma_interval_tree_post_update_vma(vma);
@@ -918,9 +891,6 @@ again:
                        goto again;
                }
        }
-       if (insert && file) {
-               uprobe_mmap(insert);
-       }
 
        mas_destroy(&mas);
        validate_mm(mm);
@@ -1114,12 +1084,11 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
                                is_mergeable_anon_vma(prev->anon_vma,
                                                      next->anon_vma, NULL)) {
                                                        /* cases 1, 6 */
-                       err = __vma_adjust(prev, prev->vm_start,
-                                        next->vm_end, prev->vm_pgoff, NULL,
-                                        prev);
+                       err = vma_adjust(prev, prev->vm_start, next->vm_end,
+                                        prev->vm_pgoff, prev);
                } else                                  /* cases 2, 5, 7 */
-                       err = __vma_adjust(prev, prev->vm_start,
-                                        end, prev->vm_pgoff, NULL, prev);
+                       err = vma_adjust(prev, prev->vm_start, end,
+                                        prev->vm_pgoff, prev);
                if (err)
                        return NULL;
                khugepaged_enter_vma_merge(prev, vm_flags);
@@ -1135,11 +1104,11 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
                                             anon_vma, file, pgoff+pglen,
                                             vm_userfaultfd_ctx, anon_name)) {
                if (prev && addr < prev->vm_end)        /* case 4 */
-                       err = __vma_adjust(prev, prev->vm_start,
-                                        addr, prev->vm_pgoff, NULL, next);
+                       err = vma_adjust(prev, prev->vm_start, addr,
+                                        prev->vm_pgoff, next);
                else {                                  /* cases 3, 8 */
-                       err = __vma_adjust(area, addr, next->vm_end,
-                                        next->vm_pgoff - pglen, NULL, next);
+                       err = vma_adjust(area, addr, next->vm_end,
+                                        next->vm_pgoff - pglen, next);
                        /*
                         * In case 3 area is already equal to next and
                         * this is a noop, but in case 8 "area" has
@@ -2288,7 +2257,7 @@ static inline int vma_replace(struct vm_area_struct *vma,
        mas_store(&mas, start);
        mm->map_count++;
        mas_set_range(&mas, end->vm_start, end->vm_end - 1);
-       mas_store_prealloc(&mas, end);
+       mas_store(&mas, end);
        /* map_count is fine here since one vma was just overwritten */
        BUG_ON(start->vm_end != end->vm_start);
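
The one-line vma_replace() change is the fix named in the subject: the
second store switches from mas_store_prealloc() to mas_store(). A plausible
reading, not spelled out in the commit itself: mas_store_prealloc() commits
using nodes reserved by an earlier mas_preallocate() and tears the ma_state
down when it finishes, so it suits only a single, final, must-not-fail
write, while this path issues two stores on the same ma_state. A hedged
sketch of the two write styles (signatures as used on this development
branch; later kernels differ):

	/* Plain write: mas_store() allocates nodes on demand and reports
	 * failure through the ma_state.
	 */
	mas_store(&mas, vma);
	if (mas_is_err(&mas))
		goto error;

	/* Preallocated write: reserve nodes up front where the store must
	 * not fail, then commit with mas_store_prealloc(), which also
	 * releases any unused reservation.
	 */
	if (mas_preallocate(&mas, vma, GFP_KERNEL))
		goto nomem;
	mas_store_prealloc(&mas, vma);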