www.infradead.org Git - users/jedix/linux-maple.git/commitdiff
mmap: change zeroing of maple tree in __vma_adjust()
author: Liam R. Howlett <Liam.Howlett@Oracle.com>
Thu, 14 Apr 2022 06:07:15 +0000 (23:07 -0700)
committer: Liam R. Howlett <Liam.Howlett@oracle.com>
Wed, 11 May 2022 13:09:53 +0000 (09:09 -0400)
Only write to the maple tree if we are not inserting or the insert isn't
going to overwrite the area to clear.  This avoids spanning writes and
node coalescing when unnecessary.

The change requires a custom search for the linked list addition to find
the correct VMA for the prev link.

Signed-off-by: Liam R. Howlett <Liam.Howlett@oracle.com>
mm/mmap.c

index cb649a0f3a71b0e0e8e02aec5a1741ae50c8cb69..18005c04afe5313983431517152d14697f7ada06 100644 (file)
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -613,11 +613,11 @@ static int vma_link(struct mm_struct *mm, struct vm_area_struct *vma,
  * mm's list and the mm tree.  It has already been inserted into the interval tree.
  */
 static void __insert_vm_struct(struct mm_struct *mm, struct ma_state *mas,
-                              struct vm_area_struct *vma)
+               struct vm_area_struct *vma, unsigned long location)
 {
        struct vm_area_struct *prev;
 
-       mas_set(mas, vma->vm_start);
+       mas_set(mas, location);
        prev = mas_prev(mas, 0);
        vma_mas_store(vma, mas);
        __vma_link_list(mm, vma, prev);
@@ -647,6 +647,7 @@ int __vma_adjust(struct vm_area_struct *vma, unsigned long start,
        int remove_next = 0;
        MA_STATE(mas, &mm->mm_mt, 0, 0);
        struct vm_area_struct *exporter = NULL, *importer = NULL;
+       unsigned long ll_prev = vma->vm_start; /* linked list prev. */
 
        if (next && !insert) {
                if (end >= next->vm_end) {
@@ -778,15 +779,27 @@ again:
        }
 
        if (start != vma->vm_start) {
-               if (vma->vm_start < start)
+               if ((vma->vm_start < start) &&
+                   (!insert || (insert->vm_end != start))) {
                        vma_mas_szero(&mas, vma->vm_start, start);
-               vma_changed = true;
+                       VM_WARN_ON(insert && insert->vm_start > vma->vm_start);
+               } else {
+                       vma_changed = true;
+               }
                vma->vm_start = start;
        }
        if (end != vma->vm_end) {
-               if (vma->vm_end > end)
-                       vma_mas_szero(&mas, end, vma->vm_end);
-               vma_changed = true;
+               if (vma->vm_end > end) {
+                       if (!insert || (insert->vm_start != end)) {
+                               vma_mas_szero(&mas, end, vma->vm_end);
+                               VM_WARN_ON(insert &&
+                                          insert->vm_end < vma->vm_end);
+                       } else if (insert->vm_start == end) {
+                               ll_prev = vma->vm_end;
+                       }
+               } else {
+                       vma_changed = true;
+               }
                vma->vm_end = end;
                if (!next)
                        mm->highest_vm_end = vm_end_gap(vma);
@@ -821,7 +834,7 @@ again:
                 * us to insert it before dropping the locks
                 * (it may either follow vma or precede it).
                 */
-               __insert_vm_struct(mm, &mas, insert);
+               __insert_vm_struct(mm, &mas, insert, ll_prev);
        }
 
        if (anon_vma) {
@@ -908,6 +921,7 @@ again:
        if (insert && file)
                uprobe_mmap(insert);
 
+       mas_destroy(&mas);
        validate_mm(mm);
        return 0;
 }