mm/mmap: Fix do_brk_flags and do_brk_munmap locking.
author    Liam R. Howlett <Liam.Howlett@Oracle.com>
Tue, 1 Dec 2020 02:26:18 +0000 (21:26 -0500)
committer Liam R. Howlett <Liam.Howlett@Oracle.com>
Tue, 5 Jan 2021 17:33:30 +0000 (12:33 -0500)
Also use the fact that a mapping was found at newbrk to determine that a
munmap is necessary.

Signed-off-by: Liam R. Howlett <Liam.Howlett@Oracle.com>
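
For orientation before the diff: a minimal sketch of the lookup pattern this
change introduces (illustrative only, not the committed code; all names mirror
the hunks below). Walking the maple tree at newbrk answers both questions at
once: a hit means part of the old heap must be unmapped, a miss means the heap
only grows.

        MA_STATE(mas, &mm->mm_mt, 0, 0);

        mas_set(&mas, newbrk);
        brkvma = mas_walk(&mas);        /* VMA covering newbrk, or NULL */
        if (brkvma) {
                /* Shrinking: a mapping exists at newbrk; unmap up to oldbrk. */
                mas.last = oldbrk - 1;
                ret = do_brk_munmap(&mas, brkvma, newbrk, oldbrk, &uf);
        } else {
                /* Growing: only the gap to the next mapping needs checking. */
                next = mas_next(&mas, ULONG_MAX);
        }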
mm/mmap.c

index 6b5fdaffb7b941c4796e03182c96f160649c2faa..b43452e0ae9e9603a9358061369fb14a0a15eaf3 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -197,7 +197,7 @@ SYSCALL_DEFINE1(brk, unsigned long, brk)
        bool populate;
        bool downgraded = false;
        LIST_HEAD(uf);
-       MA_STATE(mas, &mm->mm_mt, mm->start_brk, mm->start_brk);
+       MA_STATE(mas, &mm->mm_mt, 0, 0);
 
        if (mmap_write_lock_killable(mm))
                return -EINTR;
@@ -236,45 +236,40 @@ SYSCALL_DEFINE1(brk, unsigned long, brk)
                mm->brk = brk;
                goto success;
        }
-       if (mm->brk > mm->start_brk)
-               mas_set(&mas, mm->brk - 1);
-       brkvma = mas_walk(&mas);
-       if (brkvma) {
 
+       mas_set(&mas, newbrk);
+       brkvma = mas_walk(&mas);
+       if (brkvma) { // munmap necessary: something is mapped at newbrk.
                /*
                 * Always allow shrinking brk.
                 * do_brk_munmap() may downgrade mmap_lock to read.
                 */
-               if (brk <= mm->brk) { // newbrk < oldbrk
-                       int ret;
-                       /*
-                        * mm->brk must to be protected by write mmap_lock.
-                        * do_brk_munmap() may downgrade the lock,  so update it
-                        * before calling do_brk_munmap().
-                        */
-                       mm->brk = brk;
-                       ret = do_brk_munmap(&mas, brkvma, newbrk, oldbrk, &uf);
-                       if (ret == 1)  {
-                               downgraded = true;
-                               goto success;
-                       } else if (!ret)
-                               goto success;
-
-                       mm->brk = origbrk;
-                       goto out;
-               }
-               next = brkvma->vm_next;
-       } else {
-               next = mas_next(&mas, ULONG_MAX);
-               mas_prev(&mas, 0);
+               int ret;
+               /*
+                * mm->brk must be protected by the write mmap_lock.
+                * do_brk_munmap() may downgrade the lock, so update it
+                * before calling do_brk_munmap().
+                */
+               mm->brk = brk;
+               mas.last = oldbrk - 1;
+               ret = do_brk_munmap(&mas, brkvma, newbrk, oldbrk, &uf);
+               if (ret == 1) {
+                       downgraded = true;
+                       goto success;
+               } else if (!ret)
+                       goto success;
+
+               mm->brk = origbrk;
+               goto out;
        }
-
+       next = mas_next(&mas, ULONG_MAX);
        /* Check against existing mmap mappings. */
        if (next && newbrk + PAGE_SIZE > vm_start_gap(next))
                goto out;
 
        /* Ok, looks good - let it rip. */
-       if (do_brk_flags(&mas, &brkvma, oldbrk, newbrk-oldbrk, 0) < 0)
+       brkvma = mas_prev(&mas, mm->start_brk);
+       if (do_brk_flags(&mas, &brkvma, oldbrk, newbrk - oldbrk, 0) < 0)
                goto out;
 
        mm->brk = brk;
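
The downgraded flag set in this hunk matters at the success label, where brk()
releases whichever lock it still holds. For context, the (unchanged) exit path,
roughly as it stands upstream around v5.10:

success:
        populate = newbrk > oldbrk && (mm->def_flags & VM_LOCKED) != 0;
        if (downgraded)
                mmap_read_unlock(mm);   /* do_brk_munmap() downgraded for us */
        else
                mmap_write_unlock(mm);
        userfaultfd_unmap_complete(mm, &uf);
        if (populate)
                mm_populate(oldbrk, newbrk - oldbrk);
        return brk;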
@@ -2984,15 +2979,19 @@ static int do_brk_munmap(struct ma_state *mas, struct vm_area_struct *vma,
 
        // Change the oldbrk of vma to the newbrk of the munmap area
        vma_adjust_trans_huge(vma, vma->vm_start, newbrk, 0);
-       anon_vma_lock_write(vma->anon_vma);
-       anon_vma_interval_tree_pre_update_vma(vma);
+       if (vma->anon_vma) {
+               anon_vma_lock_write(vma->anon_vma);
+               anon_vma_interval_tree_pre_update_vma(vma);
+       }
 
        vma->vm_end = newbrk;
        if (vma_mas_remove(&unmap, mas))
                goto mas_store_fail;
 
-       anon_vma_interval_tree_post_update_vma(vma);
-       anon_vma_unlock_write(vma->anon_vma);
+       if (vma->anon_vma) {
+               anon_vma_interval_tree_post_update_vma(vma);
+               anon_vma_unlock_write(vma->anon_vma);
+       }
 
        unmap_pages = vma_pages(&unmap);
        if (unmap.vm_flags & VM_LOCKED) {
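
The NULL checks added in this hunk are needed because a brk vma that has never
taken an anonymous fault still has vma->anon_vma == NULL, and the previously
unconditional locking would dereference it. From include/linux/rmap.h:

static inline void anon_vma_lock_write(struct anon_vma *anon_vma)
{
        down_write(&anon_vma->root->rwsem);     /* oopses if anon_vma is NULL */
}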
@@ -3063,25 +3062,37 @@ static int do_brk_flags(struct ma_state *mas, struct vm_area_struct **brkvma,
        if (security_vm_enough_memory_mm(mm, len >> PAGE_SHIFT))
                return -ENOMEM;
 
-       if (brkvma && *brkvma) {
+       mas->last = addr + len - 1;
+       if (*brkvma) {
                vma = *brkvma;
                /* Expand the existing vma if possible; almost never a singular
                 * list, so this will almost always fail. */
-               if (list_is_singular(&vma->anon_vma_chain) &&
-                   ((vma->vm_flags & ~VM_SOFTDIRTY) == flags)) {
-                       anon_vma_lock_write(vma->anon_vma);
-                       anon_vma_interval_tree_pre_update_vma(vma);
-                       vma->vm_end = addr + len;
+
+               if ((!vma->anon_vma ||
+                    list_is_singular(&vma->anon_vma_chain)) &&
+                    ((vma->vm_flags & ~VM_SOFTDIRTY) == flags)) {
                        mas->index = vma->vm_start;
-                       mas->last = vma->vm_end - 1;
+
+                       vma_adjust_trans_huge(vma, addr, addr + len, 0);
+                       if (vma->anon_vma) {
+                               anon_vma_lock_write(vma->anon_vma);
+                               anon_vma_interval_tree_pre_update_vma(vma);
+                       }
+                       vma->vm_end = addr + len;
+                       vma->vm_flags |= VM_SOFTDIRTY;
                        if (mas_store_gfp(mas, vma, GFP_KERNEL))
                                goto mas_mod_fail;
-                       anon_vma_interval_tree_post_update_vma(vma);
-                       anon_vma_unlock_write(vma->anon_vma);
+                       if (vma->anon_vma) {
+                               anon_vma_interval_tree_post_update_vma(vma);
+                               anon_vma_unlock_write(vma->anon_vma);
+                       }
+                       khugepaged_enter_vma_merge(vma, flags);
                        goto out;
                }
                prev = vma;
        }
+       mas->index = addr;
+       mas_walk(mas);
 
        /* create a vma struct for an anonymous mapping */
        vma = vm_area_alloc(mm);
@@ -3099,6 +3110,7 @@ static int do_brk_flags(struct ma_state *mas, struct vm_area_struct **brkvma,
 
        if (!prev)
                prev = mas_prev(mas, 0);
+
        __vma_link_list(mm, vma, prev);
        mm->map_count++;
        *brkvma = vma;
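
Taken together, do_brk_flags() now prefers growing the existing brk vma in
place and only falls back to allocating a fresh vma. A condensed decision
sketch, assuming the names from the hunks above (illustrative; error paths and
list linking elided):

        mas->last = addr + len - 1;
        if (*brkvma &&
            (!(*brkvma)->anon_vma || list_is_singular(&(*brkvma)->anon_vma_chain)) &&
            (((*brkvma)->vm_flags & ~VM_SOFTDIRTY) == flags)) {
                /* Fast path: stretch vm_end and store the widened range. */
                mas->index = (*brkvma)->vm_start;
                (*brkvma)->vm_end = addr + len;
                if (mas_store_gfp(mas, *brkvma, GFP_KERNEL))
                        return -ENOMEM;
        } else {
                /* Slow path: a new anonymous vma spanning [addr, addr + len). */
                vma = vm_area_alloc(mm);
                if (!vma)
                        return -ENOMEM;
                vma_set_anonymous(vma);
                vma->vm_start = addr;
                vma->vm_end = addr + len;
                vma->vm_pgoff = addr >> PAGE_SHIFT;
                vma->vm_flags = flags | VM_SOFTDIRTY;
                vma->vm_page_prot = vm_get_page_prot(flags);
                /* ... store in the maple tree and __vma_link_list() as above. */
        }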