From: Liam R. Howlett <Liam.Howlett@oracle.com>
Date: Wed, 30 Mar 2022 17:35:49 +0000 (-0400)
Subject: mm: Change munmap splitting order and move_vma()
X-Git-Url: https://www.infradead.org/git/?a=commitdiff_plain;h=ef997fbd234b14fc954ab1eda08e6cb00fe88f11;p=users%2Fjedix%2Flinux-maple.git

mm: Change munmap splitting order and move_vma()

Splitting can be more efficient when done in the reverse order to
minimize VMA walking.  Change do_mas_align_munmap() to reduce walking
of the tree during split operations.

move_vma() must also be altered to remove the dependency of keeping the
original VMA as the active part of the split.  Look up the new VMA or
two if necessary.

Signed-off-by: Liam R. Howlett <Liam.Howlett@oracle.com>
---
diff --git a/mm/mmap.c b/mm/mmap.c
index f52bbf9aa0245..b39f1b4f79db9 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -2338,14 +2338,7 @@ do_mas_align_munmap(struct ma_state *mas, struct vm_area_struct *vma,
 	mt_set_external_lock(&mt_detach, &mm->mmap_lock);
 
 	mas->last = end - 1;
-	/*
-	 * If we need to split any vma, do it now to save pain later.
-	 *
-	 * Note: mremap's move_vma VM_ACCOUNT handling assumes a partially
-	 * unmapped vm_area_struct will remain in use: so lower split_vma
-	 * places tmp vma above, and higher split_vma places tmp vma below.
-	 */
-
+	/* If we need to split any vma, do it now to save pain later. */
 	/* Does it split the first one? */
 	if (start > vma->vm_start) {
 		int error;
@@ -2362,18 +2355,18 @@ do_mas_align_munmap(struct ma_state *mas, struct vm_area_struct *vma,
 		 * mas_pause() is not needed since mas->index needs to be set
 		 * differently than vma->vm_end anyways.
 		 */
-		error = __split_vma(mm, vma, start, 0);
+		error = __split_vma(mm, vma, start, 1);
 		if (error)
 			return error;
 
-		mas_set(mas, start);
-		vma = mas_walk(mas);
+		mas_set(mas, start - 1);
+		prev = mas_walk(mas);
+	} else {
+		prev = mas_prev(mas, 0);
+		if (unlikely((!prev)))
+			mas_set(mas, start);
 	}
 
-	prev = mas_prev(mas, 0);
-	if (unlikely((!prev)))
-		mas_set(mas, start);
-
 	/*
 	 * Detach a range of VMAs from the mm. Using next as a temp variable as
 	 * it is always overwritten.
@@ -2381,20 +2374,13 @@ do_mas_align_munmap(struct ma_state *mas, struct vm_area_struct *vma,
 	mas_for_each(mas, next, end - 1) {
 		/* Does it split the end? */
 		if (next->vm_end > end) {
-			struct vm_area_struct *split;
 			int error;
 
-			error = __split_vma(mm, next, end, 1);
+			error = __split_vma(mm, next, end, 0);
 			if (error)
 				return error;
 
 			mas_set(mas, end);
-			split = mas_prev(mas, 0);
-			munmap_sidetree(split, &mas_detach);
-			count++;
-			if (vma == next)
-				vma = split;
-			break;
 		}
 		count++;
 		munmap_sidetree(next, &mas_detach);
@@ -2404,9 +2390,7 @@ do_mas_align_munmap(struct ma_state *mas, struct vm_area_struct *vma,
 #endif
 	}
 
-	if (!next)
-		next = mas_next(mas, ULONG_MAX);
-
+	next = mas_find(mas, ULONG_MAX);
 	if (unlikely(uf)) {
 		/*
 		 * If userfaultfd_unmap_prep returns an error the vmas
diff --git a/mm/mremap.c b/mm/mremap.c
index c0d32330d4353..8d9f89e49bd82 100644
--- a/mm/mremap.c
+++ b/mm/mremap.c
@@ -578,9 +578,9 @@ static unsigned long move_vma(struct vm_area_struct *vma,
 	unsigned long vm_flags = vma->vm_flags;
 	unsigned long new_pgoff;
 	unsigned long moved_len;
-	unsigned long excess = 0;
+	unsigned long account_start = 0;
+	unsigned long account_end = 0;
 	unsigned long hiwater_vm;
-	int split = 0;
 	int err = 0;
 	bool need_rmap_locks;
 
@@ -660,10 +660,10 @@ static unsigned long move_vma(struct vm_area_struct *vma,
 	/* Conceal VM_ACCOUNT so old reservation is not undone */
 	if (vm_flags & VM_ACCOUNT && !(flags & MREMAP_DONTUNMAP)) {
 		vma->vm_flags &= ~VM_ACCOUNT;
-		excess = vma->vm_end - vma->vm_start - old_len;
-		if (old_addr > vma->vm_start &&
-		    old_addr + old_len < vma->vm_end)
-			split = 1;
+		if (vma->vm_start < old_addr)
+			account_start = vma->vm_start;
+		if (vma->vm_end > old_addr + old_len)
+			account_end = vma->vm_end - 1;
 	}
 
 	/*
@@ -702,7 +702,7 @@ static unsigned long move_vma(struct vm_area_struct *vma,
 		/* OOM: unable to split vma, just get accounts right */
 		if (vm_flags & VM_ACCOUNT && !(flags & MREMAP_DONTUNMAP))
 			vm_acct_memory(old_len >> PAGE_SHIFT);
-		excess = 0;
+		account_start = account_end = 0;
 	}
 
 	if (vm_flags & VM_LOCKED) {
@@ -713,10 +713,14 @@ static unsigned long move_vma(struct vm_area_struct *vma,
 	mm->hiwater_vm = hiwater_vm;
 
 	/* Restore VM_ACCOUNT if one or two pieces of vma left */
-	if (excess) {
+	if (account_start) {
+		vma = vma_lookup(mm, account_start);
+		vma->vm_flags |= VM_ACCOUNT;
+	}
+
+	if (account_end) {
+		vma = vma_lookup(mm, account_end);
 		vma->vm_flags |= VM_ACCOUNT;
-		if (split)
-			find_vma(mm, vma->vm_end)->vm_flags |= VM_ACCOUNT;
 	}
 
 	return new_addr;
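
Illustrative sketch (not part of the patch above): a minimal user-space C model
of the accounting change in move_vma().  It records the addresses of the pieces
that will remain on either side of the unmapped range (account_start and
account_end) and looks them up again afterwards, rather than assuming the
original vm_area_struct survives as a particular piece of the split.  Every
name in the sketch (struct range, lookup(), unmap()) is a simplified stand-in,
not a kernel API.

/* Simplified stand-ins: struct range ~ vm_area_struct, lookup() ~ vma_lookup(). */
#include <assert.h>
#include <stdio.h>

struct range {
	unsigned long start, end;	/* half-open interval [start, end) */
	int account;			/* stand-in for VM_ACCOUNT */
};

static struct range map[8];		/* stand-in for the VMA tree */
static int nranges;

static struct range *lookup(unsigned long addr)
{
	for (int i = 0; i < nranges; i++)
		if (map[i].start <= addr && addr < map[i].end)
			return &map[i];
	return NULL;
}

/* Remove [start, end) from the single range covering it; keep the leftovers. */
static void unmap(unsigned long start, unsigned long end)
{
	struct range *r = lookup(start);
	struct range low, high;

	assert(r && r->start <= start && end <= r->end);
	low = (struct range){ r->start, start, 0 };
	high = (struct range){ end, r->end, 0 };

	nranges = 0;
	if (low.start < low.end)
		map[nranges++] = low;
	if (high.start < high.end)
		map[nranges++] = high;
}

int main(void)
{
	unsigned long old_addr = 0x3000, old_len = 0x2000;
	unsigned long account_start = 0, account_end = 0;
	struct range *vma;

	map[0] = (struct range){ 0x1000, 0x7000, 1 };	/* one accounted mapping */
	nranges = 1;
	vma = lookup(old_addr);

	/* Conceal accounting, remembering where the leftover pieces will live. */
	vma->account = 0;
	if (vma->start < old_addr)
		account_start = vma->start;
	if (vma->end > old_addr + old_len)
		account_end = vma->end - 1;

	unmap(old_addr, old_addr + old_len);	/* 'vma' must not be trusted now */

	/* Restore accounting on whatever remains, found by address. */
	if (account_start)
		lookup(account_start)->account = 1;
	if (account_end)
		lookup(account_end)->account = 1;

	for (int i = 0; i < nranges; i++)
		printf("[%#lx, %#lx) account=%d\n",
		       map[i].start, map[i].end, map[i].account);
	return 0;
}

Built with any C99-or-later compiler, the sketch prints two remaining pieces,
both re-marked as accounted, mirroring the two vma_lookup() calls at the end of
the patched move_vma().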