From: Liam R. Howlett
Date: Mon, 21 Sep 2020 19:37:14 +0000 (-0400)
Subject: mm/mmap: Introduce unlock_range() for code cleanup
X-Git-Tag: howlett/maple_spf/20210129~63
X-Git-Url: https://www.infradead.org/git/?a=commitdiff_plain;h=6e80c960ff44e17811eadcdc69ccbdfa424c98c8;p=users%2Fjedix%2Flinux-maple.git

mm/mmap: Introduce unlock_range() for code cleanup

Move the open-coded loops that munlock VM_LOCKED ranges in __do_munmap()
and exit_mmap() into a common helper, unlock_range().

Signed-off-by: Liam R. Howlett
---

diff --git a/mm/mmap.c b/mm/mmap.c
index d60f7ba5075d..cae07b456f64 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -3011,6 +3011,20 @@ int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
 	return __split_vma(mm, vma, addr, new_below);
 }
 
+static inline void unlock_range(struct vm_area_struct *start, unsigned long limit)
+{
+	struct mm_struct *mm = start->vm_mm;
+	struct vm_area_struct *tmp = start;
+
+	while (tmp && tmp->vm_start < limit) {
+		if (tmp->vm_flags & VM_LOCKED) {
+			mm->locked_vm -= vma_pages(tmp);
+			munlock_vma_pages_all(tmp);
+		}
+
+		tmp = tmp->vm_next;
+	}
+}
 /* Munmap is split into 2 main parts -- this part which finds
  * what needs doing, and the areas themselves, which do the
  * work. This now handles partial unmappings.
@@ -3099,17 +3113,8 @@ int __do_munmap(struct mm_struct *mm, unsigned long start, size_t len,
 	/*
 	 * unlock any mlock()ed ranges before detaching vmas
 	 */
-	if (mm->locked_vm) {
-		struct vm_area_struct *tmp = vma;
-		while (tmp && tmp->vm_start < end) {
-			if (tmp->vm_flags & VM_LOCKED) {
-				mm->locked_vm -= vma_pages(tmp);
-				munlock_vma_pages_all(tmp);
-			}
-
-			tmp = tmp->vm_next;
-		}
-	}
+	if (mm->locked_vm)
+		unlock_range(vma, end);
 
 	/* Detach vmas from rbtree */
 	if (!detach_vmas_to_be_unmapped(mm, vma, prev, end))
@@ -3412,14 +3417,8 @@ void exit_mmap(struct mm_struct *mm)
 		mmap_write_unlock(mm);
 	}
 
-	if (mm->locked_vm) {
-		vma = mm->mmap;
-		while (vma) {
-			if (vma->vm_flags & VM_LOCKED)
-				munlock_vma_pages_all(vma);
-			vma = vma->vm_next;
-		}
-	}
+	if (mm->locked_vm)
+		unlock_range(mm->mmap, ULONG_MAX);
 
 	arch_exit_mmap(mm);