mm/mmap: Introduce unlock_range() for code cleanup
author		Liam R. Howlett <Liam.Howlett@Oracle.com>
		Mon, 21 Sep 2020 19:37:14 +0000 (15:37 -0400)
committer	Liam R. Howlett <Liam.Howlett@Oracle.com>
		Mon, 10 May 2021 21:05:22 +0000 (17:05 -0400)
Both __do_munmap() and exit_mmap() unlock a range of VMAs using almost
identical code blocks.  Replace both blocks with a static inline function.

Signed-off-by: Liam R. Howlett <Liam.Howlett@Oracle.com>
Reviewed-by: Matthew Wilcox (Oracle) <willy@infradead.org>
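
For readers outside the kernel tree, here is a minimal user-space sketch of
the walk that the new helper factors out.  The struct layouts, the VM_LOCKED
value, and the munlock_vma_pages_all()/vma_pages() stubs are simplified
stand-ins for the real kernel definitions, not the actual API; only the
list-walk logic mirrors the patch.

#include <stdio.h>

/* Simplified stand-ins for the kernel types; illustrative only. */
#define VM_LOCKED 0x00002000UL

struct mm_struct;

struct vm_area_struct {
	unsigned long vm_start;			/* start address of the mapping */
	unsigned long vm_flags;			/* VM_LOCKED, etc. */
	struct mm_struct *vm_mm;		/* owning address space */
	struct vm_area_struct *vm_next;		/* next VMA, sorted by address */
};

struct mm_struct {
	struct vm_area_struct *mmap;		/* head of the VMA list */
	unsigned long locked_vm;		/* pages accounted under mlock() */
};

/* Stub: the real munlock_vma_pages_all() lives in mm/mlock.c. */
static void munlock_vma_pages_all(struct vm_area_struct *vma)
{
	printf("munlocking VMA starting at %#lx\n", vma->vm_start);
}

/* Stub: the real helper computes (vm_end - vm_start) >> PAGE_SHIFT. */
static unsigned long vma_pages(struct vm_area_struct *vma)
{
	(void)vma;
	return 1;
}

/*
 * The factored-out pattern: walk the address-ordered VMA list from
 * 'start' until a VMA begins at or beyond 'limit', munlocking any
 * VM_LOCKED region and fixing up the mlock accounting on the way.
 */
static inline
void unlock_range(struct vm_area_struct *start, unsigned long limit)
{
	struct mm_struct *mm = start->vm_mm;
	struct vm_area_struct *tmp = start;

	while (tmp && tmp->vm_start < limit) {
		if (tmp->vm_flags & VM_LOCKED) {
			mm->locked_vm -= vma_pages(tmp);
			munlock_vma_pages_all(tmp);
		}
		tmp = tmp->vm_next;
	}
}

int main(void)
{
	struct mm_struct mm = { .mmap = NULL, .locked_vm = 2 };
	struct vm_area_struct high = {
		.vm_start = 0x2000, .vm_flags = VM_LOCKED, .vm_mm = &mm,
	};
	struct vm_area_struct low = {
		.vm_start = 0x1000, .vm_flags = VM_LOCKED, .vm_mm = &mm,
		.vm_next = &high,
	};

	mm.mmap = &low;

	/* __do_munmap()-style call: only VMAs starting below 0x2000. */
	unlock_range(mm.mmap, 0x2000);
	printf("locked_vm after partial unlock: %lu\n", mm.locked_vm);

	/* exit_mmap() instead passes ULONG_MAX so the walk covers
	 * every VMA in the address space. */
	return 0;
}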
mm/mmap.c

index 81f5595a84906a6bbb04bf0c5d368dd4a4cf3174..1bbb3225c9588ab8856864faf71da7cc1f94f4f3 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -2801,6 +2801,22 @@ int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
        return __split_vma(mm, vma, addr, new_below);
 }
 
+static inline
+void unlock_range(struct vm_area_struct *start, unsigned long limit)
+{
+       struct mm_struct *mm = start->vm_mm;
+       struct vm_area_struct *tmp = start;
+
+       while (tmp && tmp->vm_start < limit) {
+               if (tmp->vm_flags & VM_LOCKED) {
+                       mm->locked_vm -= vma_pages(tmp);
+                       munlock_vma_pages_all(tmp);
+               }
+
+               tmp = tmp->vm_next;
+       }
+}
+
 /* Munmap is split into 2 main parts -- this part which finds
  * what needs doing, and the areas themselves, which do the
  * work.  This now handles partial unmappings.
@@ -2889,17 +2905,8 @@ int __do_munmap(struct mm_struct *mm, unsigned long start, size_t len,
        /*
         * unlock any mlock()ed ranges before detaching vmas
         */
-       if (mm->locked_vm) {
-               struct vm_area_struct *tmp = vma;
-               while (tmp && tmp->vm_start < end) {
-                       if (tmp->vm_flags & VM_LOCKED) {
-                               mm->locked_vm -= vma_pages(tmp);
-                               munlock_vma_pages_all(tmp);
-                       }
-
-                       tmp = tmp->vm_next;
-               }
-       }
+       if (mm->locked_vm)
+               unlock_range(vma, end);
 
        /* Detach vmas from rbtree */
        if (!detach_vmas_to_be_unmapped(mm, vma, prev, end))
@@ -3184,14 +3191,8 @@ void exit_mmap(struct mm_struct *mm)
                mmap_write_unlock(mm);
        }
 
-       if (mm->locked_vm) {
-               vma = mm->mmap;
-               while (vma) {
-                       if (vma->vm_flags & VM_LOCKED)
-                               munlock_vma_pages_all(vma);
-                       vma = vma->vm_next;
-               }
-       }
+       if (mm->locked_vm)
+               unlock_range(mm->mmap, ULONG_MAX);
 
        arch_exit_mmap(mm);
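
Two details of the exit_mmap() conversion are worth noting.  Passing
ULONG_MAX as the limit makes the walk cover the whole address space,
since no VMA can start at that address.  Unlike the old exit_mmap()
loop, the shared helper also decrements mm->locked_vm; presumably this
is harmless because the mm is being torn down and the counter is no
longer consulted.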