www.infradead.org Git - users/jedix/linux-maple.git/commitdiff
mm-drop-oom-code-from-exit_mmap-fix-fix
author: Andrew Morton <akpm@linux-foundation.org>
Wed, 1 Jun 2022 22:17:52 +0000 (15:17 -0700)
committer: Andrew Morton <akpm@linux-foundation.org>
Fri, 26 Aug 2022 05:03:23 +0000 (22:03 -0700)
restore Suren's mmap_read_lock() optimization

Cc: Suren Baghdasaryan <surenb@google.com>
Cc: Liam Howlett <liam.howlett@oracle.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
mm/mmap.c

index 8e85d06f0c2e3b9b5f2ff2c2d8cb51262f544101..be111bbe807547b48a26dbfdd4ee05c005fc6d23 100644 (file)
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -3085,13 +3085,13 @@ void exit_mmap(struct mm_struct *mm)
        /* mm's last user has gone, and its about to be pulled down */
        mmu_notifier_release(mm);
 
-       mmap_write_lock(mm);
+       mmap_read_lock(mm);
        arch_exit_mmap(mm);
 
        vma = mas_find(&mas, ULONG_MAX);
        if (!vma) {
                /* Can happen if dup_mmap() received an OOM */
-               mmap_write_unlock(mm);
+               mmap_read_unlock(mm);
                return;
        }
 
@@ -3101,6 +3101,7 @@ void exit_mmap(struct mm_struct *mm)
        /* update_hiwater_rss(mm) here? but nobody should be looking */
        /* Use ULONG_MAX here to ensure all VMAs in the mm are unmapped */
        unmap_vmas(&tlb, &mm->mm_mt, vma, 0, ULONG_MAX);
+       mmap_read_unlock(mm);
 
        /*
         * Set MMF_OOM_SKIP to hide this task from the oom killer/reaper
@@ -3108,6 +3109,7 @@ void exit_mmap(struct mm_struct *mm)
         * mm_is_oom_victim because setting a bit unconditionally is cheaper.
         */
        set_bit(MMF_OOM_SKIP, &mm->flags);
+       mmap_write_lock(mm);
        free_pgtables(&tlb, &mm->mm_mt, vma, FIRST_USER_ADDRESS,
                      USER_PGTABLES_CEILING);
        tlb_finish_mmu(&tlb);