www.infradead.org Git - users/jedix/linux-maple.git/commitdiff
mm-drop-oom-code-from-exit_mmap-fix-fix
authorAndrew Morton <akpm@linux-foundation.org>
Wed, 1 Jun 2022 22:17:52 +0000 (15:17 -0700)
committerLiam R. Howlett <Liam.Howlett@oracle.com>
Wed, 20 Jul 2022 00:15:03 +0000 (20:15 -0400)
restore Suren's mmap_read_lock() optimization

Cc: Suren Baghdasaryan <surenb@google.com>
Cc: Liam Howlett <liam.howlett@oracle.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
mm/mmap.c

index 9f5d25d0974bd70f356186ad1acc0737c20368f8..03e533bb534337f873f175bb35e8f82ea65a261d 100644 (file)
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -3130,13 +3130,13 @@ void exit_mmap(struct mm_struct *mm)
        /* mm's last user has gone, and its about to be pulled down */
        mmu_notifier_release(mm);
 
-       mmap_write_lock(mm);
+       mmap_read_lock(mm);
        arch_exit_mmap(mm);
 
        vma = mas_find(&mas, ULONG_MAX);
        if (!vma) {
                /* Can happen if dup_mmap() received an OOM */
-               mmap_write_unlock(mm);
+               mmap_read_unlock(mm);
                return;
        }
 
@@ -3146,6 +3146,7 @@ void exit_mmap(struct mm_struct *mm)
        /* update_hiwater_rss(mm) here? but nobody should be looking */
        /* Use ULONG_MAX here to ensure all VMAs in the mm are unmapped */
        unmap_vmas(&tlb, &mm->mm_mt, vma, 0, ULONG_MAX);
+       mmap_read_unlock(mm);
 
        /*
         * Set MMF_OOM_SKIP to hide this task from the oom killer/reaper
@@ -3153,6 +3154,7 @@ void exit_mmap(struct mm_struct *mm)
         * mm_is_oom_victim because setting a bit unconditionally is cheaper.
         */
        set_bit(MMF_OOM_SKIP, &mm->flags);
+       mmap_write_lock(mm);
        free_pgtables(&tlb, &mm->mm_mt, vma, FIRST_USER_ADDRESS,
                      USER_PGTABLES_CEILING);
        tlb_finish_mmu(&tlb);