mm: conditionally write-lock VMA in free_pgtables
author    Suren Baghdasaryan <surenb@google.com>
          Mon, 27 Feb 2023 17:36:18 +0000 (09:36 -0800)
committer Andrew Morton <akpm@linux-foundation.org>
          Tue, 28 Mar 2023 23:24:53 +0000 (16:24 -0700)
Normally free_pgtables needs to lock affected VMAs except for the case
when VMAs were isolated under VMA write-lock.  munmap() does just that,
isolating while holding appropriate locks and then downgrading mmap_lock
and dropping per-VMA locks before freeing page tables.  Add a parameter to
free_pgtables for such a scenario.

Link: https://lkml.kernel.org/r/20230227173632.3292573-20-surenb@google.com
Signed-off-by: Suren Baghdasaryan <surenb@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
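
As an illustration (not part of the patch), below is a minimal sketch of the
two call patterns the new mm_wr_locked flag distinguishes, condensed from the
callers changed in the diffs that follow.  The helper function names are
hypothetical; free_pgtables(), vma_start_write(), mmap_write_lock()/unlock()
and the mm/maple-tree fields are the real kernel interfaces touched here.

/*
 * Hypothetical sketch only -- condensed from unmap_region() and exit_mmap().
 */

/* exit_mmap()-style teardown: mmap_lock is held for write, but the VMAs
 * have not been individually write-locked yet, so free_pgtables() must
 * call vma_start_write() on each of them -> pass mm_wr_locked = true. */
static void sketch_full_teardown(struct mm_struct *mm, struct mmu_gather *tlb,
				 struct vm_area_struct *vma)
{
	mmap_write_lock(mm);
	free_pgtables(tlb, &mm->mm_mt, vma, FIRST_USER_ADDRESS,
		      USER_PGTABLES_CEILING, /* mm_wr_locked = */ true);
	mmap_write_unlock(mm);
}

/* munmap()-style path: the VMAs were already isolated under their per-VMA
 * write locks before mmap_lock was downgraded, so there is nothing left
 * for free_pgtables() to write-lock -> pass mm_wr_locked = false. */
static void sketch_after_downgrade(struct mm_struct *mm, struct mmu_gather *tlb,
				   struct maple_tree *mt,
				   struct vm_area_struct *vma,
				   struct vm_area_struct *prev,
				   struct vm_area_struct *next)
{
	free_pgtables(tlb, mt, vma,
		      prev ? prev->vm_end : FIRST_USER_ADDRESS,
		      next ? next->vm_start : USER_PGTABLES_CEILING,
		      /* mm_wr_locked = */ false);
}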
mm/internal.h
mm/memory.c
mm/mmap.c

index 08ce56dbb1d997c01ce38d1606c4176ac04db25a..fce94775819c0f0e931c513ee73990a94b44c018 100644 (file)
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -105,7 +105,7 @@ void folio_activate(struct folio *folio);
 
 void free_pgtables(struct mmu_gather *tlb, struct maple_tree *mt,
                   struct vm_area_struct *start_vma, unsigned long floor,
-                  unsigned long ceiling);
+                  unsigned long ceiling, bool mm_wr_locked);
 void pmd_install(struct mm_struct *mm, pmd_t *pmd, pgtable_t *pte);
 
 struct zap_details;
index 36e8ba43e4d3041c494de394e6939847faf5eb2a..d100999bc43319344ab4a40736881ea181b29bdb 100644 (file)
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -348,7 +348,7 @@ void free_pgd_range(struct mmu_gather *tlb,
 
 void free_pgtables(struct mmu_gather *tlb, struct maple_tree *mt,
                   struct vm_area_struct *vma, unsigned long floor,
-                  unsigned long ceiling)
+                  unsigned long ceiling, bool mm_wr_locked)
 {
        MA_STATE(mas, mt, vma->vm_end, vma->vm_end);
 
@@ -366,6 +366,8 @@ void free_pgtables(struct mmu_gather *tlb, struct maple_tree *mt,
                 * Hide vma from rmap and truncate_pagecache before freeing
                 * pgtables
                 */
+               if (mm_wr_locked)
+                       vma_start_write(vma);
                unlink_anon_vmas(vma);
                unlink_file_vma(vma);
 
@@ -380,6 +382,8 @@ void free_pgtables(struct mmu_gather *tlb, struct maple_tree *mt,
                               && !is_vm_hugetlb_page(next)) {
                                vma = next;
                                next = mas_find(&mas, ceiling - 1);
+                               if (mm_wr_locked)
+                                       vma_start_write(vma);
                                unlink_anon_vmas(vma);
                                unlink_file_vma(vma);
                        }
index 680aecc1d5b62aa490eec56b6df01eb53befad1e..36f2e5a37a42b7c8423e311e15f8c743fa921773 100644 (file)
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -2151,7 +2151,8 @@ static void unmap_region(struct mm_struct *mm, struct maple_tree *mt,
        update_hiwater_rss(mm);
        unmap_vmas(&tlb, mt, vma, start, end, mm_wr_locked);
        free_pgtables(&tlb, mt, vma, prev ? prev->vm_end : FIRST_USER_ADDRESS,
-                                next ? next->vm_start : USER_PGTABLES_CEILING);
+                                next ? next->vm_start : USER_PGTABLES_CEILING,
+                                mm_wr_locked);
        tlb_finish_mmu(&tlb);
 }
 
@@ -3049,7 +3050,7 @@ void exit_mmap(struct mm_struct *mm)
        mmap_write_lock(mm);
        mt_clear_in_rcu(&mm->mm_mt);
        free_pgtables(&tlb, &mm->mm_mt, vma, FIRST_USER_ADDRESS,
-                     USER_PGTABLES_CEILING);
+                     USER_PGTABLES_CEILING, true);
        tlb_finish_mmu(&tlb);
 
        /*