mmu_gather: move tlb flush for VM_PFNMAP/VM_MIXEDMAP vmas into free_pgtables()
author Roman Gushchin <roman.gushchin@linux.dev>
Mon, 27 Jan 2025 19:53:21 +0000 (19:53 +0000)
committer Andrew Morton <akpm@linux-foundation.org>
Tue, 4 Mar 2025 05:49:24 +0000 (21:49 -0800)
Commit b67fbebd4cf9 ("mmu_gather: Force tlb-flush VM_PFNMAP vmas") added a
forced TLB flush to tlb_end_vma(), which is required to avoid a race
between munmap() and unmap_mapping_range().  However, it also added
overhead to other paths where tlb_end_vma() is used but vmas are not
removed, e.g.  madvise(MADV_DONTNEED).
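
(For illustration only: a minimal, self-contained userspace model of the
pre-patch behaviour.  The toy_* names are invented for this sketch, the
mmu_gather bookkeeping is reduced to a flush counter, and only the forced
flush decision at the vma boundary mirrors the old tlb_end_vma().)

/* Toy model, not kernel code. */
#include <stdbool.h>
#include <stdio.h>

struct toy_vma { bool pfnmap; };
struct toy_mmu_gather { bool fullmm; unsigned long flushes; };

/* stands in for tlb_flush_mmu_tlbonly() */
static void toy_flush(struct toy_mmu_gather *tlb) { tlb->flushes++; }

/* pre-patch boundary handling: PFNMAP/MIXEDMAP vmas always force a flush */
static void toy_end_vma_old(struct toy_mmu_gather *tlb, struct toy_vma *vma)
{
	if (tlb->fullmm)
		return;
	if (vma->pfnmap)	/* the forced flush added by b67fbebd4cf9 */
		toy_flush(tlb);
}

int main(void)
{
	struct toy_mmu_gather tlb = { .fullmm = false };
	struct toy_vma vma = { .pfnmap = true };

	/* a madvise(MADV_DONTNEED)-style pass over 1000 vma boundaries
	 * (one struct reused for brevity): nothing is unlinked, yet every
	 * boundary pays for a TLB flush */
	for (int i = 0; i < 1000; i++)
		toy_end_vma_old(&tlb, &vma);

	printf("forced flushes: %lu\n", tlb.flushes);	/* prints 1000 */
	return 0;
}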

Fix this by moving the TLB flush out of tlb_end_vma() and into
free_pgtables(), somewhat similar to the stable version of the original
commit, e.g.  stable commit 895428ee124a ("mm: Force TLB flush for PFNMAP
mappings before unlink_file_vma()").

Note that if tlb->fullmm is set, no flush is required, as the whole mm is
about to be destroyed.
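
(Likewise for the post-patch logic, a userspace sketch that mirrors the
hunks below rather than quoting them; the same toy_* naming assumption
applies, and the range-reset flush for !CONFIG_MMU_GATHER_MERGE_VMAS as
well as the final tlb_finish_mmu() flush are left out of the model.)

/* Toy model, not kernel code. */
#include <stdbool.h>
#include <stdio.h>

struct toy_vma { bool pfnmap; };

struct toy_mmu_gather {
	bool fullmm;
	bool vma_pfn;		/* a PFNMAP/MIXEDMAP vma seen since the last flush */
	unsigned long flushes;
};

static void toy_flush(struct toy_mmu_gather *tlb)
{
	tlb->flushes++;
	tlb->vma_pfn = false;	/* __tlb_reset_range() now clears vma_pfn too */
}

/* mirrors tlb_update_vma_flags(): accumulate, do not overwrite */
static void toy_start_vma(struct toy_mmu_gather *tlb, struct toy_vma *vma)
{
	tlb->vma_pfn |= vma->pfnmap;
}

/* mirrors tlb_free_vma(): called from free_pgtables() right before unlinking */
static void toy_free_vma(struct toy_mmu_gather *tlb, struct toy_vma *vma)
{
	if (tlb->fullmm)
		return;			/* whole mm is being destroyed */
	if (vma->pfnmap && tlb->vma_pfn)
		toy_flush(tlb);		/* close the munmap() vs unmap_mapping_range() race */
}

int main(void)
{
	struct toy_mmu_gather tlb = { 0 };
	struct toy_vma pfn_vma = { .pfnmap = true };

	/* madvise(MADV_DONTNEED)-like pass: the vma stays linked, so no
	 * PFNMAP-forced flush at the boundary any more */
	toy_start_vma(&tlb, &pfn_vma);
	printf("zap-only pass: %lu forced flushes\n", tlb.flushes);	/* 0 */

	/* munmap()-like pass: the vma is about to be unlinked, flush first */
	toy_start_vma(&tlb, &pfn_vma);
	toy_free_vma(&tlb, &pfn_vma);
	printf("unmap pass:    %lu forced flushes\n", tlb.flushes);	/* 1 */
	return 0;
}

In the zap-only case any pending range is still flushed when the gather
finishes, so correctness is unchanged; what goes away is the extra forced
flush at every PFNMAP vma boundary.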

Link: https://lkml.kernel.org/r/20250127195321.35779-1-roman.gushchin@linux.dev
Signed-off-by: Roman Gushchin <roman.gushchin@linux.dev>
Cc: Jann Horn <jannh@google.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Will Deacon <will@kernel.org>
Cc: "Aneesh Kumar K.V" <aneesh.kumar@kernel.org>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Nick Piggin <npiggin@gmail.com>
Cc: Hugh Dickins <hughd@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
include/asm-generic/tlb.h
mm/memory.c

index e402aef79c93ec9c0280d94055c00e9d93763491..dd673ec5989307ac20c53a5619dbe439d4d47bf3 100644 (file)
  *    Defaults to flushing at tlb_end_vma() to reset the range; helps when
  *    there's large holes between the VMAs.
  *
+ *  - tlb_free_vma()
+ *
+ *    tlb_free_vma() marks the start of unlinking the vma and freeing
+ *    page-tables.
+ *
  *  - tlb_remove_table()
  *
  *    tlb_remove_table() is the basic primitive to free page-table directories
@@ -400,7 +405,10 @@ static inline void __tlb_reset_range(struct mmu_gather *tlb)
         * Do not reset mmu_gather::vma_* fields here, we do not
         * call into tlb_start_vma() again to set them if there is an
         * intermediate flush.
+        *
+        * Except for vma_pfn, that only cares if there's pending TLBI.
         */
+       tlb->vma_pfn = 0;
 }
 
 #ifdef CONFIG_MMU_GATHER_NO_RANGE
@@ -465,7 +473,12 @@ tlb_update_vma_flags(struct mmu_gather *tlb, struct vm_area_struct *vma)
         */
        tlb->vma_huge = is_vm_hugetlb_page(vma);
        tlb->vma_exec = !!(vma->vm_flags & VM_EXEC);
-       tlb->vma_pfn  = !!(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP));
+
+       /*
+        * Track if there's at least one VM_PFNMAP/VM_MIXEDMAP vma
+        * in the tracked range, see tlb_free_vma().
+        */
+       tlb->vma_pfn |= !!(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP));
 }
 
 static inline void tlb_flush_mmu_tlbonly(struct mmu_gather *tlb)
@@ -564,23 +577,39 @@ static inline void tlb_start_vma(struct mmu_gather *tlb, struct vm_area_struct *
 }
 
 static inline void tlb_end_vma(struct mmu_gather *tlb, struct vm_area_struct *vma)
+{
+       if (tlb->fullmm || IS_ENABLED(CONFIG_MMU_GATHER_MERGE_VMAS))
+               return;
+
+       /*
+        * Do a TLB flush and reset the range at VMA boundaries; this avoids
+        * the ranges growing with the unused space between consecutive VMAs,
+        * but also the mmu_gather::vma_* flags from tlb_start_vma() rely on
+        * this.
+        */
+       tlb_flush_mmu_tlbonly(tlb);
+}
+
+static inline void tlb_free_vma(struct mmu_gather *tlb, struct vm_area_struct *vma)
 {
        if (tlb->fullmm)
                return;
 
        /*
         * VM_PFNMAP is more fragile because the core mm will not track the
-        * page mapcount -- there might not be page-frames for these PFNs after
-        * all. Force flush TLBs for such ranges to avoid munmap() vs
-        * unmap_mapping_range() races.
+        * page mapcount -- there might not be page-frames for these PFNs
+        * after all.
+        *
+        * Specifically, there is a race between munmap() and
+        * unmap_mapping_range(), where munmap() will unlink the VMA, such
+        * that unmap_mapping_range() will no longer observe the VMA and
+        * no-op, without observing the TLBI, returning prematurely.
+        *
+        * So if we're about to unlink such a VMA, and we have pending
+        * TLBI for such a vma, flush things now.
         */
-       if (tlb->vma_pfn || !IS_ENABLED(CONFIG_MMU_GATHER_MERGE_VMAS)) {
-               /*
-                * Do a TLB flush and reset the range at VMA boundaries; this avoids
-                * the ranges growing with the unused space between consecutive VMAs.
-                */
+       if ((vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)) && tlb->vma_pfn)
                tlb_flush_mmu_tlbonly(tlb);
-       }
 }
 
 /*
index b9661ccfa64fd6541fb77b8cb366ad15a73951d7..f3a041222ab89d255102207815763c97c4029c10 100644 (file)
@@ -378,6 +378,7 @@ void free_pgtables(struct mmu_gather *tlb, struct ma_state *mas,
                if (unlikely(xa_is_zero(next)))
                        next = NULL;
 
+               tlb_free_vma(tlb, vma);
                /*
                 * Hide vma from rmap and truncate_pagecache before freeing
                 * pgtables
@@ -403,6 +404,7 @@ void free_pgtables(struct mmu_gather *tlb, struct ma_state *mas,
                                next = mas_find(mas, ceiling - 1);
                                if (unlikely(xa_is_zero(next)))
                                        next = NULL;
+                               tlb_free_vma(tlb, vma);
                                if (mm_wr_locked)
                                        vma_start_write(vma);
                                unlink_anon_vmas(vma);