 * Defaults to flushing at tlb_end_vma() to reset the range; helps when
 * there are large holes between the VMAs.
*
+ * - tlb_free_vma()
+ *
+ * tlb_free_vma() marks the start of unlinking the vma and freeing
+ * page-tables.
+ *
* - tlb_remove_table()
*
* tlb_remove_table() is the basic primitive to free page-table directories
 * Do not reset mmu_gather::vma_* fields here; we do not
 * call into tlb_start_vma() again to set them if there is an
 * intermediate flush.
+ *
+ * Except for vma_pfn, which only cares whether there is pending TLBI.
*/
+ tlb->vma_pfn = 0;
}
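
Why clearing vma_pfn here is safe can be illustrated with a small userspace model (all names below are hypothetical stand-ins, not the kernel's types): once the pending range has been flushed there is no outstanding TLBI left for any PFNMAP vma, so the flag can be dropped together with the range.

#include <stdbool.h>
#include <stdio.h>

/* Toy stand-in for struct mmu_gather; illustrative only. */
struct gather {
	unsigned long start, end;	/* pending-TLBI range */
	bool vma_pfn;			/* pending TLBI covers a PFNMAP vma */
};

static void reset_range(struct gather *g)
{
	g->start = -1UL;
	g->end = 0;
	/* As in the hunk above: an empty range means no pending TLBI,
	 * so there is nothing left for a PFNMAP vma to care about. */
	g->vma_pfn = false;
}

static void flush_tlb_only(struct gather *g)
{
	if (g->end > g->start)
		printf("TLBI [%#lx, %#lx)\n", g->start, g->end);
	reset_range(g);
}

int main(void)
{
	struct gather g = { .start = 0x1000, .end = 0x2000, .vma_pfn = true };

	flush_tlb_only(&g);	/* performs the TLBI, then clears vma_pfn */
	printf("vma_pfn after flush: %d\n", g.vma_pfn);
	return 0;
}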
#ifdef CONFIG_MMU_GATHER_NO_RANGE
*/
tlb->vma_huge = is_vm_hugetlb_page(vma);
tlb->vma_exec = !!(vma->vm_flags & VM_EXEC);
- tlb->vma_pfn = !!(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP));
+
+ /*
+ * Track whether there is at least one VM_PFNMAP/VM_MIXEDMAP vma
+ * in the tracked range; see tlb_free_vma().
+ */
+ tlb->vma_pfn |= !!(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP));
}
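
What the |= buys over the old plain assignment can be shown with a short sketch (the flag values below are illustrative, not the real VM_* bits): with several VMAs batched into one mmu_gather, a single PFNMAP vma must keep the flag set even when later, ordinary VMAs start.

#include <stdbool.h>
#include <stdio.h>

/* Illustrative flag values only; the real VM_* bits differ. */
#define VM_EXEC		0x1UL
#define VM_PFNMAP	0x2UL
#define VM_MIXEDMAP	0x4UL

int main(void)
{
	unsigned long vm_flags[] = { VM_EXEC, VM_PFNMAP, VM_EXEC };
	bool vma_pfn = false;
	unsigned int i;

	for (i = 0; i < 3; i++) {
		/* As in tlb_start_vma() above: OR in rather than assign,
		 * so the PFNMAP vma in the middle is not forgotten when
		 * the last, plain vma starts. */
		vma_pfn |= !!(vm_flags[i] & (VM_PFNMAP|VM_MIXEDMAP));
	}
	printf("vma_pfn = %d\n", vma_pfn);	/* prints 1, not 0 */
	return 0;
}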
static inline void tlb_flush_mmu_tlbonly(struct mmu_gather *tlb)
}
static inline void tlb_end_vma(struct mmu_gather *tlb, struct vm_area_struct *vma)
+{
+ if (tlb->fullmm || IS_ENABLED(CONFIG_MMU_GATHER_MERGE_VMAS))
+ return;
+
+ /*
+ * Do a TLB flush and reset the range at VMA boundaries; this avoids
+ * the ranges growing to include the unused space between consecutive
+ * VMAs. The mmu_gather::vma_* flags set by tlb_start_vma() also rely
+ * on this reset.
+ */
+ tlb_flush_mmu_tlbonly(tlb);
+}
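
The comment above can be made concrete with a toy range tracker (hypothetical userspace code, not the kernel's): without a flush at the VMA boundary, the gathered range swallows the hole between two VMAs and the eventual TLBI becomes needlessly wide.

#include <stdio.h>

struct range {
	unsigned long start, end;
};

static void track(struct range *r, unsigned long s, unsigned long e)
{
	if (s < r->start)
		r->start = s;
	if (e > r->end)
		r->end = e;
}

int main(void)
{
	struct range r = { .start = -1UL, .end = 0 };

	track(&r, 0x1000, 0x2000);		/* first vma: one page */
	track(&r, 0x100000, 0x101000);		/* second vma, large hole before it */
	/* Without a boundary flush, one TLBI now covers the whole of
	 * [0x1000, 0x101000) although only two pages were unmapped;
	 * flushing in tlb_end_vma() keeps the two ranges separate. */
	printf("TLBI [%#lx, %#lx)\n", r.start, r.end);
	return 0;
}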
+
+static inline void tlb_free_vma(struct mmu_gather *tlb, struct vm_area_struct *vma)
{
if (tlb->fullmm)
return;
/*
* VM_PFNMAP is more fragile because the core mm will not track the
- * page mapcount -- there might not be page-frames for these PFNs after
- * all. Force flush TLBs for such ranges to avoid munmap() vs
- * unmap_mapping_range() races.
+ * page mapcount -- there might not be page-frames for these PFNs
+ * after all.
+ *
+ * Specifically, there is a race between munmap() and
+ * unmap_mapping_range(): munmap() will unlink the VMA, such
+ * that unmap_mapping_range() will no longer observe the VMA,
+ * turn into a no-op, and return prematurely without observing
+ * the TLBI.
+ *
+ * So if we are about to unlink such a VMA, and there is pending
+ * TLBI covering one, flush now.
*/
- if (tlb->vma_pfn || !IS_ENABLED(CONFIG_MMU_GATHER_MERGE_VMAS)) {
- /*
- * Do a TLB flush and reset the range at VMA boundaries; this avoids
- * the ranges growing with the unused space between consecutive VMAs.
- */
+ if ((vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)) && tlb->vma_pfn)
tlb_flush_mmu_tlbonly(tlb);
- }
}
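
To spell the race out, here is a sketch of the problematic ordering and of how flushing before the unlink closes it (names are hypothetical stand-ins modelling the logic, not the kernel's implementation):

#include <stdbool.h>

/*
 * The race, as a timeline:
 *
 *	munmap()			unmap_mapping_range()
 *	--------			---------------------
 *	queue TLBI for vma
 *	unlink vma
 *					look up vma -> not found
 *					return, assuming no mappings left
 *	flush TLBI			<- too late, the caller already raced
 */

#define VM_PFNMAP	0x2UL
#define VM_MIXEDMAP	0x4UL

struct gather { bool vma_pfn; };
struct vma { unsigned long vm_flags; };

static void flush_tlb_only(struct gather *g) { g->vma_pfn = false; }
static void unlink_vma(struct vma *v) { (void)v; }

/* Mirrors tlb_free_vma() above: flush *before* the vma disappears,
 * so unmap_mapping_range() can only miss the vma once no stale TLB
 * entries remain. */
static void free_vma(struct gather *g, struct vma *v)
{
	if ((v->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)) && g->vma_pfn)
		flush_tlb_only(g);
	unlink_vma(v);
}

int main(void)
{
	struct gather g = { .vma_pfn = true };
	struct vma v = { .vm_flags = VM_PFNMAP };

	free_vma(&g, &v);
	return 0;
}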
/*