mm: remove arch_flush_lazy_mmu_mode()
author    Kevin Brodsky <kevin.brodsky@arm.com>
          Mon, 8 Sep 2025 07:39:25 +0000 (08:39 +0100)
committer Andrew Morton <akpm@linux-foundation.org>
          Fri, 12 Sep 2025 00:26:02 +0000 (17:26 -0700)
Patch series "Nesting support for lazy MMU mode", v2.

When the lazy MMU mode was introduced eons ago, it wasn't made clear
whether such a sequence was legal:

arch_enter_lazy_mmu_mode()
...
arch_enter_lazy_mmu_mode()
...
arch_leave_lazy_mmu_mode()
...
arch_leave_lazy_mmu_mode()

It seems fair to say that nested calls to
arch_{enter,leave}_lazy_mmu_mode() were not expected, and most
architectures never explicitly supported nesting.

Ryan Roberts' series from March [1] attempted to prevent nesting from ever
occurring, and mostly succeeded.  Unfortunately, a corner case
(DEBUG_PAGEALLOC) may still cause nesting to occur on arm64.  Ryan
proposed [2] to address that corner case at the generic level, but this
approach received pushback; a follow-up [3] attempted to solve the issue
on arm64 only, but it was deemed too fragile.

Relying on lazy_mmu sections never nesting is generally fragile, because
callers of various standard mm functions cannot know whether a given
function uses lazy_mmu internally.  This series therefore performs a
U-turn and adds support for nested lazy_mmu sections, on all
architectures.

The main change enabling nesting is patch 2, following the approach
suggested by Catalin Marinas [4]: have enter() return some state and the
matching leave() take that state.  In this series, the state is only used
to handle nesting, but it could be used for other purposes such as
restoring context modified by enter(); the proposed kpkeys framework would
be an immediate user [5].
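
As a rough sketch of that approach (the state type and the reuse of
arm64's TIF_LAZY_MMU flag below are illustrative assumptions, not the
actual API introduced by patch 2), enter() records whether the mode was
already active, and only the outermost leave() actually exits it:

	typedef struct {
		bool nested;	/* mode was already active on entry */
	} lazy_mmu_state_t;

	static inline lazy_mmu_state_t arch_enter_lazy_mmu_mode(void)
	{
		lazy_mmu_state_t state = {
			.nested = test_thread_flag(TIF_LAZY_MMU),
		};

		set_thread_flag(TIF_LAZY_MMU);
		return state;
	}

	static inline void arch_leave_lazy_mmu_mode(lazy_mmu_state_t state)
	{
		/* Nested leave(): the outer section is still active. */
		if (state.nested)
			return;

		clear_thread_flag(TIF_LAZY_MMU);
	}

With this shape, the nested sequence shown above becomes well-defined:
the inner enter()/leave() pair observes that the mode is already active
and leaves it untouched.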

This patch (of 7):

This function has only ever been used in arch/x86, so there is no need for
other architectures to implement it.  Remove it from linux/pgtable.h and
all architectures besides x86.

The arm64 implementation is not empty, but it is only called from
arch_leave_lazy_mmu_mode(), so we can simply fold it into that function.

Link: https://lkml.kernel.org/r/20250908073931.4159362-1-kevin.brodsky@arm.com
Link: https://lkml.kernel.org/r/20250908073931.4159362-2-kevin.brodsky@arm.com
Link: https://lore.kernel.org/all/20250303141542.3371656-1-ryan.roberts@arm.com/
Link: https://lore.kernel.org/all/20250530140446.2387131-1-ryan.roberts@arm.com/
Link: https://lore.kernel.org/all/20250606135654.178300-1-ryan.roberts@arm.com/
Link: https://lore.kernel.org/all/aEhKSq0zVaUJkomX@arm.com/
Link: https://lore.kernel.org/linux-hardening/20250815085512.2182322-19-kevin.brodsky@arm.com/
Signed-off-by: Kevin Brodsky <kevin.brodsky@arm.com>
Acked-by: Mike Rapoport (Microsoft) <rppt@kernel.org>
Reviewed-by: Yeoreum Yun <yeoreum.yun@arm.com>
Acked-by: David Hildenbrand <david@redhat.com>
Cc: Alexander Gordeev <agordeev@linux.ibm.com>
Cc: Andreas Larsson <andreas@gaisler.com>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Boris Ostrovsky <boris.ostrovsky@oracle.com>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: Christophe Leroy <christophe.leroy@csgroup.eu>
Cc: David S. Miller <davem@davemloft.net>
Cc: "H. Peter Anvin" <hpa@zytor.com>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Jann Horn <jannh@google.com>
Cc: Juergen Gross <jgross@suse.com>
Cc: levi.yun <yeoreum.yun@arm.com>
Cc: Liam Howlett <liam.howlett@oracle.com>
Cc: Lorenzo Stoakes <lorenzo.stoakes@oracle.com>
Cc: Madhavan Srinivasan <maddy@linux.ibm.com>
Cc: Michael Ellerman <mpe@ellerman.id.au>
Cc: Michal Hocko <mhocko@suse.com>
Cc: Nicholas Piggin <npiggin@gmail.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Ryan Roberts <ryan.roberts@arm.com>
Cc: Suren Baghdasaryan <surenb@google.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Vlastimil Babka <vbabka@suse.cz>
Cc: Will Deacon <will@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
arch/arm64/include/asm/pgtable.h
arch/powerpc/include/asm/book3s/64/tlbflush-hash.h
arch/sparc/include/asm/tlbflush_64.h
arch/x86/include/asm/pgtable.h
include/linux/pgtable.h

diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h
index abd2dee416b3b33f73ae5d31555b466cf4e170fd..728d7b6ed20aef1152f7c1d46681013b1790c46e 100644
@@ -101,21 +101,14 @@ static inline void arch_enter_lazy_mmu_mode(void)
        set_thread_flag(TIF_LAZY_MMU);
 }
 
-static inline void arch_flush_lazy_mmu_mode(void)
+static inline void arch_leave_lazy_mmu_mode(void)
 {
        if (in_interrupt())
                return;
 
        if (test_and_clear_thread_flag(TIF_LAZY_MMU_PENDING))
                emit_pte_barriers();
-}
-
-static inline void arch_leave_lazy_mmu_mode(void)
-{
-       if (in_interrupt())
-               return;
 
-       arch_flush_lazy_mmu_mode();
        clear_thread_flag(TIF_LAZY_MMU);
 }
 
diff --git a/arch/powerpc/include/asm/book3s/64/tlbflush-hash.h b/arch/powerpc/include/asm/book3s/64/tlbflush-hash.h
index 146287d9580f491fea0ef89d20eb1f9afcebed13..176d7fd79eeb0c4f0d4473cdd89449ab02b8845a 100644
@@ -55,8 +55,6 @@ static inline void arch_leave_lazy_mmu_mode(void)
        preempt_enable();
 }
 
-#define arch_flush_lazy_mmu_mode()      do {} while (0)
-
 extern void hash__tlbiel_all(unsigned int action);
 
 extern void flush_hash_page(unsigned long vpn, real_pte_t pte, int psize,
diff --git a/arch/sparc/include/asm/tlbflush_64.h b/arch/sparc/include/asm/tlbflush_64.h
index 8b8cdaa6927209eae6e523315ca29ab725d87092..cd144eb31bdd21dbf5efd1be28e273690deae12d 100644
@@ -44,7 +44,6 @@ void flush_tlb_kernel_range(unsigned long start, unsigned long end);
 void flush_tlb_pending(void);
 void arch_enter_lazy_mmu_mode(void);
 void arch_leave_lazy_mmu_mode(void);
-#define arch_flush_lazy_mmu_mode()      do {} while (0)
 
 /* Local cpu only.  */
 void __flush_tlb_all(void);
diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h
index e33df3da698043aaa275f3f875bbf97ea8db5703..14fd672bc9b2df6e24bd376800a4adeab616bc3b 100644
@@ -117,7 +117,8 @@ extern pmdval_t early_pmd_flags;
 #define pte_val(x)     native_pte_val(x)
 #define __pte(x)       native_make_pte(x)
 
-#define arch_end_context_switch(prev)  do {} while(0)
+#define arch_end_context_switch(prev)  do {} while (0)
+#define arch_flush_lazy_mmu_mode()     do {} while (0)
 #endif /* CONFIG_PARAVIRT_XXL */
 
 static inline pmd_t pmd_set_flags(pmd_t pmd, pmdval_t set)
diff --git a/include/linux/pgtable.h b/include/linux/pgtable.h
index 94249e671a7e8b85e440fc20ec1cb64690ec12d7..8d6007123cdfd86b499cb6a9ea79cc38bf8f892e 100644
@@ -234,7 +234,6 @@ static inline int pmd_dirty(pmd_t pmd)
 #ifndef __HAVE_ARCH_ENTER_LAZY_MMU_MODE
 #define arch_enter_lazy_mmu_mode()     do {} while (0)
 #define arch_leave_lazy_mmu_mode()     do {} while (0)
-#define arch_flush_lazy_mmu_mode()     do {} while (0)
 #endif
 
 #ifndef pte_batch_hint