x86/xen: support nested lazy_mmu sections (again)
author Kevin Brodsky <kevin.brodsky@arm.com>
Mon, 8 Sep 2025 07:39:28 +0000 (08:39 +0100)
committer Andrew Morton <akpm@linux-foundation.org>
Fri, 12 Sep 2025 00:26:03 +0000 (17:26 -0700)
Commit 49147beb0ccb ("x86/xen: allow nesting of same lazy mode")
originally introduced support for nested lazy sections (LAZY_MMU and
LAZY_CPU).  It was later reverted by commit c36549ff8d84 because its
implementation turned out not to be preemption-safe.

Now that the lazy_mmu API allows enter() to pass a state through to the
matching leave() call, we can support nesting again for the LAZY_MMU mode
in a preemption-safe manner.  If xen_enter_lazy_mmu() is called inside an
active lazy_mmu section, xen_lazy_mode will already be set to XEN_LAZY_MMU;
we then return LAZY_MMU_NESTED to instruct the matching
xen_leave_lazy_mmu() call to leave xen_lazy_mode unchanged.
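
As a caller-side sketch (the surrounding caller and the set_pte_at()
call are only illustrative; the arch hooks and state values are the
ones changed below), a nested section now looks like:

	lazy_mmu_state_t outer, inner;

	outer = arch_enter_lazy_mmu_mode();	/* enters XEN_LAZY_MMU, returns LAZY_MMU_DEFAULT */
	...
	inner = arch_enter_lazy_mmu_mode();	/* already XEN_LAZY_MMU, returns LAZY_MMU_NESTED */
	set_pte_at(mm, addr, ptep, pte);	/* batched through Xen multicalls */
	arch_leave_lazy_mmu_mode(inner);	/* flushes multicalls, xen_lazy_mode kept */
	...
	arch_leave_lazy_mmu_mode(outer);	/* flushes multicalls, leaves XEN_LAZY_MMU */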

The only effect of this patch is to ensure that xen_lazy_mode remains set
to XEN_LAZY_MMU until the outermost lazy_mmu section ends.
xen_leave_lazy_mmu() still calls xen_mc_flush() unconditionally.
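
The resulting Xen-side sequence for one level of nesting is roughly
(all symbols are from the hunks below):

	xen_enter_lazy_mmu()                    -> enter_lazy(XEN_LAZY_MMU), LAZY_MMU_DEFAULT
	  xen_enter_lazy_mmu()                  -> LAZY_MMU_NESTED, mode untouched
	  xen_leave_lazy_mmu(LAZY_MMU_NESTED)   -> xen_mc_flush() only
	xen_leave_lazy_mmu(LAZY_MMU_DEFAULT)    -> xen_mc_flush() + leave_lazy(XEN_LAZY_MMU)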

Link: https://lkml.kernel.org/r/20250908073931.4159362-5-kevin.brodsky@arm.com
Signed-off-by: Kevin Brodsky <kevin.brodsky@arm.com>
Cc: Alexander Gordeev <agordeev@linux.ibm.com>
Cc: Andreas Larsson <andreas@gaisler.com>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Boris Ostrovsky <boris.ostrovsky@oracle.com>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: Christophe Leroy <christophe.leroy@csgroup.eu>
Cc: David Hildenbrand <david@redhat.com>
Cc: David S. Miller <davem@davemloft.net>
Cc: "H. Peter Anvin" <hpa@zytor.com>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Jann Horn <jannh@google.com>
Cc: Juergen Gross <jgross@suse.com>
Cc: levi.yun <yeoreum.yun@arm.com>
Cc: Liam Howlett <liam.howlett@oracle.com>
Cc: Lorenzo Stoakes <lorenzo.stoakes@oracle.com>
Cc: Madhavan Srinivasan <maddy@linux.ibm.com>
Cc: Michael Ellerman <mpe@ellerman.id.au>
Cc: Michal Hocko <mhocko@suse.com>
Cc: Mike Rapoport (Microsoft) <rppt@kernel.org>
Cc: Nicholas Piggin <npiggin@gmail.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Ryan Roberts <ryan.roberts@arm.com>
Cc: Suren Baghdasaryan <surenb@google.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Vlastimil Babka <vbabka@suse.cz>
Cc: Will Deacon <will@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
arch/x86/include/asm/paravirt.h
arch/x86/include/asm/paravirt_types.h
arch/x86/xen/mmu_pv.c

arch/x86/include/asm/paravirt.h
index 65a0d394fba1864e629588074a2e064c6b5bebb7..4ecd3a6b1deacfe3c56ebb72e5cfaf43ca8d43e8 100644
@@ -529,14 +529,12 @@ static inline void arch_end_context_switch(struct task_struct *next)
 #define  __HAVE_ARCH_ENTER_LAZY_MMU_MODE
 static inline lazy_mmu_state_t arch_enter_lazy_mmu_mode(void)
 {
-       PVOP_VCALL0(mmu.lazy_mode.enter);
-
-       return LAZY_MMU_DEFAULT;
+       return PVOP_CALL0(lazy_mmu_state_t, mmu.lazy_mode.enter);
 }
 
 static inline void arch_leave_lazy_mmu_mode(lazy_mmu_state_t state)
 {
-       PVOP_VCALL0(mmu.lazy_mode.leave);
+       PVOP_VCALL1(mmu.lazy_mode.leave, state);
 }
 
 static inline void arch_flush_lazy_mmu_mode(void)
arch/x86/include/asm/paravirt_types.h
index bc1af86868a3f7a6e71247493df12eec23d4a50d..b7c567ccbf32b75aec192e7cb9d1cd01e6d5350e 100644
@@ -45,8 +45,8 @@ typedef int lazy_mmu_state_t;
 
 struct pv_lazy_ops {
        /* Set deferred update mode, used for batching operations. */
-       void (*enter)(void);
-       void (*leave)(void);
+       lazy_mmu_state_t (*enter)(void);
+       void (*leave)(lazy_mmu_state_t);
        void (*flush)(void);
 } __no_randomize_layout;
 #endif
arch/x86/xen/mmu_pv.c
index 2039d5132ca37400ab3acfe3220ef16b053e2200..6e5390ff06a58fd579cd3425d849ff052624aa5d 100644
@@ -2130,9 +2130,13 @@ static void xen_set_fixmap(unsigned idx, phys_addr_t phys, pgprot_t prot)
 #endif
 }
 
-static void xen_enter_lazy_mmu(void)
+static lazy_mmu_state_t xen_enter_lazy_mmu(void)
 {
+       if (this_cpu_read(xen_lazy_mode) == XEN_LAZY_MMU)
+               return LAZY_MMU_NESTED;
+
        enter_lazy(XEN_LAZY_MMU);
+       return LAZY_MMU_DEFAULT;
 }
 
 static void xen_flush_lazy_mmu(void)
@@ -2167,11 +2171,12 @@ static void __init xen_post_allocator_init(void)
        pv_ops.mmu.write_cr3 = &xen_write_cr3;
 }
 
-static void xen_leave_lazy_mmu(void)
+static void xen_leave_lazy_mmu(lazy_mmu_state_t state)
 {
        preempt_disable();
        xen_mc_flush();
-       leave_lazy(XEN_LAZY_MMU);
+       if (state != LAZY_MMU_NESTED)
+               leave_lazy(XEN_LAZY_MMU);
        preempt_enable();
 }