arm64: entry: Refactor preempt_schedule_irq() check code
author     Jinjie Ruan <ruanjinjie@huawei.com>
           Fri, 15 Aug 2025 03:06:31 +0000 (11:06 +0800)
committer  Will Deacon <will@kernel.org>
           Thu, 11 Sep 2025 14:55:34 +0000 (15:55 +0100)
To align the structure of the code with irqentry_exit_cond_resched()
from the generic entry code, hoist the need_irq_preemption() and
IS_ENABLED() checks earlier, and define separate preemption check
functions depending on whether dynamic preemption is enabled.

Signed-off-by: Jinjie Ruan <ruanjinjie@huawei.com>
Reviewed-by: Ada Couprie Diaz <ada.coupriediaz@arm.com>
Acked-by: Catalin Marinas <catalin.marinas@arm.com>
Signed-off-by: Will Deacon <will@kernel.org>
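
[Editor's note] For readers tracing the refactor, the stand-alone C sketch below mimics the dispatch shape this patch gives arm64, matching the raw/dynamic split used by the generic entry code: a raw helper that performs the actual preempt_count()/need_resched() policy check, a dynamic wrapper gated by the sk_dynamic_irqentry_exit_cond_resched static key, and a macro that selects between them. It is an editorial illustration only, not kernel code; the fake_* variables, the *_stub function and main() are invented stand-ins, and a plain bool models the static key.

#include <stdbool.h>
#include <stdio.h>

/* Stand-ins for kernel state, invented for this illustration only. */
static int  fake_preempt_count = 0;     /* not inside a preempt-disabled section */
static bool fake_need_resched  = true;  /* the scheduler has a task waiting      */
static bool sk_dynamic_irqentry_exit_cond_resched = true; /* models the static key */

static void preempt_schedule_irq_stub(void)
{
	puts("preempt_schedule_irq(): switching tasks");
}

/* Shape of raw_irqentry_exit_cond_resched(): the unconditional policy check. */
static void raw_irqentry_exit_cond_resched(void)
{
	if (!fake_preempt_count && fake_need_resched)
		preempt_schedule_irq_stub();
}

/* Shape of dynamic_irqentry_exit_cond_resched(): the "static key" gates the raw helper. */
static void dynamic_irqentry_exit_cond_resched(void)
{
	if (!sk_dynamic_irqentry_exit_cond_resched)
		return;
	raw_irqentry_exit_cond_resched();
}

/*
 * With CONFIG_PREEMPT_DYNAMIC the header maps irqentry_exit_cond_resched()
 * to the dynamic wrapper; without it, straight to the raw helper; without
 * CONFIG_PREEMPTION, to an empty statement. The dynamic case is modelled here.
 */
#define irqentry_exit_cond_resched() dynamic_irqentry_exit_cond_resched()

int main(void)
{
	irqentry_exit_cond_resched();   /* key enabled: prints and "reschedules"  */
	sk_dynamic_irqentry_exit_cond_resched = false;
	irqentry_exit_cond_resched();   /* key disabled: returns immediately      */
	return 0;
}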
arch/arm64/include/asm/preempt.h
arch/arm64/kernel/entry-common.c

diff --git a/arch/arm64/include/asm/preempt.h b/arch/arm64/include/asm/preempt.h
index 0159b625cc7f0e7d6996b34b4de8e71b04ca32e5..c2437ea0790f64a9933476cf71cea27340aaf216 100644
--- a/arch/arm64/include/asm/preempt.h
+++ b/arch/arm64/include/asm/preempt.h
@@ -85,6 +85,7 @@ static inline bool should_resched(int preempt_offset)
 void preempt_schedule(void);
 void preempt_schedule_notrace(void);
 
+void raw_irqentry_exit_cond_resched(void);
 #ifdef CONFIG_PREEMPT_DYNAMIC
 
 DECLARE_STATIC_KEY_TRUE(sk_dynamic_irqentry_exit_cond_resched);
@@ -92,13 +93,18 @@ void dynamic_preempt_schedule(void);
 #define __preempt_schedule()           dynamic_preempt_schedule()
 void dynamic_preempt_schedule_notrace(void);
 #define __preempt_schedule_notrace()   dynamic_preempt_schedule_notrace()
+void dynamic_irqentry_exit_cond_resched(void);
+#define irqentry_exit_cond_resched()   dynamic_irqentry_exit_cond_resched()
 
 #else /* CONFIG_PREEMPT_DYNAMIC */
 
 #define __preempt_schedule()           preempt_schedule()
 #define __preempt_schedule_notrace()   preempt_schedule_notrace()
+#define irqentry_exit_cond_resched()   raw_irqentry_exit_cond_resched()
 
 #endif /* CONFIG_PREEMPT_DYNAMIC */
+#else /* CONFIG_PREEMPTION */
+#define irqentry_exit_cond_resched()   {}
 #endif /* CONFIG_PREEMPTION */
 
 #endif /* __ASM_PREEMPT_H */
diff --git a/arch/arm64/kernel/entry-common.c b/arch/arm64/kernel/entry-common.c
index 1ba1d40fa6a7244ed4c9abeb222c3d25ab2fcff9..64066c643f97fe5359271a7bbec0ca3b75969ff9 100644
--- a/arch/arm64/kernel/entry-common.c
+++ b/arch/arm64/kernel/entry-common.c
@@ -286,19 +286,8 @@ static void noinstr arm64_exit_el1_dbg(struct pt_regs *regs,
                lockdep_hardirqs_on(CALLER_ADDR0);
 }
 
-#ifdef CONFIG_PREEMPT_DYNAMIC
-DEFINE_STATIC_KEY_TRUE(sk_dynamic_irqentry_exit_cond_resched);
-#define need_irq_preemption() \
-       (static_branch_unlikely(&sk_dynamic_irqentry_exit_cond_resched))
-#else
-#define need_irq_preemption()  (IS_ENABLED(CONFIG_PREEMPTION))
-#endif
-
 static inline bool arm64_preempt_schedule_irq(void)
 {
-       if (!need_irq_preemption())
-               return false;
-
        /*
         * DAIF.DA are cleared at the start of IRQ/FIQ handling, and when GIC
         * priority masking is used the GIC irqchip driver will clear DAIF.IF
@@ -682,6 +671,26 @@ static __always_inline void __el1_pnmi(struct pt_regs *regs,
        arm64_exit_nmi(regs, state);
 }
 
+#ifdef CONFIG_PREEMPTION
+void raw_irqentry_exit_cond_resched(void)
+{
+       if (!preempt_count()) {
+               if (need_resched() && arm64_preempt_schedule_irq())
+                       preempt_schedule_irq();
+       }
+}
+#endif
+
+#ifdef CONFIG_PREEMPT_DYNAMIC
+DEFINE_STATIC_KEY_TRUE(sk_dynamic_irqentry_exit_cond_resched);
+void dynamic_irqentry_exit_cond_resched(void)
+{
+       if (!static_branch_unlikely(&sk_dynamic_irqentry_exit_cond_resched))
+               return;
+       raw_irqentry_exit_cond_resched();
+}
+#endif
+
 static __always_inline void __el1_irq(struct pt_regs *regs,
                                      void (*handler)(struct pt_regs *))
 {
@@ -693,10 +702,8 @@ static __always_inline void __el1_irq(struct pt_regs *regs,
        do_interrupt_handler(regs, handler);
        irq_exit_rcu();
 
-       if (!preempt_count() && need_resched()) {
-               if (arm64_preempt_schedule_irq())
-                       preempt_schedule_irq();
-       }
+       if (IS_ENABLED(CONFIG_PREEMPTION))
+               irqentry_exit_cond_resched();
 
        exit_to_kernel_mode(regs, state);
 }
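
[Editor's note] One detail worth spelling out at the new call site in __el1_irq(): IS_ENABLED(CONFIG_PREEMPTION) expands to a compile-time constant 0 or 1, so on non-preemptible kernels the branch is dead code and the {} stub definition of irqentry_exit_cond_resched() from preempt.h never generates a call. The stand-alone sketch below models that folding; CONFIG_PREEMPTION_ON, IS_ENABLED_LIKE() and el1_irq_exit_path() are simplified stand-ins invented for the example, not the kernel's real macros.

#include <stdio.h>

/*
 * Simplified stand-in for the kernel's IS_ENABLED(): the real macro detects
 * whether a CONFIG_* symbol is set; here we just test a plain 0/1 constant.
 * Flip CONFIG_PREEMPTION_ON to model CONFIG_PREEMPTION=y vs =n.
 */
#define CONFIG_PREEMPTION_ON 1
#define IS_ENABLED_LIKE(opt) (opt)

#if CONFIG_PREEMPTION_ON
static void irqentry_exit_cond_resched(void)
{
	puts("irqentry_exit_cond_resched(): may preempt on IRQ exit");
}
#else
/* Mirrors the !CONFIG_PREEMPTION stub added to preempt.h. */
#define irqentry_exit_cond_resched() {}
#endif

/* Mirrors the tail of __el1_irq(): a constant condition the compiler folds away. */
static void el1_irq_exit_path(void)
{
	if (IS_ENABLED_LIKE(CONFIG_PREEMPTION_ON))
		irqentry_exit_cond_resched();
}

int main(void)
{
	el1_irq_exit_path();
	return 0;
}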