Now that lazy preemption is supported, it is desirable to be able to
switch the preemption model at runtime. Add support for dynamic
preemption using the static-key flavour, HAVE_PREEMPT_DYNAMIC_KEY.
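A rough sketch of what the key flavour does (simplified from the generic
machinery in kernel/sched/core.c; sketch_dynamic_update() below is
illustrative, not an upstream function, and lazy handling is omitted):
writing a model to /sys/kernel/debug/sched/preempt boils down to
enabling or disabling static keys such as
sk_dynamic_irqentry_exit_cond_resched, which the interrupt-exit path
then tests:

	#include <linux/jump_label.h>

	DECLARE_STATIC_KEY_TRUE(sk_dynamic_irqentry_exit_cond_resched);

	/* Sketch only: flip the IRQ-exit key when the model changes */
	static void sketch_dynamic_update(bool full)
	{
		if (full)
			/* full: allow preemption on interrupt return */
			static_key_enable(&sk_dynamic_irqentry_exit_cond_resched.key);
		else
			/* none/voluntary: reschedule only at explicit points */
			static_key_disable(&sk_dynamic_irqentry_exit_cond_resched.key);
	}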
Tested lightly on a Power10 LPAR.
Performance numbers indicate that preempt=none (non-dynamic) and
preempt=none (dynamic) are close.
# cat /sys/kernel/debug/sched/preempt
(none) voluntary full lazy

# perf stat -e probe:__cond_resched -a sleep 1
 Performance counter stats for 'system wide':
             1,253      probe:__cond_resched

# echo full > /sys/kernel/debug/sched/preempt
# cat /sys/kernel/debug/sched/preempt
none voluntary (full) lazy

# perf stat -e probe:__cond_resched -a sleep 1
 Performance counter stats for 'system wide':
                 0      probe:__cond_resched
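Note: the probe:__cond_resched event above assumes the kprobe was
created beforehand, e.g. with:

# perf probe -a __cond_resched

The count dropping to zero after switching to full confirms the static
keys were flipped at runtime: with preempt=full, cond_resched() becomes
a no-op and never reaches __cond_resched().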
Signed-off-by: Shrikanth Hegde <sshegde@linux.ibm.com>
Signed-off-by: Madhavan Srinivasan <maddy@linux.ibm.com>
Link: https://patch.msgid.link/20250210184334.567383-2-sshegde@linux.ibm.com
        select HAVE_PERF_EVENTS_NMI             if PPC64
        select HAVE_PERF_REGS
        select HAVE_PERF_USER_STACK_DUMP
+       select HAVE_PREEMPT_DYNAMIC_KEY
        select HAVE_RETHOOK                     if KPROBES
        select HAVE_REGS_AND_STACK_ACCESS_API
        select HAVE_RELIABLE_STACKTRACE
 
--- /dev/null
+++ b/arch/powerpc/include/asm/preempt.h
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __ASM_POWERPC_PREEMPT_H
+#define __ASM_POWERPC_PREEMPT_H
+
+#include <asm-generic/preempt.h>
+
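+/*
+ * need_irq_preemption(): should the interrupt-exit path try to
+ * preempt? With PREEMPT_DYNAMIC this follows a static branch that the
+ * scheduler flips when the preemption model changes; otherwise it is
+ * fixed at compile time by CONFIG_PREEMPTION.
+ */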
+#if defined(CONFIG_PREEMPT_DYNAMIC)
+#include <linux/jump_label.h>
+DECLARE_STATIC_KEY_TRUE(sk_dynamic_irqentry_exit_cond_resched);
+#define need_irq_preemption() \
+       (static_branch_unlikely(&sk_dynamic_irqentry_exit_cond_resched))
+#else
+#define need_irq_preemption()   (IS_ENABLED(CONFIG_PREEMPTION))
+#endif
+
+#endif /* __ASM_POWERPC_PREEMPT_H */
 
 unsigned long global_dbcr0[NR_CPUS];
 #endif
 
+#if defined(CONFIG_PREEMPT_DYNAMIC)
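+/*
+ * Toggled by the scheduler core when the preemption model is changed
+ * at runtime (see /sys/kernel/debug/sched/preempt).
+ */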
+DEFINE_STATIC_KEY_TRUE(sk_dynamic_irqentry_exit_cond_resched);
+#endif
+
 #ifdef CONFIG_PPC_BOOK3S_64
 DEFINE_STATIC_KEY_FALSE(interrupt_exit_not_reentrant);
 static inline bool exit_must_hard_disable(void)
                /* Returning to a kernel context with local irqs enabled. */
                WARN_ON_ONCE(!(regs->msr & MSR_EE));
 again:
-               if (IS_ENABLED(CONFIG_PREEMPTION)) {
+               if (need_irq_preemption()) {
                        /* Return to preemptible kernel context */
                        if (unlikely(read_thread_flags() & _TIF_NEED_RESCHED)) {
                                if (preempt_count() == 0)
 
         * set and we are preemptible. The hack here is to schedule a
         * decrementer to fire here and reschedule for us if necessary.
         */
-       if (IS_ENABLED(CONFIG_PREEMPTION) && need_resched())
+       if (need_irq_preemption() && need_resched())
                set_dec(1);
        return 0;
 }