From: Ada Couprie Diaz
Date: Mon, 7 Jul 2025 11:41:02 +0000 (+0100)
Subject: arm64: entry: Add entry and exit functions for debug exceptions
X-Git-Url: https://www.infradead.org/git/?a=commitdiff_plain;h=eaff68b3286116d499a3d4e513a36d772faba587;p=users%2Fhch%2Fmisc.git

arm64: entry: Add entry and exit functions for debug exceptions

Move the `debug_exception_enter()` and `debug_exception_exit()`
functions from mm/fault.c, as they are needed to split the debug
exceptions entry paths from the current unified one.

Make them externally visible in include/asm/exception.h until the
caller in mm/fault.c is cleaned up.

Signed-off-by: Ada Couprie Diaz
Tested-by: Luis Claudio R. Goncalves
Reviewed-by: Anshuman Khandual
Reviewed-by: Will Deacon
Acked-by: Mark Rutland
Link: https://lore.kernel.org/r/20250707114109.35672-7-ada.coupriediaz@arm.com
Signed-off-by: Will Deacon
---

diff --git a/arch/arm64/include/asm/exception.h b/arch/arm64/include/asm/exception.h
index d48fc16584cd..e54b5466fd2c 100644
--- a/arch/arm64/include/asm/exception.h
+++ b/arch/arm64/include/asm/exception.h
@@ -80,4 +80,8 @@ void do_serror(struct pt_regs *regs, unsigned long esr);
 void do_signal(struct pt_regs *regs);
 
 void __noreturn panic_bad_stack(struct pt_regs *regs, unsigned long esr, unsigned long far);
+
+void debug_exception_enter(struct pt_regs *regs);
+void debug_exception_exit(struct pt_regs *regs);
+
 #endif	/* __ASM_EXCEPTION_H */
diff --git a/arch/arm64/kernel/entry-common.c b/arch/arm64/kernel/entry-common.c
index 7c1970b341b8..3bdfa5abaf7a 100644
--- a/arch/arm64/kernel/entry-common.c
+++ b/arch/arm64/kernel/entry-common.c
@@ -441,6 +441,28 @@ static __always_inline void fpsimd_syscall_exit(void)
 	__this_cpu_write(fpsimd_last_state.to_save, FP_STATE_CURRENT);
 }
 
+/*
+ * In debug exception context, we explicitly disable preemption despite
+ * having interrupts disabled.
+ * This serves two purposes: it makes it much less likely that we would
+ * accidentally schedule in exception context and it will force a warning
+ * if we somehow manage to schedule by accident.
+ */
+void debug_exception_enter(struct pt_regs *regs)
+{
+	preempt_disable();
+
+	/* This code is a bit fragile.  Test it. */
+	RCU_LOCKDEP_WARN(!rcu_is_watching(), "exception_enter didn't work");
+}
+NOKPROBE_SYMBOL(debug_exception_enter);
+
+void debug_exception_exit(struct pt_regs *regs)
+{
+	preempt_enable_no_resched();
+}
+NOKPROBE_SYMBOL(debug_exception_exit);
+
 UNHANDLED(el1t, 64, sync)
 UNHANDLED(el1t, 64, irq)
 UNHANDLED(el1t, 64, fiq)
diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c
index ec0a337891dd..d451d7d834f1 100644
--- a/arch/arm64/mm/fault.c
+++ b/arch/arm64/mm/fault.c
@@ -966,28 +966,6 @@ void __init hook_debug_fault_code(int nr,
 	debug_fault_info[nr].name	= name;
 }
 
-/*
- * In debug exception context, we explicitly disable preemption despite
- * having interrupts disabled.
- * This serves two purposes: it makes it much less likely that we would
- * accidentally schedule in exception context and it will force a warning
- * if we somehow manage to schedule by accident.
- */
-static void debug_exception_enter(struct pt_regs *regs)
-{
-	preempt_disable();
-
-	/* This code is a bit fragile.  Test it. */
-	RCU_LOCKDEP_WARN(!rcu_is_watching(), "exception_enter didn't work");
-}
-NOKPROBE_SYMBOL(debug_exception_enter);
-
-static void debug_exception_exit(struct pt_regs *regs)
-{
-	preempt_enable_no_resched();
-}
-NOKPROBE_SYMBOL(debug_exception_exit);
-
 void do_debug_exception(unsigned long addr_if_watchpoint, unsigned long esr,
 			struct pt_regs *regs)
 {
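The two helpers bracket the body of a debug-exception handler: enter disables
preemption and warns if RCU is not watching, exit drops the preemption count
without forcing a reschedule. A minimal usage sketch follows; the real dispatch
through debug_fault_info in do_debug_exception() is reduced here to a
hypothetical handle_debug_fault() helper, so this shows the calling pattern
only, not the upstream function body.

/*
 * Usage sketch only: the actual do_debug_exception() in arch/arm64/mm/fault.c
 * looks up its handler in debug_fault_info; that dispatch is replaced by a
 * hypothetical handle_debug_fault() to show how the enter/exit helpers are
 * meant to wrap the handling.
 */
void do_debug_exception(unsigned long addr_if_watchpoint, unsigned long esr,
			struct pt_regs *regs)
{
	/* Disable preemption and warn if RCU is not watching. */
	debug_exception_enter(regs);

	handle_debug_fault(addr_if_watchpoint, esr, regs);	/* hypothetical */

	/* Re-enable preemption without an immediate reschedule. */
	debug_exception_exit(regs);
}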