#ifdef CONFIG_FUNCTION_TRACER
extern unsigned int ftrace_tramp_text[], ftrace_tramp_init[];
+ #ifdef CONFIG_PPC_FTRACE_OUT_OF_LINE
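+ /*
+  * One out-of-line stub is emitted per traced function. With
+  * CONFIG_DYNAMIC_FTRACE_WITH_CALL_OPS, the ftrace_ops pointer sits just
+  * ahead of the stub instructions so the trampoline can load it from a
+  * fixed offset relative to the stub.
+  */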
+ struct ftrace_ool_stub {
+ #ifdef CONFIG_DYNAMIC_FTRACE_WITH_CALL_OPS
+ struct ftrace_ops *ftrace_op;
+ #endif
+ u32 insn[4];
+ } __aligned(sizeof(unsigned long));
+ extern struct ftrace_ool_stub ftrace_ool_stub_text_end[], ftrace_ool_stub_text[],
+ ftrace_ool_stub_inittext[];
+ extern unsigned int ftrace_ool_stub_text_end_count, ftrace_ool_stub_text_count,
+ ftrace_ool_stub_inittext_count;
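+ /*
+  * Sketch (assumed helper, not part of this patch) of how the stub arrays
+  * pair with their counts, e.g. to locate the stub covering an address:
+  *
+  *	for (i = 0; i < ftrace_ool_stub_text_count; i++)
+  *		if (addr == (unsigned long)ftrace_ool_stub_text[i].insn)
+  *			return &ftrace_ool_stub_text[i];
+  */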
+ #endif
void ftrace_free_init_tramp(void);
unsigned long ftrace_call_adjust(unsigned long addr);
+
+ #ifdef CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS
+ /*
+ * When an ftrace registered caller is tracing a function that is also set by a
+ * register_ftrace_direct() call, it needs to be differentiated in the
+ * ftrace_caller trampoline so that the direct call can be invoked after the
+ * other ftrace ops. To do this, place the direct caller's address in the
+ * orig_gpr3 field of pt_regs. This tells ftrace_caller that there is a
+ * direct caller.
+ */
+ static inline void arch_ftrace_set_direct_caller(struct ftrace_regs *fregs, unsigned long addr)
+ {
+ struct pt_regs *regs = &arch_ftrace_regs(fregs)->regs;
+
+ regs->orig_gpr3 = addr;
+ }
+ #endif /* CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS */
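+ /*
+  * Usage sketch (hypothetical, not part of this patch): generic ftrace code
+  * invokes this from the direct-call ops callback with the address of the
+  * direct trampoline, e.g.:
+  *
+  *	arch_ftrace_set_direct_caller(fregs, (unsigned long)my_direct_tramp);
+  *
+  * ftrace_caller then picks the address out of orig_gpr3 and branches to it
+  * instead of returning straight to the traced function.
+  */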
#else
static inline void ftrace_free_init_tramp(void) { }
static inline unsigned long ftrace_call_adjust(unsigned long addr) { return addr; }
#else /* CONFIG_PPC64 */
-/*
- * And here is the simpler 32 bits version
- */
struct vdso_arch_data {
- __u64 tb_ticks_per_sec; /* Timebase tics / sec 0x38 */
+ __u64 tb_ticks_per_sec; /* Timebase tics / sec */
__u32 syscall_map[SYSCALL_MAP_SIZE]; /* Map of syscalls */
__u32 compat_syscall_map[0]; /* No compat syscalls on PPC32 */
- struct vdso_data data[CS_BASES];
struct vdso_rng_data rng_data;
+
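+ /* Page-aligned and last so the generic data can be mapped on its own page(s) */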
+ struct vdso_data data[CS_BASES] __aligned(1 << CONFIG_PAGE_SHIFT);
};
#endif /* CONFIG_PPC64 */
BOOK3S_INTERRUPT_EXTERNAL, 0);
else
lpcr |= LPCR_MER;
+ } else {
+ /*
+ * L1's copy of L2's LPCR (vcpu->arch.vcore->lpcr) can get its MER bit
+ * unexpectedly set, e.g. during NMI handling when all register
+ * states are synchronized from L0 to L1. L1 needs to inform L0 about
+ * MER=1 only when there are pending external interrupts.
+ * In the if check above, the MER bit is set if there are pending
+ * external interrupts. Hence, explicitly mask off the MER bit here,
+ * as it may otherwise generate spurious interrupts in L2 KVM,
+ * causing an endless loop that hangs the L2 guest.
+ */
+ lpcr &= ~LPCR_MER;
}
} else if (vcpu->arch.pending_exceptions ||
- vcpu->arch.doorbell_request ||
xive_interrupt_pending(vcpu)) {
vcpu->arch.ret = RESUME_HOST;
goto out;