 struct module;
 struct dyn_ftrace;
+struct ftrace_ops;
 
 bool ftrace_need_init_nop(void);
 #define ftrace_need_init_nop ftrace_need_init_nop
        return !strcmp(sym + 7, name) || !strcmp(sym + 8, name);
 }
 
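+/*
+ * ftrace_graph_func() replaces the patched ftrace_graph_caller stub.
+ * The self-referencing define lets common code detect via #ifdef that
+ * this architecture provides its own implementation.
+ */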
+void ftrace_graph_func(unsigned long ip, unsigned long parent_ip,
+                      struct ftrace_ops *op, struct ftrace_regs *fregs);
+#define ftrace_graph_func ftrace_graph_func
+
 #endif /* __ASSEMBLY__ */
 
 #ifdef CONFIG_FUNCTION_TRACER
 
 }
 
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
-/*
- * Hook the return address and push it in the stack of return addresses
- * in current thread info.
- */
-unsigned long prepare_ftrace_return(unsigned long ra, unsigned long sp,
-                                   unsigned long ip)
-{
-       if (unlikely(ftrace_graph_is_dead()))
-               goto out;
-       if (unlikely(atomic_read(&current->tracing_graph_pause)))
-               goto out;
-       ip -= MCOUNT_INSN_SIZE;
-       if (!function_graph_enter(ra, ip, 0, (void *) sp))
-               ra = (unsigned long) return_to_handler;
-out:
-       return ra;
-}
-NOKPROBE_SYMBOL(prepare_ftrace_return);
 
-/*
- * Patch the kernel code at ftrace_graph_caller location. The instruction
- * there is branch relative on condition. To enable the ftrace graph code
- * block, we simply patch the mask field of the instruction to zero and
- * turn the instruction into a nop.
- * To disable the ftrace graph code the mask field will be patched to
- * all ones, which turns the instruction into an unconditional branch.
- */
-int ftrace_enable_ftrace_graph_caller(void)
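+/*
+ * Hook the return address and push it in the stack of return addresses
+ * of the current task. The return address saved in r14 within the
+ * ftrace_regs is redirected to return_to_handler when the entry is
+ * accepted.
+ */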
+void ftrace_graph_func(unsigned long ip, unsigned long parent_ip,
+                      struct ftrace_ops *op, struct ftrace_regs *fregs)
 {
-       /* Expect brc 0xf,... */
-       return ftrace_patch_branch_mask(ftrace_graph_caller, 0xa7f4, false);
-}
+       unsigned long *parent = &arch_ftrace_regs(fregs)->regs.gprs[14];
+       int bit;
 
-int ftrace_disable_ftrace_graph_caller(void)
-{
-       /* Expect brc 0x0,... */
-       return ftrace_patch_branch_mask(ftrace_graph_caller, 0xa704, true);
+       if (unlikely(ftrace_graph_is_dead()))
+               return;
+       if (unlikely(atomic_read(&current->tracing_graph_pause)))
+               return;
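+       /* Protect against recursion while entering the graph tracer. */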
+       bit = ftrace_test_recursion_trylock(ip, *parent);
+       if (bit < 0)
+               return;
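+       /* On success, redirect the saved r14 to return_to_handler. */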
+       if (!function_graph_enter_regs(*parent, ip, 0, parent, fregs))
+               *parent = (unsigned long)&return_to_handler;
+       ftrace_test_recursion_unlock(bit);
 }
 
 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
 
        lgr     %r3,%r14
        la      %r5,STACK_FREGS(%r15)
        BASR_EX %r14,%r1
-#ifdef CONFIG_FUNCTION_GRAPH_TRACER
-# The j instruction gets runtime patched to a nop instruction.
-# See ftrace_enable_ftrace_graph_caller.
-SYM_INNER_LABEL(ftrace_graph_caller, SYM_L_GLOBAL)
-       j       .Lftrace_graph_caller_end
-       lmg     %r2,%r3,(STACK_FREGS_PTREGS_GPRS+14*8)(%r15)
-       lg      %r4,(STACK_FREGS_PTREGS_PSW+8)(%r15)
-       brasl   %r14,prepare_ftrace_return
-       stg     %r2,(STACK_FREGS_PTREGS_GPRS+14*8)(%r15)
-.Lftrace_graph_caller_end:
-#endif
        lg      %r0,(STACK_FREGS_PTREGS_PSW+8)(%r15)
 #ifdef MARCH_HAS_Z196_FEATURES
        ltg     %r1,STACK_FREGS_PTREGS_ORIG_GPR2(%r15)