return;
        }
 
-       if (ftrace_push_return_trace(old, self_addr, &trace.depth) == -EBUSY) {
+       if (ftrace_push_return_trace(old, self_addr, &trace.depth, 0) == -EBUSY) {
                *parent = old;
                return;
        }
 
                goto out;
        if (unlikely(atomic_read(&current->tracing_graph_pause)))
                goto out;
-       if (ftrace_push_return_trace(parent, ip, &trace.depth) == -EBUSY)
+       if (ftrace_push_return_trace(parent, ip, &trace.depth, 0) == -EBUSY)
                goto out;
        trace.func = ftrace_mcount_call_adjust(ip) & PSW_ADDR_INSN;
        /* Only trace if the calling function expects to. */
 
        select HAVE_DYNAMIC_FTRACE
        select HAVE_FUNCTION_TRACER
        select HAVE_FUNCTION_GRAPH_TRACER
+       select HAVE_FUNCTION_GRAPH_FP_TEST
        select HAVE_FUNCTION_TRACE_MCOUNT_TEST
        select HAVE_FTRACE_NMI_ENTER if DYNAMIC_FTRACE
        select HAVE_FTRACE_SYSCALLS
 
        pushl %edx
        movl 0xc(%esp), %edx
        lea 0x4(%ebp), %eax
+       movl (%ebp), %ecx
        subl $MCOUNT_INSN_SIZE, %edx
        call prepare_ftrace_return
        popl %edx
        pushl %eax
        pushl %ecx
        pushl %edx
+       movl %ebp, %eax
        call ftrace_return_to_handler
        movl %eax, 0xc(%esp)
        popl %edx
 
 
        leaq 8(%rbp), %rdi
        movq 0x38(%rsp), %rsi
+       movq (%rbp), %rdx
        subq $MCOUNT_INSN_SIZE, %rsi
 
        call    prepare_ftrace_return
        /* Save the return values */
        movq %rax, (%rsp)
        movq %rdx, 8(%rsp)
+       movq %rbp, %rdi
 
        call ftrace_return_to_handler
 
 
  * Hook the return address and push it in the stack of return addrs
  * in current thread info.
  */
-void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr)
+void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr,
+                          unsigned long frame_pointer)
 {
        unsigned long old;
        int faulted;
                return;
        }
 
-       if (ftrace_push_return_trace(old, self_addr, &trace.depth) == -EBUSY) {
+       if (ftrace_push_return_trace(old, self_addr, &trace.depth,
+                   frame_pointer) == -EBUSY) {
                *parent = old;
                return;
        }
 
        unsigned long func;
        unsigned long long calltime;
        unsigned long long subtime;
+       unsigned long fp;
 };
 
 /*
 extern void return_to_handler(void);
 
 extern int
-ftrace_push_return_trace(unsigned long ret, unsigned long func, int *depth);
+ftrace_push_return_trace(unsigned long ret, unsigned long func, int *depth,
+                        unsigned long frame_pointer);
 
 /*
  * Sometimes we don't want to trace a function with the function
 
 config HAVE_FUNCTION_GRAPH_TRACER
        bool
 
+config HAVE_FUNCTION_GRAPH_FP_TEST
+       bool
+       help
+        An arch may pass in a unique value (frame pointer) to both the
+        entering and exiting of a function. On exit, the value is compared
+        and if it does not match, then it will panic the kernel.
+
 config HAVE_FUNCTION_TRACE_MCOUNT_TEST
        bool
        help
 
 
 /* Add a function return address to the trace stack on thread info.*/
 int
-ftrace_push_return_trace(unsigned long ret, unsigned long func, int *depth)
+ftrace_push_return_trace(unsigned long ret, unsigned long func, int *depth,
+                        unsigned long frame_pointer)
 {
        unsigned long long calltime;
        int index;
        current->ret_stack[index].func = func;
        current->ret_stack[index].calltime = calltime;
        current->ret_stack[index].subtime = 0;
+       current->ret_stack[index].fp = frame_pointer;
        *depth = index;
 
        return 0;
 
 /* Retrieve a function return address to the trace stack on thread info.*/
 static void
-ftrace_pop_return_trace(struct ftrace_graph_ret *trace, unsigned long *ret)
+ftrace_pop_return_trace(struct ftrace_graph_ret *trace, unsigned long *ret,
+                       unsigned long frame_pointer)
 {
        int index;
 
                return;
        }
 
+#ifdef CONFIG_HAVE_FUNCTION_GRAPH_FP_TEST
+       /*
+        * The arch may choose to record the frame pointer used
+        * and check it here to make sure that it is what we expect it
+        * to be. If gcc does not set the place holder of the return
+        * address in the frame pointer, and does a copy instead, then
+        * the function graph trace will fail. This test detects this
+        * case.
+        *
+        * Currently, x86_32 with optimize for size (-Os) makes the latest
+        * gcc do the above.
+        */
+       if (unlikely(current->ret_stack[index].fp != frame_pointer)) {
+               ftrace_graph_stop();
+               WARN(1, "Bad frame pointer: expected %lx, received %lx\n"
+                    "  from func %pF return to %lx\n",
+                    current->ret_stack[index].fp,
+                    frame_pointer,
+                    (void *)current->ret_stack[index].func,
+                    current->ret_stack[index].ret);
+               *ret = (unsigned long)panic;
+               return;
+       }
+#endif
+
        *ret = current->ret_stack[index].ret;
        trace->func = current->ret_stack[index].func;
        trace->calltime = current->ret_stack[index].calltime;
  * Send the trace to the ring-buffer.
  * @return the original return address.
  */
-unsigned long ftrace_return_to_handler(void)
+unsigned long ftrace_return_to_handler(unsigned long frame_pointer)
 {
        struct ftrace_graph_ret trace;
        unsigned long ret;
 
-       ftrace_pop_return_trace(&trace, &ret);
+       ftrace_pop_return_trace(&trace, &ret, frame_pointer);
        trace.rettime = trace_clock_local();
        ftrace_graph_return(&trace);
        barrier();