        __ftrace_trace_stack(global_trace.buffer, flags, 3, preempt_count());
 }
 
+static DEFINE_PER_CPU(int, user_stack_count);
+
 void
 ftrace_trace_userstack(struct ring_buffer *buffer, unsigned long flags, int pc)
 {
        if (unlikely(in_nmi()))
                return;
 
+       /*
+        * prevent recursion, since the user stack tracing may
+        * trigger other kernel events.
+        */
+       preempt_disable();
+       if (__this_cpu_read(user_stack_count))
+               goto out;
+
+       __this_cpu_inc(user_stack_count);
+
        event = trace_buffer_lock_reserve(buffer, TRACE_USER_STACK,
                                          sizeof(*entry), flags, pc);
        if (!event)
        save_stack_trace_user(&trace);
        if (!filter_check_discard(call, entry, buffer, event))
                ring_buffer_unlock_commit(buffer, event);
+
+       __this_cpu_dec(user_stack_count);
+
+ out:
+       preempt_enable();
 }
 
 #ifdef UNUSED
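
For reference, the recursion guard added above boils down to a small pattern: keep a per-context counter, refuse to enter if it is already non-zero, and drop it again on the way out. The sketch below is not part of the patch; it shows the same idea in plain userspace C, where the names trace_event() and do_trace_work() are made up and a __thread variable stands in for the per-CPU user_stack_count.

#include <stdio.h>

/*
 * trace_depth plays the role of the patch's per-CPU user_stack_count.
 * Thread-local storage is the closest userspace analogue; the kernel
 * version also disables preemption so the task cannot migrate to
 * another CPU while the counter is held.
 */
static __thread int trace_depth;

static void do_trace_work(int level);

/* Entry point that must not re-enter itself. */
static void trace_event(int level)
{
	if (trace_depth)
		return;		/* already tracing on this thread: bail out */

	trace_depth++;
	do_trace_work(level);
	trace_depth--;
}

static void do_trace_work(int level)
{
	printf("tracing, level %d\n", level);
	if (level < 3)
		trace_event(level + 1);	/* re-entrant event is suppressed by the guard */
}

int main(void)
{
	trace_event(0);		/* prints exactly one line */
	return 0;
}

In the kernel the counter is per-CPU and bracketed by preempt_disable()/preempt_enable(), because ftrace_trace_userstack() can run on any CPU and the task must not migrate between reading and updating the counter.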