trace_current_buffer_lock_reserve(struct ring_buffer **current_buffer,
                                  int type, unsigned long len,
                                  unsigned long flags, int pc);
-void trace_buffer_unlock_commit(struct ring_buffer *buffer,
+void trace_buffer_unlock_commit(struct trace_array *tr,
+                               struct ring_buffer *buffer,
                                struct ring_buffer_event *event,
                                unsigned long flags, int pc);
-void trace_buffer_unlock_commit_regs(struct ring_buffer *buffer,
+void trace_buffer_unlock_commit_regs(struct trace_array *tr,
+                                    struct ring_buffer *buffer,
                                     struct ring_buffer_event *event,
                                     unsigned long flags, int pc,
                                     struct pt_regs *regs);
        enum event_trigger_type tt = ETT_NONE;
 
        if (!__event_trigger_test_discard(file, buffer, event, entry, &tt))
-               trace_buffer_unlock_commit(buffer, event, irq_flags, pc);
+               trace_buffer_unlock_commit(file->tr, buffer, event, irq_flags, pc);
 
        if (tt)
                event_triggers_post_call(file, tt);
        enum event_trigger_type tt = ETT_NONE;
 
        if (!__event_trigger_test_discard(file, buffer, event, entry, &tt))
-               trace_buffer_unlock_commit_regs(buffer, event,
+               trace_buffer_unlock_commit_regs(file->tr, buffer, event,
                                                irq_flags, pc, regs);
 
        if (tt)
 
                memcpy((void *) t + sizeof(*t), data, len);
 
                if (blk_tracer)
-                       trace_buffer_unlock_commit(buffer, event, 0, pc);
+                       trace_buffer_unlock_commit(blk_tr, buffer, event, 0, pc);
        }
 }
 
                        memcpy((void *) t + sizeof(*t), pdu_data, pdu_len);
 
                if (blk_tracer) {
-                       trace_buffer_unlock_commit(buffer, event, 0, pc);
+                       trace_buffer_unlock_commit(blk_tr, buffer, event, 0, pc);
                        return;
                }
        }
 
        ring_buffer_unlock_commit(buffer, event);
 }
 
-static inline void
-__trace_buffer_unlock_commit(struct ring_buffer *buffer,
-                            struct ring_buffer_event *event,
-                            unsigned long flags, int pc)
+void trace_buffer_unlock_commit(struct trace_array *tr,
+                               struct ring_buffer *buffer,
+                               struct ring_buffer_event *event,
+                               unsigned long flags, int pc)
 {
        __buffer_unlock_commit(buffer, event);
 
        ftrace_trace_stack(buffer, flags, 6, pc);
        ftrace_trace_userstack(buffer, flags, pc);
 }
-
-void trace_buffer_unlock_commit(struct ring_buffer *buffer,
-                               struct ring_buffer_event *event,
-                               unsigned long flags, int pc)
-{
-       __trace_buffer_unlock_commit(buffer, event, flags, pc);
-}
 EXPORT_SYMBOL_GPL(trace_buffer_unlock_commit);
 
 static struct ring_buffer *temp_buffer;
 }
 EXPORT_SYMBOL_GPL(trace_current_buffer_lock_reserve);
 
-void trace_buffer_unlock_commit_regs(struct ring_buffer *buffer,
+void trace_buffer_unlock_commit_regs(struct trace_array *tr,
+                                    struct ring_buffer *buffer,
                                     struct ring_buffer_event *event,
                                     unsigned long flags, int pc,
                                     struct pt_regs *regs)
 
 
 static DEFINE_PER_CPU(atomic_t, ftrace_test_event_disable);
 
-static void
+static struct trace_array *event_tr;
+
+static void __init
 function_test_events_call(unsigned long ip, unsigned long parent_ip,
                          struct ftrace_ops *op, struct pt_regs *pt_regs)
 {
        entry->ip                       = ip;
        entry->parent_ip                = parent_ip;
 
-       trace_buffer_unlock_commit(buffer, event, flags, pc);
+       trace_buffer_unlock_commit(event_tr, buffer, event, flags, pc);
 
  out:
        atomic_dec(&per_cpu(ftrace_test_event_disable, cpu));
                return;
        }
        pr_info("Running tests again, along with the function tracer\n");
+       event_tr = top_trace_array();
+       if (WARN_ON(!event_tr))
+               return;
        event_trace_self_tests();
        unregister_ftrace_function(&trace_ops);
 }
 
        entry->rw                       = *rw;
 
        if (!call_filter_check_discard(call, entry, buffer, event))
-               trace_buffer_unlock_commit(buffer, event, 0, pc);
+               trace_buffer_unlock_commit(tr, buffer, event, 0, pc);
 }
 
 void mmio_trace_rw(struct mmiotrace_rw *rw)
        entry->map                      = *map;
 
        if (!call_filter_check_discard(call, entry, buffer, event))
-               trace_buffer_unlock_commit(buffer, event, 0, pc);
+               trace_buffer_unlock_commit(tr, buffer, event, 0, pc);
 }
 
 void mmio_trace_mapping(struct mmiotrace_map *map)
 
        entry->next_cpu = task_cpu(next);
 
        if (!call_filter_check_discard(call, entry, buffer, event))
-               trace_buffer_unlock_commit(buffer, event, flags, pc);
+               trace_buffer_unlock_commit(tr, buffer, event, flags, pc);
 }
 
 static void
        entry->next_cpu                 = task_cpu(wakee);
 
        if (!call_filter_check_discard(call, entry, buffer, event))
-               trace_buffer_unlock_commit(buffer, event, flags, pc);
+               trace_buffer_unlock_commit(tr, buffer, event, flags, pc);
 }
 
 static void notrace
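
For context, a minimal sketch of what a caller looks like after this change. This is illustrative only and not part of the patch: the entry type, TRACE_EXAMPLE, and example_trace_commit() are hypothetical names; trace_buffer_lock_reserve(), ring_buffer_event_data(), tr->trace_buffer.buffer, and the new trace_buffer_unlock_commit() signature follow the callers touched above (internal declarations live in kernel/trace/trace.h).

/* Hypothetical in-kernel tracer commit path, mirroring the mmio/sched callers above. */
static void example_trace_commit(struct trace_array *tr, unsigned long data)
{
	struct ring_buffer *buffer = tr->trace_buffer.buffer;
	struct ring_buffer_event *event;
	struct example_entry *entry;		/* hypothetical entry layout */
	unsigned long flags;
	int pc = preempt_count();

	local_save_flags(flags);
	event = trace_buffer_lock_reserve(buffer, TRACE_EXAMPLE,
					  sizeof(*entry), flags, pc);
	if (!event)
		return;

	entry = ring_buffer_event_data(event);
	entry->data = data;

	/* The trace_array is now passed along with its ring buffer. */
	trace_buffer_unlock_commit(tr, buffer, event, flags, pc);
}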