int ring_buffer_write(struct ring_buffer *buffer,
                      unsigned long length, void *data);
 
+void ring_buffer_nest_start(struct ring_buffer *buffer);
+void ring_buffer_nest_end(struct ring_buffer *buffer);
+
 struct ring_buffer_event *
 ring_buffer_peek(struct ring_buffer *buffer, int cpu, u64 *ts,
                 unsigned long *lost_events);
 
        struct buffer_page              *reader_page;
        unsigned long                   lost_events;
        unsigned long                   last_overrun;
+       unsigned long                   nest;
        local_t                         entries_bytes;
        local_t                         entries;
        local_t                         overrun;
                bit = pc & NMI_MASK ? RB_CTX_NMI :
                        pc & HARDIRQ_MASK ? RB_CTX_IRQ : RB_CTX_SOFTIRQ;
 
-       if (unlikely(val & (1 << bit)))
+       if (unlikely(val & (1 << (bit + cpu_buffer->nest))))
                return 1;
 
-       val |= (1 << bit);
+       val |= (1 << (bit + cpu_buffer->nest));
        cpu_buffer->current_context = val;
 
        return 0;
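
A minimal userspace sketch of the recursion check above, assuming a single simplified context (RB_CTX_NORMAL as bit 0) and plain globals in place of the per-CPU buffer state; it only illustrates why adding "nest" to the bit position lets one nested write through while a plain re-entry in the same context is still rejected:

#include <stdio.h>

#define RB_CTX_NORMAL	0	/* assumed bit position, for illustration only */
#define NESTED_BITS	4

static unsigned long current_context;
static unsigned long nest;

/* mirrors trace_recursive_lock(): refuse a second write in the same context */
static int recursive_lock(int bit)
{
	unsigned long val = current_context;

	if (val & (1UL << (bit + nest)))
		return 1;			/* recursion detected */
	current_context = val | (1UL << (bit + nest));
	return 0;
}

/* mirrors trace_recursive_unlock(): clear the bit set by the matching lock */
static void recursive_unlock(void)
{
	current_context &= current_context - (1UL << nest);
}

int main(void)
{
	printf("%d\n", recursive_lock(RB_CTX_NORMAL));	/* 0: first write allowed */
	printf("%d\n", recursive_lock(RB_CTX_NORMAL));	/* 1: same context, blocked */

	nest += NESTED_BITS;				/* ring_buffer_nest_start() */
	printf("%d\n", recursive_lock(RB_CTX_NORMAL));	/* 0: shifted bit, allowed */
	recursive_unlock();				/* nested unlock/commit */
	nest -= NESTED_BITS;				/* ring_buffer_nest_end() */

	recursive_unlock();				/* outer unlock/commit */
	return 0;
}
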
 static __always_inline void
 trace_recursive_unlock(struct ring_buffer_per_cpu *cpu_buffer)
 {
-       cpu_buffer->current_context &= cpu_buffer->current_context - 1;
+       cpu_buffer->current_context &=
+               cpu_buffer->current_context - (1 << cpu_buffer->nest);
+}
+
+/* The recursive locking above uses 4 bits */
+#define NESTED_BITS 4
+
+/**
+ * ring_buffer_nest_start - Allow tracing while nested
+ * @buffer: The ring buffer to modify
+ *
+ * The ring buffer has a safety mechanism to prevent recursion.
+ * But there may be a case where a trace needs to be done while
+ * tracing something else. In this case, calling this function
+ * allows the caller to nest a new write within a currently active
+ * ring_buffer_lock_reserve().
+ *
+ * Call this function before calling another ring_buffer_lock_reserve() and
+ * call ring_buffer_nest_end() after the nested ring_buffer_unlock_commit().
+ */
+void ring_buffer_nest_start(struct ring_buffer *buffer)
+{
+       struct ring_buffer_per_cpu *cpu_buffer;
+       int cpu;
+
+       /* Enabled by ring_buffer_nest_end() */
+       preempt_disable_notrace();
+       cpu = raw_smp_processor_id();
+       cpu_buffer = buffer->buffers[cpu];
+       /* This is the shift value for the above recursive locking */
+       cpu_buffer->nest += NESTED_BITS;
+}
+
+/**
+ * ring_buffer_nest_end - End the nested tracing section
+ * @buffer: The ring buffer to modify
+ *
+ * Must be called after ring_buffer_nest_start() and after the
+ * nested ring_buffer_unlock_commit().
+ */
+void ring_buffer_nest_end(struct ring_buffer *buffer)
+{
+       struct ring_buffer_per_cpu *cpu_buffer;
+       int cpu;
+
+       /* disabled by ring_buffer_nest_start() */
+       cpu = raw_smp_processor_id();
+       cpu_buffer = buffer->buffers[cpu];
+       /* This is the shift value for the above recursive locking */
+       cpu_buffer->nest -= NESTED_BITS;
+       preempt_enable_notrace();
 }
 
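For reference, a hedged usage sketch of the new API, following the kernel-doc above; the helper name, the payload layout, and the error handling are assumptions, but the reserve/commit calls use the existing ring buffer interfaces:

#include <linux/types.h>
#include <linux/ring_buffer.h>

/*
 * Hypothetical caller that is already inside an active
 * ring_buffer_lock_reserve() on "buffer" and wants to emit one
 * more event from the same context.
 */
static void emit_nested_event(struct ring_buffer *buffer, u64 payload)
{
	struct ring_buffer_event *event;
	u64 *entry;

	/* Shift the recursion-check bits so the nested reserve is allowed */
	ring_buffer_nest_start(buffer);

	event = ring_buffer_lock_reserve(buffer, sizeof(*entry));
	if (event) {
		entry = ring_buffer_event_data(event);
		*entry = payload;
		ring_buffer_unlock_commit(buffer, event);
	}

	/* Must be called even if the reserve failed */
	ring_buffer_nest_end(buffer);
}
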
 /**