If unsure, say N
 
+config RING_BUFFER_VALIDATE_TIME_DELTAS
+       bool "Verify ring buffer time stamp deltas"
+       depends on RING_BUFFER
+       help
+         This will audit the time stamps on the ring buffer sub
+         buffer to make sure that all the time deltas for the
+         events on a sub buffer match the current time stamp.
+         This audit is performed for every event that is not
+         interrupted, or interrupting another event. A check
+         is also made when traversing sub buffers to make sure
+         that all the deltas on the previous sub buffer do not
+         add up to be greater than the current time stamp.
+
+         NOTE: This adds significant overhead to recording of events,
+         and should only be used to test the logic of the ring buffer.
+         Do not use it on production systems.
+
+         Only say Y if you understand what this does, and you
+         still want it enabled. Otherwise say N
+
 config MMIOTRACE_TEST
        tristate "Test module for mmiotrace"
        depends on MMIOTRACE && m
 
 }
 EXPORT_SYMBOL_GPL(ring_buffer_unlock_commit);
 
+/* Special tail value passed to check_buffer() to validate all deltas on a page. */
+#define CHECK_FULL_PAGE                1L
+
+#ifdef CONFIG_RING_BUFFER_VALIDATE_TIME_DELTAS
+/*
+ * dump_buffer_page - print every event on a sub-buffer with its time stamp
+ * @bpage: the sub-buffer (data page) whose events are dumped
+ * @info:  reserve-time info of the event that failed validation
+ *         (currently unused by this body; kept for call-site symmetry)
+ * @tail:  offset of the end of the data to walk on @bpage
+ *
+ * Walks the events from the start of @bpage up to @tail, accumulating
+ * the running time stamp from each event's delta, and pr_warn()s one
+ * line per event so the point of divergence can be seen in the log.
+ */
+static void dump_buffer_page(struct buffer_data_page *bpage,
+                            struct rb_event_info *info,
+                            unsigned long tail)
+{
+       struct ring_buffer_event *event;
+       u64 ts, delta;
+       int e;
+
+       /* Start from the page's base time stamp. */
+       ts = bpage->time_stamp;
+       pr_warn("  [%lld] PAGE TIME STAMP\n", ts);
+
+       for (e = 0; e < tail; e += rb_event_length(event)) {
+
+               event = (struct ring_buffer_event *)(bpage->data + e);
+
+               switch (event->type_len) {
+
+               case RINGBUF_TYPE_TIME_EXTEND:
+                       /* An extend adds its payload to the running time. */
+                       delta = ring_buffer_event_time_stamp(event);
+                       ts += delta;
+                       pr_warn("  [%lld] delta:%lld TIME EXTEND\n", ts, delta);
+                       break;
+
+               case RINGBUF_TYPE_TIME_STAMP:
+                       /* An absolute time stamp replaces the running time. */
+                       delta = ring_buffer_event_time_stamp(event);
+                       ts = delta;
+                       pr_warn("  [%lld] absolute:%lld TIME STAMP\n", ts, delta);
+                       break;
+
+               case RINGBUF_TYPE_PADDING:
+                       ts += event->time_delta;
+                       pr_warn("  [%lld] delta:%d PADDING\n", ts, event->time_delta);
+                       break;
+
+               case RINGBUF_TYPE_DATA:
+                       ts += event->time_delta;
+                       pr_warn("  [%lld] delta:%d\n", ts, event->time_delta);
+                       break;
+
+               default:
+                       break;
+               }
+       }
+}
+
+static DEFINE_PER_CPU(atomic_t, checking);
+static atomic_t ts_dump;
+
+/*
+ * Check if the current event time stamp matches the deltas on
+ * the buffer page.
+ *
+ * @tail is the offset of this event on the sub-buffer, or the special
+ * value CHECK_FULL_PAGE, in which case every delta up to the page's
+ * commit is re-added and the sum must not exceed info->ts. For a
+ * normal event, the accumulated page time plus info->delta must equal
+ * info->ts exactly. On a mismatch, recording on this CPU buffer is
+ * disabled and the page is dumped via dump_buffer_page().
+ */
+static void check_buffer(struct ring_buffer_per_cpu *cpu_buffer,
+                        struct rb_event_info *info,
+                        unsigned long tail)
+{
+       struct ring_buffer_event *event;
+       struct buffer_data_page *bpage;
+       u64 ts, delta;
+       bool full = false;
+       int e;
+
+       bpage = info->tail_page->page;
+
+       if (tail == CHECK_FULL_PAGE) {
+               /* Validate everything committed to this page so far. */
+               full = true;
+               tail = local_read(&bpage->commit);
+       } else if (info->add_timestamp &
+                  (RB_ADD_STAMP_FORCE | RB_ADD_STAMP_ABSOLUTE)) {
+               /* Ignore events with absolute time stamps */
+               return;
+       }
+
+       /*
+        * Do not check the first event (skip possible extends too).
+        * Also do not check if previous events have not been committed.
+        */
+       if (tail <= 8 || tail > local_read(&bpage->commit))
+               return;
+
+       /*
+        * If a check is already in progress on this CPU, this event
+        * interrupted it; skip the walk, as the half-finished state of
+        * the page would produce false mismatches.
+        */
+       if (atomic_inc_return(this_cpu_ptr(&checking)) != 1)
+               goto out;
+
+       /* Re-add every delta on the page up to this event. */
+       ts = bpage->time_stamp;
+
+       for (e = 0; e < tail; e += rb_event_length(event)) {
+
+               event = (struct ring_buffer_event *)(bpage->data + e);
+
+               switch (event->type_len) {
+
+               case RINGBUF_TYPE_TIME_EXTEND:
+                       delta = ring_buffer_event_time_stamp(event);
+                       ts += delta;
+                       break;
+
+               case RINGBUF_TYPE_TIME_STAMP:
+                       delta = ring_buffer_event_time_stamp(event);
+                       ts = delta;
+                       break;
+
+               case RINGBUF_TYPE_PADDING:
+                       /*
+                        * NOTE(review): a padding delta of 1 is skipped —
+                        * presumably it marks a discarded event whose time
+                        * is not accounted; confirm against the discard path.
+                        */
+                       if (event->time_delta == 1)
+                               break;
+                       /* fall through */
+               case RINGBUF_TYPE_DATA:
+                       ts += event->time_delta;
+                       break;
+
+               default:
+                       RB_WARN_ON(cpu_buffer, 1);
+               }
+       }
+       if ((full && ts > info->ts) ||
+           (!full && ts + info->delta != info->ts)) {
+               /* If another report is happening, ignore this one */
+               if (atomic_inc_return(&ts_dump) != 1) {
+                       atomic_dec(&ts_dump);
+                       goto out;
+               }
+               atomic_inc(&cpu_buffer->record_disabled);
+               pr_warn("[CPU: %d]TIME DOES NOT MATCH expected:%lld actual:%lld delta:%lld after:%lld\n",
+                      cpu_buffer->cpu,
+                      ts + info->delta, info->ts, info->delta, info->after);
+               dump_buffer_page(bpage, info, tail);
+               atomic_dec(&ts_dump);
+               /* Do not re-enable checking (leave this CPU's counter raised) */
+               return;
+       }
+out:
+       atomic_dec(this_cpu_ptr(&checking));
+}
+#else
+/* Validation disabled at config time: compiles to nothing. */
+static inline void check_buffer(struct ring_buffer_per_cpu *cpu_buffer,
+                        struct rb_event_info *info,
+                        unsigned long tail)
+{
+}
+#endif /* CONFIG_RING_BUFFER_VALIDATE_TIME_DELTAS */
+
 static struct ring_buffer_event *
 __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
                  struct rb_event_info *info)
                                (void)rb_time_cmpxchg(&cpu_buffer->before_stamp,
                                                      info->before, info->after);
                }
+               if (a_ok && b_ok)
+                       check_buffer(cpu_buffer, info, CHECK_FULL_PAGE);
                return rb_move_tail(cpu_buffer, tail, info);
        }
 
                        /* Just use full timestamp for interrupting event */
                        info->delta = info->ts;
                barrier();
+               check_buffer(cpu_buffer, info, tail);
                if (unlikely(info->ts != save_before)) {
                        /* SLOW PATH - Interrupted between C and E */