* file will be supported by older perf tools, with these new optional
         * fields being ignored.
         *
+        * struct sample_id {
+        *      { u32                   pid, tid; } && PERF_SAMPLE_TID
+        *      { u64                   time;     } && PERF_SAMPLE_TIME
+        *      { u64                   id;       } && PERF_SAMPLE_ID
+        *      { u64                   stream_id;} && PERF_SAMPLE_STREAM_ID
+        *      { u32                   cpu, res; } && PERF_SAMPLE_CPU
+        * } && perf_event_attr::sample_id_all
+        */
+
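For reference, a minimal userspace sketch (not part of this patch) of walking the
sample_id trailer documented above once perf_event_attr.sample_id_all is set. Only
the field order comes from the comment; the struct and function names here are
invented for illustration:

#include <stdint.h>
#include <string.h>
#include <linux/perf_event.h>

struct sample_id_fields {
	uint32_t pid, tid;	/* PERF_SAMPLE_TID */
	uint64_t time;		/* PERF_SAMPLE_TIME */
	uint64_t id;		/* PERF_SAMPLE_ID */
	uint64_t stream_id;	/* PERF_SAMPLE_STREAM_ID */
	uint32_t cpu, res;	/* PERF_SAMPLE_CPU */
};

/*
 * Illustrative only: 'p' points just past the record-specific fields of a
 * non-sample record; returns the advanced cursor.
 */
static const unsigned char *
read_sample_id(const unsigned char *p, uint64_t sample_type,
	       struct sample_id_fields *out)
{
	if (sample_type & PERF_SAMPLE_TID) {
		memcpy(&out->pid, p, 4); p += 4;
		memcpy(&out->tid, p, 4); p += 4;
	}
	if (sample_type & PERF_SAMPLE_TIME) {
		memcpy(&out->time, p, 8); p += 8;
	}
	if (sample_type & PERF_SAMPLE_ID) {
		memcpy(&out->id, p, 8); p += 8;
	}
	if (sample_type & PERF_SAMPLE_STREAM_ID) {
		memcpy(&out->stream_id, p, 8); p += 8;
	}
	if (sample_type & PERF_SAMPLE_CPU) {
		memcpy(&out->cpu, p, 4); p += 4;
		memcpy(&out->res, p, 4); p += 4;
	}
	return p;
}
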
+       /*
         * The MMAP events record the PROT_EXEC mappings so that we can
         * correlate userspace IPs to code. They have the following structure:
         *
         *      struct perf_event_header        header;
         *      u64                             id;
         *      u64                             lost;
+        *      struct sample_id                sample_id;
         * };
         */
        PERF_RECORD_LOST                        = 2,
         *
         *      u32                             pid, tid;
         *      char                            comm[];
+        *      struct sample_id                sample_id;
         * };
         */
        PERF_RECORD_COMM                        = 3,
         *      u32                             pid, ppid;
         *      u32                             tid, ptid;
         *      u64                             time;
+        *      struct sample_id                sample_id;
         * };
         */
        PERF_RECORD_EXIT                        = 4,
         *      u64                             time;
         *      u64                             id;
         *      u64                             stream_id;
+        *      struct sample_id                sample_id;
         * };
         */
        PERF_RECORD_THROTTLE                    = 5,
         *      u32                             pid, ppid;
         *      u32                             tid, ptid;
         *      u64                             time;
+        *      struct sample_id                sample_id;
         * };
         */
        PERF_RECORD_FORK                        = 7,
         *      u32                             pid, tid;
         *
         *      struct read_format              values;
+        *      struct sample_id                sample_id;
         * };
         */
        PERF_RECORD_READ                        = 8,
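
As an aside, the reader side typically dispatches on perf_event_header.type over
the record types enumerated here; a rough sketch with the per-record handling left
out and the function name invented for this example:

#include <linux/perf_event.h>

static void handle_record(const struct perf_event_header *ev)
{
	switch (ev->type) {
	case PERF_RECORD_LOST:
	case PERF_RECORD_COMM:
	case PERF_RECORD_EXIT:
	case PERF_RECORD_THROTTLE:
	case PERF_RECORD_UNTHROTTLE:
	case PERF_RECORD_FORK:
	case PERF_RECORD_READ:
		/*
		 * Non-sample records: with sample_id_all set, the struct
		 * sample_id documented above is appended after the fields
		 * shown for each record type.
		 */
		break;
	case PERF_RECORD_SAMPLE:
		/* Layout is selected field-by-field by attr.sample_type. */
		break;
	default:
		break;
	}
}
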
         *        u64                   dyn_size; } && PERF_SAMPLE_STACK_USER
         *
         *      { u64                   weight;   } && PERF_SAMPLE_WEIGHT
-        *      { u64                   data_src;     } && PERF_SAMPLE_DATA_SRC
+        *      { u64                   data_src; } && PERF_SAMPLE_DATA_SRC
         * };
         */
        PERF_RECORD_SAMPLE                      = 9,
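
The weight and data_src fields shown above only appear when requested via
attr.sample_type; a hedged sketch of the corresponding perf_event_open() setup
(event choice and period are arbitrary, and whether weight/data_src are actually
populated depends on the PMU and event used):

#define _GNU_SOURCE
#include <string.h>
#include <unistd.h>
#include <sys/types.h>
#include <sys/syscall.h>
#include <linux/perf_event.h>

static int open_sampling_event(pid_t pid, int cpu)
{
	struct perf_event_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.type = PERF_TYPE_HARDWARE;
	attr.config = PERF_COUNT_HW_CPU_CYCLES;
	attr.sample_period = 100000;
	attr.sample_type = PERF_SAMPLE_IP | PERF_SAMPLE_TID |
			   PERF_SAMPLE_WEIGHT | PERF_SAMPLE_DATA_SRC;
	attr.sample_id_all = 1;		/* tag non-sample records too */

	return syscall(__NR_perf_event_open, &attr, pid, cpu, -1, 0);
}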
 
                }
        }
 
-       if (!event->attr.watermark) {
-               int wakeup_events = event->attr.wakeup_events;
-
-               if (wakeup_events) {
-                       struct ring_buffer *rb = handle->rb;
-                       int events = local_inc_return(&rb->events);
-
-                       if (events >= wakeup_events) {
-                               local_sub(wakeup_events, &rb->events);
-                               local_inc(&rb->wakeup);
-                       }
-               }
-       }
-
        if (sample_type & PERF_SAMPLE_BRANCH_STACK) {
                if (data->br_stack) {
                        size_t size;
                }
        }
 
-       if (sample_type & PERF_SAMPLE_STACK_USER)
+       if (sample_type & PERF_SAMPLE_STACK_USER) {
                perf_output_sample_ustack(handle,
                                          data->stack_user_size,
                                          data->regs_user.regs);
+       }
 
        if (sample_type & PERF_SAMPLE_WEIGHT)
                perf_output_put(handle, data->weight);
 
        if (sample_type & PERF_SAMPLE_DATA_SRC)
                perf_output_put(handle, data->data_src.val);
+
+       if (!event->attr.watermark) {
+               int wakeup_events = event->attr.wakeup_events;
+
+               if (wakeup_events) {
+                       struct ring_buffer *rb = handle->rb;
+                       int events = local_inc_return(&rb->events);
+
+                       if (events >= wakeup_events) {
+                               local_sub(wakeup_events, &rb->events);
+                               local_inc(&rb->wakeup);
+                       }
+               }
+       }
 }
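
For context on the block moved to the end of perf_output_sample(): rb->wakeup is
only armed per record count when the event is not in watermark mode, since
wakeup_events and wakeup_watermark share a union in perf_event_attr and the two
policies are mutually exclusive. A small userspace sketch of selecting one or the
other (the values 64 and 4096 and the helper name are made up):

#include <linux/perf_event.h>

static void set_wakeup_policy(struct perf_event_attr *attr, int by_bytes)
{
	if (by_bytes) {
		/* wake up poll()ers once ~4KiB of data is queued */
		attr->watermark = 1;
		attr->wakeup_watermark = 4096;
	} else {
		/* wake up poll()ers every 64 records, the path counted above */
		attr->wakeup_events = 64;
	}
}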
 
 void perf_prepare_sample(struct perf_event_header *header,