  *       { u64         time_enabled; } && PERF_FORMAT_TOTAL_TIME_ENABLED
  *       { u64         time_running; } && PERF_FORMAT_TOTAL_TIME_RUNNING
  *       { u64         id;           } && PERF_FORMAT_ID
+ *       { u64         lost;         } && PERF_FORMAT_LOST
  *     } && !PERF_FORMAT_GROUP
  *
  *     { u64           nr;
  *       { u64         time_running; } && PERF_FORMAT_TOTAL_TIME_RUNNING
  *       { u64         value;
  *         { u64       id;           } && PERF_FORMAT_ID
+ *         { u64       lost;         } && PERF_FORMAT_LOST
  *       }             cntr[nr];
  *     } && PERF_FORMAT_GROUP
  * };
        PERF_FORMAT_TOTAL_TIME_RUNNING          = 1U << 1,
        PERF_FORMAT_ID                          = 1U << 2,
        PERF_FORMAT_GROUP                       = 1U << 3,
+       PERF_FORMAT_LOST                        = 1U << 4,
 
-       PERF_FORMAT_MAX = 1U << 4,              /* non-ABI */
+       PERF_FORMAT_MAX = 1U << 5,              /* non-ABI */
 };
 
 #define PERF_ATTR_SIZE_VER0    64      /* sizeof first published struct */
 
        if (event->attr.read_format & PERF_FORMAT_ID)
                entry += sizeof(u64);
 
+       if (event->attr.read_format & PERF_FORMAT_LOST)
+               entry += sizeof(u64);
+
        if (event->attr.read_format & PERF_FORMAT_GROUP) {
                nr += nr_siblings;
                size += sizeof(u64);
        values[n++] += perf_event_count(leader);
        if (read_format & PERF_FORMAT_ID)
                values[n++] = primary_event_id(leader);
+       if (read_format & PERF_FORMAT_LOST)
+               values[n++] = atomic64_read(&leader->lost_samples);
 
        for_each_sibling_event(sub, leader) {
                values[n++] += perf_event_count(sub);
                if (read_format & PERF_FORMAT_ID)
                        values[n++] = primary_event_id(sub);
+               if (read_format & PERF_FORMAT_LOST)
+                       values[n++] = atomic64_read(&sub->lost_samples);
        }
 
        raw_spin_unlock_irqrestore(&ctx->lock, flags);
                                 u64 read_format, char __user *buf)
 {
        u64 enabled, running;
-       u64 values[4];
+       u64 values[5];
        int n = 0;
 
        values[n++] = __perf_event_read_value(event, &enabled, &running);
                values[n++] = running;
        if (read_format & PERF_FORMAT_ID)
                values[n++] = primary_event_id(event);
+       if (read_format & PERF_FORMAT_LOST)
+               values[n++] = atomic64_read(&event->lost_samples);
 
        if (copy_to_user(buf, values, n * sizeof(u64)))
                return -EFAULT;
                                 u64 enabled, u64 running)
 {
        u64 read_format = event->attr.read_format;
-       u64 values[4];
+       u64 values[5];
        int n = 0;
 
        values[n++] = perf_event_count(event);
        }
        if (read_format & PERF_FORMAT_ID)
                values[n++] = primary_event_id(event);
+       if (read_format & PERF_FORMAT_LOST)
+               values[n++] = atomic64_read(&event->lost_samples);
 
        __output_copy(handle, values, n * sizeof(u64));
 }
 {
        struct perf_event *leader = event->group_leader, *sub;
        u64 read_format = event->attr.read_format;
-       u64 values[5];
+       u64 values[6];
        int n = 0;
 
        values[n++] = 1 + leader->nr_siblings;
        values[n++] = perf_event_count(leader);
        if (read_format & PERF_FORMAT_ID)
                values[n++] = primary_event_id(leader);
+       if (read_format & PERF_FORMAT_LOST)
+               values[n++] = atomic64_read(&leader->lost_samples);
 
        __output_copy(handle, values, n * sizeof(u64));
 
                values[n++] = perf_event_count(sub);
                if (read_format & PERF_FORMAT_ID)
                        values[n++] = primary_event_id(sub);
+               if (read_format & PERF_FORMAT_LOST)
+                       values[n++] = atomic64_read(&sub->lost_samples);
 
                __output_copy(handle, values, n * sizeof(u64));
        }