                         continue;
 
                /* log dropped samples number */
-               if (error[bit])
+               if (error[bit]) {
                        perf_log_lost_samples(event, error[bit]);
 
+                       if (perf_event_account_interrupt(event))
+                               x86_pmu_stop(event, 0);
+               }
+
                if (counts[bit]) {
                        __intel_pmu_pebs_event(event, iregs, base,
                                               top, bit, counts[bit]);

diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
 extern void perf_event_disable_local(struct perf_event *event);
 extern void perf_event_disable_inatomic(struct perf_event *event);
 extern void perf_event_task_tick(void);
+extern int perf_event_account_interrupt(struct perf_event *event);
 #else /* !CONFIG_PERF_EVENTS: */
 static inline void *
 perf_aux_output_begin(struct perf_output_handle *handle,

diff --git a/kernel/events/core.c b/kernel/events/core.c
        perf_output_end(&handle);
 }
 
-/*
- * Generic event overflow handling, sampling.
- */
-
-static int __perf_event_overflow(struct perf_event *event,
-                                  int throttle, struct perf_sample_data *data,
-                                  struct pt_regs *regs)
+static int
+__perf_event_account_interrupt(struct perf_event *event, int throttle)
 {
-       int events = atomic_read(&event->event_limit);
        struct hw_perf_event *hwc = &event->hw;
-       u64 seq;
        int ret = 0;
-
-       /*
-        * Non-sampling counters might still use the PMI to fold short
-        * hardware counters, ignore those.
-        */
-       if (unlikely(!is_sampling_event(event)))
-               return 0;
+       u64 seq;
 
        seq = __this_cpu_read(perf_throttled_seq);
        if (seq != hwc->interrupts_seq) {
                        perf_adjust_period(event, delta, hwc->last_period, true);
        }
 
+       return ret;
+}
+
+int perf_event_account_interrupt(struct perf_event *event)
+{
+       return __perf_event_account_interrupt(event, 1);
+}
+
+/*
+ * Generic event overflow handling, sampling.
+ */
+
+static int __perf_event_overflow(struct perf_event *event,
+                                  int throttle, struct perf_sample_data *data,
+                                  struct pt_regs *regs)
+{
+       int events = atomic_read(&event->event_limit);
+       int ret = 0;
+
+       /*
+        * Non-sampling counters might still use the PMI to fold short
+        * hardware counters, ignore those.
+        */
+       if (unlikely(!is_sampling_event(event)))
+               return 0;
+
+       ret = __perf_event_account_interrupt(event, throttle);
+
        /*
         * XXX event_limit might not quite work as expected on inherited
         * events