#include <linux/init.h>
 #include <linux/slab.h>
 #include <linux/delay.h>
-#include <linux/nmi.h>
 #include <asm/apicdef.h>
+#include <asm/nmi.h>
 
 #include "../perf_event.h"
 
        }
 }
 
+/*
+ * Disable a single PMC event.  Replaces x86_pmu_disable_event as the
+ * .disable callback: it first disables the counter, then — unless we are
+ * already in NMI context — waits on the counter index via
+ * amd_pmu_wait_on_overflow().  NOTE(review): the exact wait semantics
+ * live in amd_pmu_wait_on_overflow(), which is not visible in this hunk;
+ * presumably it spins until a pending overflow has been delivered so the
+ * counter can be safely reused — confirm against its definition.
+ */
+static void amd_pmu_disable_event(struct perf_event *event)
+{
+       x86_pmu_disable_event(event);
+
+       /*
+        * This can be called from NMI context (via x86_pmu_stop). The counter
+        * may have overflowed, but either way, we'll never see it get reset
+        * by the NMI if we're already in the NMI. And the NMI latency support
+        * below will take care of any pending NMI that might have been
+        * generated by the overflow.
+        */
+       if (in_nmi())
+               return;
+
+       amd_pmu_wait_on_overflow(event->hw.idx);
+}
+
 /*
  * Because of NMI latency, if multiple PMC counters are active or other sources
  * of NMIs are received, the perf NMI handler can handle one or more overflowed
        .disable_all            = amd_pmu_disable_all,
        .enable_all             = x86_pmu_enable_all,
        .enable                 = x86_pmu_enable_event,
-       .disable                = x86_pmu_disable_event,
+       .disable                = amd_pmu_disable_event,
        .hw_config              = amd_pmu_hw_config,
        .schedule_events        = x86_schedule_events,
        .eventsel               = MSR_K7_EVNTSEL0,
 
        struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
        struct hw_perf_event *hwc = &event->hw;
 
-       if (__test_and_clear_bit(hwc->idx, cpuc->active_mask)) {
+       if (test_bit(hwc->idx, cpuc->active_mask)) {
                x86_pmu.disable(event);
+               __clear_bit(hwc->idx, cpuc->active_mask);
                cpuc->events[hwc->idx] = NULL;
                WARN_ON_ONCE(hwc->state & PERF_HES_STOPPED);
                hwc->state |= PERF_HES_STOPPED;
        apic_write(APIC_LVTPC, APIC_DM_NMI);
 
        for (idx = 0; idx < x86_pmu.num_counters; idx++) {
-               if (!test_bit(idx, cpuc->active_mask)) {
-                       /*
-                        * Though we deactivated the counter some cpus
-                        * might still deliver spurious interrupts still
-                        * in flight. Catch them:
-                        */
-                       if (__test_and_clear_bit(idx, cpuc->running))
-                               handled++;
+               if (!test_bit(idx, cpuc->active_mask))
                        continue;
-               }
 
                event = cpuc->events[idx];