 static DEFINE_PER_CPU(bool, hard_watchdog_warn);
 static DEFINE_PER_CPU(bool, watchdog_nmi_touch);
 static DEFINE_PER_CPU(struct perf_event *, watchdog_ev);
+static DEFINE_PER_CPU(struct perf_event *, dead_event);
 static struct cpumask dead_events_mask;
 
 static unsigned long hardlockup_allcpu_dumped;
 
        if (event) {
                perf_event_disable(event);
+               this_cpu_write(watchdog_ev, NULL);
+               this_cpu_write(dead_event, event);
                cpumask_set_cpu(smp_processor_id(), &dead_events_mask);
                watchdog_cpus--;
        }
        int cpu;
 
        for_each_cpu(cpu, &dead_events_mask) {
-               struct perf_event *event = per_cpu(watchdog_ev, cpu);
+               struct perf_event *event = per_cpu(dead_event, cpu);
 
                /*
                 * Required because for_each_cpu() reports unconditionally
                 * CPU0 as set on UP kernels.
                 */
                if (event)
                        perf_event_release_kernel(event);
-               per_cpu(watchdog_ev, cpu) = NULL;
+               per_cpu(dead_event, cpu) = NULL;
        }
        cpumask_clear(&dead_events_mask);
 }
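
For context, the change above parks the per-CPU perf event in a separate dead_event slot and records the CPU in dead_events_mask, so that perf_event_release_kernel() can run later from the cleanup pass instead of from the path that disables the event. The standalone sketch below illustrates that park-then-release pattern in plain userspace C; fake_event, NCPUS, watchdog_disable() and watchdog_cleanup() are illustrative stand-ins (not kernel APIs), and malloc()/free() stand in for event creation and release.

        /*
         * Userspace sketch of the deferred-release pattern, assuming a
         * fixed NCPUS and simple arrays in place of per-CPU variables.
         */
        #include <stdlib.h>
        #include <stdbool.h>

        #define NCPUS 4

        struct fake_event { int cpu; };               /* stand-in for struct perf_event */

        static struct fake_event *watchdog_ev[NCPUS]; /* live per-CPU event             */
        static struct fake_event *dead_event[NCPUS];  /* parked event awaiting release  */
        static bool dead_events_mask[NCPUS];          /* which CPUs need cleanup        */

        /* Teardown path: park the event; do not release it here. */
        static void watchdog_disable(int cpu)
        {
                struct fake_event *event = watchdog_ev[cpu];

                if (event) {
                        watchdog_ev[cpu] = NULL;      /* clear the live slot            */
                        dead_event[cpu] = event;      /* remember it for later release  */
                        dead_events_mask[cpu] = true;
                }
        }

        /* Later, from a context where releasing is safe: free all parked events. */
        static void watchdog_cleanup(void)
        {
                for (int cpu = 0; cpu < NCPUS; cpu++) {
                        if (!dead_events_mask[cpu])
                                continue;
                        free(dead_event[cpu]);
                        dead_event[cpu] = NULL;
                        dead_events_mask[cpu] = false;
                }
        }

        int main(void)
        {
                for (int cpu = 0; cpu < NCPUS; cpu++) {
                        watchdog_ev[cpu] = malloc(sizeof(struct fake_event));
                        watchdog_ev[cpu]->cpu = cpu;
                }

                watchdog_disable(1);                  /* park CPU 1's event             */
                watchdog_disable(3);                  /* park CPU 3's event             */
                watchdog_cleanup();                   /* release both in one pass       */

                for (int cpu = 0; cpu < NCPUS; cpu++)
                        free(watchdog_ev[cpu]);       /* free the still-live events     */
                return 0;
        }

The point of the split is visible in the two hunks of the diff: the live watchdog_ev slot is cleared immediately when the event is disabled, while the actual release is batched into a single later pass over dead_events_mask.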