        atomic_dec(&nmi_active);
 }
 
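+/* Mask NMI delivery through the local APIC's LVT0 entry on this CPU. */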
+static void __acpi_nmi_disable(void *__unused)
+{
+       apic_write(APIC_LVT0, APIC_DM_NMI | APIC_LVT_MASKED);
+}
+
 int __init check_nmi_watchdog(void)
 {
        unsigned int *prev_nmi_count;
        kfree(prev_nmi_count);
        return 0;
 error:
-       if (nmi_watchdog == NMI_IO_APIC && !timer_through_8259)
-               disable_8259A_irq(0);
+       if (nmi_watchdog == NMI_IO_APIC) {
+               if (!timer_through_8259)
+                       disable_8259A_irq(0);
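+               /* The watchdog check failed: mask the LVT0 NMI on every CPU. */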
+               on_each_cpu(__acpi_nmi_disable, NULL, 1);
+       }
+
 #ifdef CONFIG_X86_32
        timer_ack = 0;
 #endif
                on_each_cpu(__acpi_nmi_enable, NULL, 1);
 }
 
-static void __acpi_nmi_disable(void *__unused)
-{
-       apic_write(APIC_LVT0, APIC_DM_NMI | APIC_LVT_MASKED);
-}
-
 /*
  * Disable timer based NMIs on all CPUs:
  */