static DEFINE_PER_CPU(struct mce, mces_seen);
 static int                     cpu_missing;
 
+/* Per-CPU flag set by the poller when a corrected error is seen; consumed by CMCI storm detection */
+static DEFINE_PER_CPU(unsigned long, mce_polled_error);
+
 /*
  * MCA banks polled by the period polling timer for corrected events.
  * With Intel CMCI, this only has MCA banks which do not support CMCI (if any).
 {
        struct mce m;
        int i;
+       unsigned long *v;
 
        this_cpu_inc(mce_poll_count);
 
                if (!(m.status & MCI_STATUS_VAL))
                        continue;
 
+               v = &get_cpu_var(mce_polled_error);
+               set_bit(0, v);
                /*
                 * Uncorrected or signalled events are handled by the exception
                 * handler when it is enabled, so don't process those here.
 static unsigned long (*mce_adjust_timer)(unsigned long interval) =
        mce_adjust_timer_default;
 
+/*
+ * Test-and-clear this CPU's mce_polled_error flag (bit 0), which the
+ * polling code sets when it finds a valid corrected event.  Returns
+ * nonzero if an error was recorded since the last call.  Used by the
+ * MCE timer to decide whether to shorten the polling interval.
+ */
+static int cmc_error_seen(void)
+{
+       unsigned long *v = &__get_cpu_var(mce_polled_error);
+
+       return test_and_clear_bit(0, v);
+}
+
 static void mce_timer_fn(unsigned long data)
 {
        struct timer_list *t = &__get_cpu_var(mce_timer);
        unsigned long iv;
+       int notify;
 
        WARN_ON(smp_processor_id() != data);
 
         * polling interval, otherwise increase the polling interval.
         */
        iv = __this_cpu_read(mce_next_interval);
-       if (mce_notify_irq()) {
+       notify = mce_notify_irq();
+       notify |= cmc_error_seen();
+       if (notify) {
                iv = max(iv / 2, (unsigned long) HZ/100);
        } else {
                iv = min(iv * 2, round_jiffies_relative(check_interval * HZ));
 
 #include <linux/interrupt.h>
 #include <linux/percpu.h>
 #include <linux/sched.h>
+#include <linux/cpumask.h>
 #include <asm/apic.h>
 #include <asm/processor.h>
 #include <asm/msr.h>
        }
 }
 
+/*
+ * Silence CMCI on every MCA bank owned by this CPU by clearing
+ * MCI_CTL2_CMCI_EN in each bank's CTL2 MSR.  The mce_banks_owned
+ * bitmap itself is left untouched, so ownership is retained and the
+ * interrupts can be re-enabled later (presumably when the storm
+ * subsides -- see cmci_storm_detect()).  cmci_discover_lock is taken
+ * with IRQs off to serialize against concurrent bank (re)discovery.
+ */
+static void cmci_storm_disable_banks(void)
+{
+       unsigned long flags, *owned;
+       int bank;
+       u64 val;
+
+       raw_spin_lock_irqsave(&cmci_discover_lock, flags);
+       owned = __get_cpu_var(mce_banks_owned);
+       for_each_set_bit(bank, owned, MAX_NR_BANKS) {
+               rdmsrl(MSR_IA32_MCx_CTL2(bank), val);
+               val &= ~MCI_CTL2_CMCI_EN;
+               wrmsrl(MSR_IA32_MCx_CTL2(bank), val);
+       }
+       raw_spin_unlock_irqrestore(&cmci_discover_lock, flags);
+}
+
 static bool cmci_storm_detect(void)
 {
        unsigned int cnt = __this_cpu_read(cmci_storm_cnt);
        if (cnt <= CMCI_STORM_THRESHOLD)
                return false;
 
-       cmci_clear();
+       cmci_storm_disable_banks();
        __this_cpu_write(cmci_storm_state, CMCI_STORM_ACTIVE);
        r = atomic_add_return(1, &cmci_storm_on_cpus);
        mce_timer_kick(CMCI_POLL_INTERVAL);