extern asmlinkage void irq_move_cleanup_interrupt(void);
 extern asmlinkage void reboot_interrupt(void);
 extern asmlinkage void threshold_interrupt(void);
+extern asmlinkage void deferred_error_interrupt(void);
 
 extern asmlinkage void call_function_interrupt(void);
 extern asmlinkage void call_function_single_interrupt(void);
 extern void trace_thermal_interrupt(void);
 extern void trace_reschedule_interrupt(void);
 extern void trace_threshold_interrupt(void);
+extern void trace_deferred_error_interrupt(void);
 extern void trace_call_function_interrupt(void);
 extern void trace_call_function_single_interrupt(void);
 #define trace_irq_move_cleanup_interrupt  irq_move_cleanup_interrupt
 
  *     - added support for AMD Family 0x10 processors
  *  May 2012
  *     - major scrubbing
+ *  May 2015
+ *     - add support for deferred error interrupts (Aravind Gopalakrishnan)
  *
  *  All MC4_MISCi registers are shared between multi-cores
  */
 #include <asm/idle.h>
 #include <asm/mce.h>
 #include <asm/msr.h>
+#include <asm/trace/irq_vectors.h>
 
 #define NR_BLOCKS         9
 #define THRESHOLD_MAX     0xFFF
 #define MASK_BLKPTR_LO    0xFF000000
 #define MCG_XBLK_ADDR     0xC0000400
 
+/* Deferred error settings */
+#define MSR_CU_DEF_ERR         0xC0000410
+#define MASK_DEF_LVTOFF                0x000000F0
+#define MASK_DEF_INT_TYPE      0x00000006
+#define DEF_LVT_OFF            0x2
+#define DEF_INT_TYPE_APIC      0x2
+
 static const char * const th_names[] = {
        "load_store",
        "insn_fetch",
 static DEFINE_PER_CPU(unsigned char, bank_map);        /* see which banks are on */
 
 static void amd_threshold_interrupt(void);
+static void amd_deferred_error_interrupt(void);
+
+/*
+ * Fallback handler: fires if a deferred-error vector arrives before (or
+ * without) deferred_error_interrupt_enable() installing the real AMD
+ * handler. Just logs the unexpected interrupt.
+ */
+static void default_deferred_error_interrupt(void)
+{
+       pr_err("Unexpected deferred interrupt at vector %x\n", DEFERRED_ERROR_VECTOR);
+}
+/* Dispatch hook invoked from the deferred-error IRQ entry path below. */
+void (*deferred_error_int_vector)(void) = default_deferred_error_interrupt;
 
 /*
  * CPU Initialization
        return reserved;
 }
 
+/*
+ * Claim LVT offset @new for the deferred-error vector, unless an offset
+ * (@reserved >= 0) was already claimed. Returns the offset now in use:
+ * @new on successful registration, otherwise the previous @reserved value.
+ */
+static int setup_APIC_deferred_error(int reserved, int new)
+{
+       if (reserved >= 0)
+               return reserved;
+
+       if (setup_APIC_eilvt(new, DEFERRED_ERROR_VECTOR, APIC_EILVT_MSG_FIX, 0))
+               return reserved;
+
+       return new;
+}
+
+/*
+ * Enable delivery of deferred machine-check errors as an APIC interrupt.
+ *
+ * Reads MSR_CU_DEF_ERR, extracts the BIOS-programmed LVT offset from
+ * bits [7:4], and registers it via setup_APIC_eilvt(). If the BIOS left
+ * the offset at 0, fall back to the architectural default DEF_LVT_OFF
+ * (0x2) and complain with FW_BUG. On successful registration, switch the
+ * dispatch hook to the real AMD handler, then program the interrupt type
+ * field to APIC delivery and write the MSR back.
+ */
+static void deferred_error_interrupt_enable(struct cpuinfo_x86 *c)
+{
+       u32 low = 0, high = 0;
+       int def_offset = -1, def_new;
+
+       /* Bail out silently if the MSR is not implemented on this CPU. */
+       if (rdmsr_safe(MSR_CU_DEF_ERR, &low, &high))
+               return;
+
+       def_new = (low & MASK_DEF_LVTOFF) >> 4;
+       if (!(low & MASK_DEF_LVTOFF)) {
+               pr_err(FW_BUG "Your BIOS is not setting up LVT offset 0x2 for deferred error IRQs correctly.\n");
+               def_new = DEF_LVT_OFF;
+               low = (low & ~MASK_DEF_LVTOFF) | (DEF_LVT_OFF << 4);
+       }
+
+       def_offset = setup_APIC_deferred_error(def_offset, def_new);
+       /* Only install the real handler once the LVT offset is in place. */
+       if ((def_offset == def_new) &&
+           (deferred_error_int_vector != amd_deferred_error_interrupt))
+               deferred_error_int_vector = amd_deferred_error_interrupt;
+
+       /* Select APIC interrupt delivery for deferred errors. */
+       low = (low & ~MASK_DEF_INT_TYPE) | DEF_INT_TYPE_APIC;
+       wrmsr(MSR_CU_DEF_ERR, low, high);
+}
+
 /* cpu init entry point, called from mce.c with preempt off */
 void mce_amd_feature_init(struct cpuinfo_x86 *c)
 {
                        mce_threshold_block_init(&b, offset);
                }
        }
+
+       if (mce_flags.succor)
+               deferred_error_interrupt_enable(c);
 }
 
 static void __log_error(unsigned int bank, bool threshold_err, u64 misc)
        wrmsrl(MSR_IA32_MCx_STATUS(bank), 0);
 }
 
+/* Common body of the deferred-error IRQ: bump the per-CPU stat, dispatch. */
+static inline void __smp_deferred_error_interrupt(void)
+{
+       inc_irq_stat(irq_deferred_error_count);
+       deferred_error_int_vector();
+}
+
+/* C entry point for the deferred-error vector (non-traced variant). */
+asmlinkage __visible void smp_deferred_error_interrupt(void)
+{
+       entering_irq();
+       __smp_deferred_error_interrupt();
+       exiting_ack_irq();
+}
+
+/*
+ * Traced variant of the deferred-error entry point: brackets the common
+ * handler with irq_vectors entry/exit tracepoints.
+ */
+asmlinkage __visible void smp_trace_deferred_error_interrupt(void)
+{
+       entering_irq();
+       trace_deferred_error_apic_entry(DEFERRED_ERROR_VECTOR);
+       __smp_deferred_error_interrupt();
+       trace_deferred_error_apic_exit(DEFERRED_ERROR_VECTOR);
+       exiting_ack_irq();
+}
+
+/* APIC interrupt handler for deferred errors */
+static void amd_deferred_error_interrupt(void)
+{
+       u64 status;
+       unsigned int bank;
+
+       for (bank = 0; bank < mca_cfg.banks; ++bank) {
+               rdmsrl(MSR_IA32_MCx_STATUS(bank), status);
+
+               if (!(status & MCI_STATUS_VAL) ||
+                   !(status & MCI_STATUS_DEFERRED))
+                       continue;
+
+               __log_error(bank, false, 0);
+               break;
+       }
+}
+
 /*
  * APIC Interrupt Handler
  */