  * @get_wallclock:             get time from HW clock like RTC etc.
  * @set_wallclock:             set time back to HW clock
  * @is_untracked_pat_range:    exclude from PAT logic
+ * @nmi_init:                   enable NMI on cpus
  */
 struct x86_platform_ops {
        unsigned long (*calibrate_tsc)(void);
        unsigned long (*get_wallclock)(void);
        int (*set_wallclock)(unsigned long nowtime);
        void (*iommu_shutdown)(void);
        bool (*is_untracked_pat_range)(u64 start, u64 end);
+       void (*nmi_init)(void);
 };
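
The header hunk above only adds the hook; the generic call site is not part of this excerpt. As a minimal sketch of the intended use, assuming the hook is invoked once on each processor during CPU bringup (the function below is illustrative, not part of the patch):

static void example_cpu_bringup(void)
{
	/*
	 * Assumed call site, runs on the CPU coming online.  The default
	 * implementation is a no-op; the UV override further down unmasks
	 * LINT1 NMI delivery on this CPU.
	 */
	x86_platform.nmi_init();
}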
 
 extern struct x86_init_ops x86_init;
 
 #include <linux/init.h>
 #include <linux/io.h>
 #include <linux/pci.h>
+#include <linux/kdebug.h>
 
 #include <asm/uv/uv_mmrs.h>
 #include <asm/uv/uv_hub.h>
 static u64 gru_start_paddr, gru_end_paddr;
 int uv_min_hub_revision_id;
 EXPORT_SYMBOL_GPL(uv_min_hub_revision_id);
+static DEFINE_SPINLOCK(uv_nmi_lock);
 
 static inline bool is_GRU_range(u64 start, u64 end)
 {
        if (!strcmp(oem_id, "SGI")) {
                nodeid = early_get_nodeid();
                x86_platform.is_untracked_pat_range =  uv_is_untracked_pat_range;
+               x86_platform.nmi_init = uv_nmi_init;
                if (!strcmp(oem_table_id, "UVL"))
                        uv_system_type = UV_LEGACY_APIC;
                else if (!strcmp(oem_table_id, "UVX"))
                set_x2apic_extra_bits(uv_hub_info->pnode);
 }
 
+/*
+ * When NMI is received, print a stack trace.
+ */
+int uv_handle_nmi(struct notifier_block *self, unsigned long reason, void *data)
+{
+       if (reason != DIE_NMI_IPI)
+               return NOTIFY_OK;
+       /*
+        * Use a lock so only one cpu prints at a time
+        * to prevent intermixed output.
+        */
+       spin_lock(&uv_nmi_lock);
+       pr_info("NMI stack dump cpu %u:\n", smp_processor_id());
+       dump_stack();
+       spin_unlock(&uv_nmi_lock);
+
+       return NOTIFY_STOP;
+}
+
+static struct notifier_block uv_dump_stack_nmi_nb = {
+       .notifier_call  = uv_handle_nmi
+};
+
+void uv_register_nmi_notifier(void)
+{
+       if (register_die_notifier(&uv_dump_stack_nmi_nb))
+               printk(KERN_WARNING "UV NMI handler failed to register\n");
+}
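
For context, registering the notifier does not touch the hardware; uv_handle_nmi() is reached through the die-notifier chain walked from the arch NMI path. A rough sketch of that dispatch, assuming a notify_die(DIE_NMI_IPI, ...) call like the one in the x86 NMI code (the surrounding function here is illustrative only):

static void example_nmi_dispatch(struct pt_regs *regs, unsigned char reason)
{
	/*
	 * Illustrative only: the NMI is reported through the die notifier
	 * chain; uv_handle_nmi() returns NOTIFY_STOP after dumping the
	 * stack, which ends further processing of this NMI.
	 */
	if (notify_die(DIE_NMI_IPI, "nmi_ipi", regs, reason, 2, SIGINT) == NOTIFY_STOP)
		return;

	/* otherwise fall through to the default unknown-NMI handling */
}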
+
+void uv_nmi_init(void)
+{
+       unsigned int value;
+
+       /*
+        * Unmask NMI on all cpus
+        */
+       value = apic_read(APIC_LVT1) | APIC_DM_NMI;
+       value &= ~APIC_LVT_MASKED;
+       apic_write(APIC_LVT1, value);
+}
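
APIC_LVT1 is a register of the local APIC, so the write above only unmasks NMI on the CPU executing it, consistent with the "enable NMI on cpus" description of the hook. Purely for illustration (this helper is hypothetical and not part of the patch), the inverse operation would set the same mask bit:

/* Hypothetical helper: re-mask LINT1 NMI delivery on the current CPU. */
static void uv_nmi_mask(void)
{
	unsigned int value;

	value = apic_read(APIC_LVT1);
	value |= APIC_LVT_MASKED;	/* set the bit that uv_nmi_init() clears */
	apic_write(APIC_LVT1, value);
}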
 
 void __init uv_system_init(void)
 {
 
        uv_cpu_init();
        uv_scir_register_cpu_notifier();
+       uv_register_nmi_notifier();
        proc_mkdir("sgi_uv", NULL);
 
        /* register Legacy VGA I/O redirection handler */
 
        .setup_percpu_clockev           = setup_secondary_APIC_clock,
 };
 
+static void default_nmi_init(void) { }
+
 struct x86_platform_ops x86_platform = {
        .calibrate_tsc                  = native_calibrate_tsc,
        .get_wallclock                  = mach_get_cmos_time,
        .set_wallclock                  = mach_set_rtc_mmss,
        .iommu_shutdown                 = iommu_shutdown_noop,
        .is_untracked_pat_range         = is_ISA_range,
+       .nmi_init                       = default_nmi_init
 };
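
Making default_nmi_init() an empty stub rather than leaving the pointer NULL means callers can invoke x86_platform.nmi_init() unconditionally on every platform, without a NULL check; UV simply overrides the pointer in the OEM-check hunk shown earlier, mirroring how is_untracked_pat_range is overridden.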