 static inline void perf_events_lapic_init(void)	{ }
 #endif
 
+#if defined(CONFIG_PERF_EVENTS) && defined(CONFIG_CPU_SUP_AMD)
+ extern void amd_pmu_enable_virt(void);
+ extern void amd_pmu_disable_virt(void);
+#else
+ static inline void amd_pmu_enable_virt(void) { }
+ static inline void amd_pmu_disable_virt(void) { }
+#endif
+
 #endif /* _ASM_X86_PERF_EVENT_H */
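The stubs above let the SVM code at the bottom of this patch call
amd_pmu_enable_virt()/amd_pmu_disable_virt() unconditionally: real
implementations are provided when both perf events and AMD CPU support are
built in, and empty inlines otherwise.

The mask value used throughout, AMD_PERFMON_EVENTSEL_HOSTONLY, refers to the
Host-only bit of the AMD event-select (PERF_CTL) MSRs; together with the
Guest-only bit it forms the HG field in bits 41:40. A sketch of the
definitions this patch relies on, assuming they are added to perf_event.h
outside this excerpt:

    #define AMD_PERFMON_EVENTSEL_GUESTONLY	(1ULL << 40)	/* count only in guest mode */
    #define AMD_PERFMON_EVENTSEL_HOSTONLY	(1ULL << 41)	/* count only in host mode */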
 
        /*
         * AMD specific bits
         */
-       struct amd_nb           *amd_nb;
+       struct amd_nb                   *amd_nb;
+       /* Bits set here are cleared from the perf_ctr ctrl registers on write */
+       u64                             perf_ctr_virt_mask;
 
        void                            *kfree_on_online;
 };
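perf_ctr_virt_mask is per-CPU state consulted on every counter enable: any
bit set in it is stripped from the value written to the control MSR. A worked
example of that arithmetic follows the next hunk.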
 static inline void __x86_pmu_enable_event(struct hw_perf_event *hwc,
                                          u64 enable_mask)
 {
+       u64 disable_mask = __this_cpu_read(cpu_hw_events.perf_ctr_virt_mask);
+
        if (hwc->extra_reg.reg)
                wrmsrl(hwc->extra_reg.reg, hwc->extra_reg.config);
-       wrmsrl(hwc->config_base, hwc->config | enable_mask);
+       wrmsrl(hwc->config_base, (hwc->config | enable_mask) & ~disable_mask);
 }
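A minimal standalone sketch of the masking arithmetic above, using a
hypothetical event configuration (AMD_PERFMON_EVENTSEL_HOSTONLY as sketched
earlier; ARCH_PERFMON_EVENTSEL_ENABLE is bit 22):

    #include <stdint.h>
    #include <stdio.h>

    #define AMD_PERFMON_EVENTSEL_HOSTONLY	(1ULL << 41)
    #define ARCH_PERFMON_EVENTSEL_ENABLE	(1ULL << 22)

    int main(void)
    {
    	/* hypothetical host-only event configuration */
    	uint64_t config = 0x76ULL | AMD_PERFMON_EVENTSEL_HOSTONLY;

    	/* SVM disabled: the Host-only bit is stripped, counting proceeds */
    	uint64_t mask = AMD_PERFMON_EVENTSEL_HOSTONLY;
    	printf("svm off: 0x%llx\n",
    	       (unsigned long long)((config | ARCH_PERFMON_EVENTSEL_ENABLE) & ~mask));

    	/* SVM enabled: mask is zero, the Host-only bit reaches the MSR */
    	mask = 0;
    	printf("svm on:  0x%llx\n",
    	       (unsigned long long)((config | ARCH_PERFMON_EVENTSEL_ENABLE) & ~mask));
    	return 0;
    }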
 
 void x86_pmu_enable_all(int added);
 
 #include <linux/perf_event.h>
+#include <linux/export.h>
 #include <linux/types.h>
 #include <linux/init.h>
 #include <linux/slab.h>
        struct amd_nb *nb;
        int i, nb_id;
 
-       if (boot_cpu_data.x86_max_cores < 2)
+       cpuc->perf_ctr_virt_mask = AMD_PERFMON_EVENTSEL_HOSTONLY;
+
+       if (boot_cpu_data.x86_max_cores < 2 || boot_cpu_data.x86 == 0x15)
                return;
 
        nb_id = amd_get_nb_id(cpu);
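Every CPU now comes online with the Host-only bit masked out, so counters
behave exactly as before until KVM actually enables SVM. The early return
(also taken on family 0x15, which presumably handles its northbridge counters
differently) only skips the shared-northbridge setup below; the
virtualization mask is already initialized by then.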
        .put_event_constraints  = amd_put_event_constraints,
 
        .cpu_prepare            = amd_pmu_cpu_prepare,
-       .cpu_starting           = amd_pmu_cpu_starting,
        .cpu_dead               = amd_pmu_cpu_dead,
 #endif
+       .cpu_starting           = amd_pmu_cpu_starting,
 };
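Because amd_pmu_cpu_starting() now initializes perf_ctr_virt_mask, it has to
run in every configuration, so the .cpu_starting callback moves out of the
preprocessor-guarded block that still encloses .cpu_prepare and .cpu_dead.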
 
 __init int amd_pmu_init(void)
 
        return 0;
 }
+
+void amd_pmu_enable_virt(void)
+{
+       struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+
+       cpuc->perf_ctr_virt_mask = 0;
+
+       /* Reload all events so the cleared mask takes effect */
+       x86_pmu_disable_all();
+       x86_pmu_enable_all(0);
+}
+EXPORT_SYMBOL_GPL(amd_pmu_enable_virt);
+
+void amd_pmu_disable_virt(void)
+{
+       struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+
+       /*
+        * We only mask out the Host-only bit so that host-only counting works
+        * when SVM is disabled. If someone sets up a guest-only counter while
+        * SVM is disabled, the Guest-only bit still gets set and the counter
+        * will not count anything.
+        */
+       cpuc->perf_ctr_virt_mask = AMD_PERFMON_EVENTSEL_HOSTONLY;
+
+       /* Reload all events so the restored mask takes effect */
+       x86_pmu_disable_all();
+       x86_pmu_enable_all(0);
+}
+EXPORT_SYMBOL_GPL(amd_pmu_disable_virt);
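Both hooks follow the same pattern: update the per-CPU mask, then force a
full disable/enable cycle so every active event's control MSR is rewritten
through __x86_pmu_enable_event() with the new mask applied. From userspace,
host-only or guest-only counting is requested through the
exclude_guest/exclude_host fields of struct perf_event_attr, which the AMD
perf code (outside this excerpt) maps onto the HG bits. A hedged sketch of
such a request; the event choice is arbitrary:

    #include <linux/perf_event.h>
    #include <string.h>
    #include <sys/syscall.h>
    #include <unistd.h>

    /* Open a host-only cycle counter for the calling task. On AMD this is
     * expected to set the Host-only bit in PERF_CTL, which the code above
     * masks out again whenever SVM is disabled. */
    static int open_host_only_cycles(void)
    {
    	struct perf_event_attr attr;

    	memset(&attr, 0, sizeof(attr));
    	attr.size          = sizeof(attr);
    	attr.type          = PERF_TYPE_HARDWARE;
    	attr.config        = PERF_COUNT_HW_CPU_CYCLES;
    	attr.exclude_guest = 1;	/* do not count while a guest runs */

    	/* pid 0 = this task, cpu -1 = any CPU, no group fd, no flags */
    	return syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
    }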
 
 #include <linux/ftrace_event.h>
 #include <linux/slab.h>
 
+#include <asm/perf_event.h>
 #include <asm/tlbflush.h>
 #include <asm/desc.h>
 #include <asm/kvm_para.h>
                wrmsrl(MSR_AMD64_TSC_RATIO, TSC_RATIO_DEFAULT);
 
        cpu_svm_disable();
+
+       amd_pmu_disable_virt();
 }
 
 static int svm_hardware_enable(void *garbage)
 
        svm_init_erratum_383();
 
+       amd_pmu_enable_virt();
+
        return 0;
 }
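The two calls mirror each other: svm_hardware_enable() clears the mask once
SVM is usable, and svm_hardware_disable() restores it, so the
Host-only/Guest-only bits reach the hardware only while SVM is actually
enabled.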