struct kvm_pmu *pmu = pmc_to_pmu(pmc);
        bool skip_pmi = false;
 
-       /* Ignore counters that have been reprogrammed already. */
+       /*
+        * Ignore overflow events for counters that are scheduled to be
+        * reprogrammed, e.g. if a PMI for the previous event races with KVM's
+        * handling of a related guest WRMSR.
+        */
        if (test_and_set_bit(pmc->idx, pmu->reprogram_pmi))
                return;
 
        return allow_event;
 }
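The test_and_set_bit() above doubles as a gate against stale PMIs: if the counter's bit in reprogram_pmi is already set, a reprogram is pending and the overflow is dropped; otherwise the overflow itself marks the counter for reprogramming. A minimal standalone C sketch of that gating follows; the names (test_and_set_idx(), counter_overflow()) are illustrative stand-ins, not the KVM code.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_ulong reprogram_pmi;   /* one bit per counter */

/* Returns true if the counter's bit was already set, i.e. a reprogram is pending. */
static bool test_and_set_idx(int idx)
{
        unsigned long mask = 1UL << idx;

        return atomic_fetch_or(&reprogram_pmi, mask) & mask;
}

static void counter_overflow(int idx)
{
        if (test_and_set_idx(idx)) {
                printf("counter %d: reprogram pending, stale overflow dropped\n", idx);
                return;
        }
        printf("counter %d: overflow handled, reprogram scheduled\n", idx);
}

int main(void)
{
        counter_overflow(0);   /* no pending reprogram: overflow is handled */
        test_and_set_idx(1);   /* a WRMSR-like path marks counter 1 for reprogram */
        counter_overflow(1);   /* the racing overflow for counter 1 is dropped */
        return 0;
}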
 
-void reprogram_counter(struct kvm_pmc *pmc)
+static void reprogram_counter(struct kvm_pmc *pmc)
 {
        struct kvm_pmu *pmu = pmc_to_pmu(pmc);
        u64 eventsel = pmc->eventsel;
 reprogram_complete:
        clear_bit(pmc->idx, (unsigned long *)&pmc_to_pmu(pmc)->reprogram_pmi);
 }
-EXPORT_SYMBOL_GPL(reprogram_counter);
 
 void kvm_pmu_handle_event(struct kvm_vcpu *vcpu)
 {
        for_each_set_bit(bit, pmu->reprogram_pmi, X86_PMC_IDX_MAX) {
                struct kvm_pmc *pmc = static_call(kvm_x86_pmu_pmc_idx_to_pmc)(pmu, bit);
 
-               if (unlikely(!pmc || !pmc->perf_event)) {
+               if (unlikely(!pmc)) {
                        clear_bit(bit, pmu->reprogram_pmi);
                        continue;
                }
+
                reprogram_counter(pmc);
        }
 
 static inline bool cpl_is_matched(struct kvm_pmc *pmc)
 {
        bool select_os, select_user;
-       u64 config = pmc->current_config;
+       u64 config;
 
        if (pmc_is_gp(pmc)) {
+               config = pmc->eventsel;
                select_os = config & ARCH_PERFMON_EVENTSEL_OS;
                select_user = config & ARCH_PERFMON_EVENTSEL_USR;
        } else {
+               config = fixed_ctrl_field(pmc_to_pmu(pmc)->fixed_ctr_ctrl,
+                                         pmc->idx - INTEL_PMC_IDX_FIXED);
                select_os = config & 0x1;
                select_user = config & 0x2;
        }
 
                                             KVM_PMC_MAX_FIXED);
 }
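For fixed counters, cpl_is_matched() now reads the guest-requested configuration from the counter's field in IA32_FIXED_CTR_CTRL instead of the possibly stale pmc->current_config. Below is a standalone sketch of that extraction, assuming the architectural layout of four control bits per fixed counter (bit 0 enables counting in ring 0, bit 1 in rings 1-3); fixed_field() here mirrors the shift-and-mask done by KVM's fixed_ctrl_field() helper.

#include <stdint.h>
#include <stdio.h>

/* Extract the 4-bit control field for fixed counter 'idx'. */
static uint64_t fixed_field(uint64_t fixed_ctr_ctrl, int idx)
{
        return (fixed_ctr_ctrl >> (idx * 4)) & 0xf;
}

int main(void)
{
        /* Example: ctr0 counts OS+USR, ctr1 counts USR with PMI enabled, ctr2 is disabled. */
        uint64_t fixed_ctr_ctrl = 0x0a3;
        int idx;

        for (idx = 0; idx < 3; idx++) {
                uint64_t cfg = fixed_field(fixed_ctr_ctrl, idx);

                printf("fixed ctr %d: os=%d user=%d\n", idx,
                       !!(cfg & 0x1), !!(cfg & 0x2));
        }
        return 0;
}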
 
-void reprogram_counter(struct kvm_pmc *pmc);
+static inline void kvm_pmu_request_counter_reprogram(struct kvm_pmc *pmc)
+{
+       set_bit(pmc->idx, pmc_to_pmu(pmc)->reprogram_pmi);
+       kvm_make_request(KVM_REQ_PMU, pmc->vcpu);
+}
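The new helper replaces the direct reprogram_counter() calls in the MSR emulation paths: callers mark the counter in reprogram_pmi and raise KVM_REQ_PMU, and the deferred work is consumed by kvm_pmu_handle_event() before the vCPU re-enters the guest. A self-contained sketch of that defer-and-consume pattern, using hypothetical request_counter_reprogram()/handle_pmu_event() helpers rather than the KVM code:

#include <stdbool.h>
#include <stdio.h>

#define NR_COUNTERS 8

static unsigned long reprogram_pmi;   /* one bit per counter */
static bool pmu_request_pending;      /* stand-in for KVM_REQ_PMU */

/* Producer side: record which counter needs reprogramming and raise the request. */
static void request_counter_reprogram(int idx)
{
        reprogram_pmi |= 1UL << idx;
        pmu_request_pending = true;
}

/* Consumer side: walk the bitmap, reprogram each marked counter, clear its bit. */
static void handle_pmu_event(void)
{
        int idx;

        for (idx = 0; idx < NR_COUNTERS; idx++) {
                if (!(reprogram_pmi & (1UL << idx)))
                        continue;
                printf("reprogramming counter %d\n", idx);
                reprogram_pmi &= ~(1UL << idx);
        }
        pmu_request_pending = false;
}

int main(void)
{
        request_counter_reprogram(0);
        request_counter_reprogram(3);
        if (pmu_request_pending)   /* analogous to the KVM_REQ_PMU check before guest entry */
                handle_pmu_event();
        return 0;
}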
 
 void kvm_pmu_deliver_pmi(struct kvm_vcpu *vcpu);
 void kvm_pmu_handle_event(struct kvm_vcpu *vcpu);
 
                pmc = get_fixed_pmc(pmu, MSR_CORE_PERF_FIXED_CTR0 + i);
 
                __set_bit(INTEL_PMC_IDX_FIXED + i, pmu->pmc_in_use);
-               reprogram_counter(pmc);
+                       kvm_pmu_request_counter_reprogram(pmc);
        }
 }
 
        for_each_set_bit(bit, (unsigned long *)&diff, X86_PMC_IDX_MAX) {
                pmc = intel_pmc_idx_to_pmc(pmu, bit);
                if (pmc)
-                       reprogram_counter(pmc);
+                       kvm_pmu_request_counter_reprogram(pmc);
        }
 }
 
                                reserved_bits ^= HSW_IN_TX_CHECKPOINTED;
                        if (!(data & reserved_bits)) {
                                pmc->eventsel = data;
-                               reprogram_counter(pmc);
+                               kvm_pmu_request_counter_reprogram(pmc);
                                return 0;
                        }
                } else if (intel_pmu_handle_lbr_msrs_access(vcpu, msr_info, false))