        clear_bit(pmc->idx, (unsigned long *)&pmc_to_pmu(pmc)->reprogram_pmi);
 }
 
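+/*
+ * Pause the perf_event and fold its count into pmc->counter; the event
+ * value is reset so nothing is accumulated twice when the event is later
+ * resumed or released.
+ */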
+static void pmc_pause_counter(struct kvm_pmc *pmc)
+{
+       u64 counter = pmc->counter;
+
+       if (!pmc->perf_event)
+               return;
+
+       /* update counter, reset event value to avoid redundant accumulation */
+       counter += perf_event_pause(pmc->perf_event, true);
+       pmc->counter = counter & pmc_bitmask(pmc);
+}
+
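+/*
+ * Try to reuse the existing perf_event: recalibrate its sample period from
+ * the current counter value and re-enable it.  Returns false if there is no
+ * event or the perf core rejects the new period, in which case the caller
+ * falls back to pmc_reprogram_counter().
+ */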
+static bool pmc_resume_counter(struct kvm_pmc *pmc)
+{
+       if (!pmc->perf_event)
+               return false;
+
+       /* recalibrate sample period and check if it's accepted by perf core */
+       if (perf_event_period(pmc->perf_event,
+                       (-pmc->counter) & pmc_bitmask(pmc)))
+               return false;
+
+       /* reuse perf_event to serve as pmc_reprogram_counter() does */
+       perf_event_enable(pmc->perf_event);
+
+       clear_bit(pmc->idx, (unsigned long *)&pmc_to_pmu(pmc)->reprogram_pmi);
+       return true;
+}
+
 void reprogram_gp_counter(struct kvm_pmc *pmc, u64 eventsel)
 {
        unsigned config, type = PERF_TYPE_RAW;
 
        pmc->eventsel = eventsel;
 
-       pmc_stop_counter(pmc);
+       pmc_pause_counter(pmc);
 
        if (!(eventsel & ARCH_PERFMON_EVENTSEL_ENABLE) || !pmc_is_enabled(pmc))
                return;
        if (type == PERF_TYPE_RAW)
                config = eventsel & X86_RAW_EVENT_MASK;
 
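+       /* the config is unchanged: try to resume the paused perf_event */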
+       if (pmc->current_config == eventsel && pmc_resume_counter(pmc))
+               return;
+
+       pmc_release_perf_event(pmc);
+
+       pmc->current_config = eventsel;
        pmc_reprogram_counter(pmc, type, config,
                              !(eventsel & ARCH_PERFMON_EVENTSEL_USR),
                              !(eventsel & ARCH_PERFMON_EVENTSEL_OS),
        struct kvm_pmu_event_filter *filter;
        struct kvm *kvm = pmc->vcpu->kvm;
 
-       pmc_stop_counter(pmc);
+       pmc_pause_counter(pmc);
 
        if (!en_field || !pmc_is_enabled(pmc))
                return;
                        return;
        }
 
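+       /* the fixed ctrl is unchanged: try to resume the paused perf_event */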
+       if (pmc->current_config == (u64)ctrl && pmc_resume_counter(pmc))
+               return;
+
+       pmc_release_perf_event(pmc);
+
+       pmc->current_config = (u64)ctrl;
        pmc_reprogram_counter(pmc, PERF_TYPE_HARDWARE,
                              kvm_x86_ops->pmu_ops->find_fixed_event(idx),
                              !(en_field & 0x2), /* exclude user */
 
        return counter & pmc_bitmask(pmc);
 }
 
-static inline void pmc_stop_counter(struct kvm_pmc *pmc)
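+/*
+ * Release the perf_event and reset current_config so the freed event can
+ * never satisfy the reuse check in reprogram_{gp,fixed}_counter().
+ */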
+static inline void pmc_release_perf_event(struct kvm_pmc *pmc)
 {
        if (pmc->perf_event) {
-               pmc->counter = pmc_read_counter(pmc);
                perf_event_release_kernel(pmc->perf_event);
                pmc->perf_event = NULL;
+               pmc->current_config = 0;
+       }
+}
+
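+/* Snapshot the counter value, then release the backing perf_event. */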
+static inline void pmc_stop_counter(struct kvm_pmc *pmc)
+{
+       if (pmc->perf_event) {
+               pmc->counter = pmc_read_counter(pmc);
+               pmc_release_perf_event(pmc);
        }
 }
 
 
                pmu->gp_counters[i].type = KVM_PMC_GP;
                pmu->gp_counters[i].vcpu = vcpu;
                pmu->gp_counters[i].idx = i;
+               pmu->gp_counters[i].current_config = 0;
        }
 
        for (i = 0; i < INTEL_PMC_MAX_FIXED; i++) {
                pmu->fixed_counters[i].type = KVM_PMC_FIXED;
                pmu->fixed_counters[i].vcpu = vcpu;
                pmu->fixed_counters[i].idx = i + INTEL_PMC_IDX_FIXED;
+               pmu->fixed_counters[i].current_config = 0;
        }
 }