if (!kvm_has_feat(vcpu->kvm, ID_AA64DFR0_EL1, PMUVer, V3P5))
                val &= ~ARMV8_PMU_PMCR_LP;
 
+       /* Request a reload of the PMU to enable/disable affected counters */
+       if ((__vcpu_sys_reg(vcpu, PMCR_EL0) ^ val) & ARMV8_PMU_PMCR_E)
+               kvm_make_request(KVM_REQ_RELOAD_PMU, vcpu);
+
        /* The reset bits don't indicate any state, and shouldn't be saved. */
        __vcpu_sys_reg(vcpu, PMCR_EL0) = val & ~(ARMV8_PMU_PMCR_C | ARMV8_PMU_PMCR_P);
 
-       if (val & ARMV8_PMU_PMCR_E) {
-               kvm_pmu_reprogram_counter_mask(vcpu,
-                      __vcpu_sys_reg(vcpu, PMCNTENSET_EL0));
-       } else {
-               kvm_pmu_reprogram_counter_mask(vcpu,
-                      __vcpu_sys_reg(vcpu, PMCNTENSET_EL0));
-       }
-
        if (val & ARMV8_PMU_PMCR_C)
                kvm_pmu_set_counter_value(vcpu, ARMV8_PMU_CYCLE_IDX, 0);
 
        if (val & ARMV8_PMU_PMCR_P) {
                unsigned long mask = kvm_pmu_accessible_counter_mask(vcpu);

                mask &= ~BIT(ARMV8_PMU_CYCLE_IDX);
                for_each_set_bit(i, &mask, 32)
                        kvm_pmu_set_pmc_value(kvm_vcpu_idx_to_pmc(vcpu, i), 0, true);
        }
-       kvm_vcpu_pmu_restore_guest(vcpu);
 }
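
The guard added above queues KVM_REQ_RELOAD_PMU only when the write actually
toggles PMCR_EL0.E: XORing the stored register with the incoming value
isolates the bits that changed, and masking with ARMV8_PMU_PMCR_E keeps just
the global enable bit. Below is a minimal, self-contained sketch of that
edge-detection idiom (plain C outside the kernel; PMCR_E and
enable_bit_toggled are illustrative names, not kernel symbols):

        #include <stdbool.h>
        #include <stdint.h>
        #include <stdio.h>

        #define PMCR_E (1ULL << 0)      /* stand-in for ARMV8_PMU_PMCR_E */

        /* True only when the written value flips the enable bit relative
         * to the stored register, i.e. a reload is actually needed. */
        static bool enable_bit_toggled(uint64_t old_pmcr, uint64_t new_pmcr)
        {
                return (old_pmcr ^ new_pmcr) & PMCR_E;
        }

        int main(void)
        {
                printf("%d\n", enable_bit_toggled(0, PMCR_E));          /* 1: 0 -> 1 */
                printf("%d\n", enable_bit_toggled(PMCR_E, PMCR_E));     /* 0: unchanged */
                printf("%d\n", enable_bit_toggled(PMCR_E, 0));          /* 1: 1 -> 0 */
                return 0;
        }

Deferring the work to the request path means a burst of PMCR_EL0 writes costs
one reload on the next guest entry rather than one reprogramming pass per
write.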
 
 void kvm_vcpu_reload_pmu(struct kvm_vcpu *vcpu)
 {
        u64 mask = kvm_pmu_implemented_counter_mask(vcpu);
 
-       kvm_pmu_handle_pmcr(vcpu, kvm_vcpu_read_pmcr(vcpu));
-
        __vcpu_sys_reg(vcpu, PMOVSSET_EL0) &= mask;
        __vcpu_sys_reg(vcpu, PMINTENSET_EL1) &= mask;
        __vcpu_sys_reg(vcpu, PMCNTENSET_EL0) &= mask;
+
+       kvm_pmu_reprogram_counter_mask(vcpu, mask);
 }
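
With this hunk, the overflow, interrupt, and enable masks are trimmed to the
implemented counters *before* any reprogramming happens, and the function
reprograms every implemented counter directly instead of re-running the
PMCR_EL0 handler. For reference, this is how the function reads once the hunk
is applied (reconstructed from the context lines above, not quoted from the
tree):

        void kvm_vcpu_reload_pmu(struct kvm_vcpu *vcpu)
        {
                u64 mask = kvm_pmu_implemented_counter_mask(vcpu);

                __vcpu_sys_reg(vcpu, PMOVSSET_EL0) &= mask;
                __vcpu_sys_reg(vcpu, PMINTENSET_EL1) &= mask;
                __vcpu_sys_reg(vcpu, PMCNTENSET_EL0) &= mask;

                kvm_pmu_reprogram_counter_mask(vcpu, mask);
        }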
 
 int kvm_arm_pmu_v3_enable(struct kvm_vcpu *vcpu)