www.infradead.org Git - users/hch/misc.git/commitdiff
KVM: arm64: PMU: Reload when resetting
author: Akihiko Odaki <akihiko.odaki@daynix.com>
Sat, 15 Mar 2025 09:12:14 +0000 (18:12 +0900)
committer: Oliver Upton <oliver.upton@linux.dev>
Mon, 17 Mar 2025 17:45:25 +0000 (10:45 -0700)
Replace kvm_pmu_vcpu_reset() with the generic PMU reloading mechanism to
ensure consistency with system registers and to reduce code size.

Signed-off-by: Akihiko Odaki <akihiko.odaki@daynix.com>
Reviewed-by: Marc Zyngier <maz@kernel.org>
Link: https://lore.kernel.org/r/20250315-pmc-v5-5-ecee87dab216@daynix.com
Signed-off-by: Oliver Upton <oliver.upton@linux.dev>
arch/arm64/kvm/pmu-emul.c
arch/arm64/kvm/reset.c
arch/arm64/kvm/sys_regs.c
include/kvm/arm_pmu.h

index 8e10124a7420f5a1a601303f281cb3f107309108..aae5713d8993a7e53e406f88dd6a44012327e856 100644 (file)
@@ -254,20 +254,6 @@ void kvm_pmu_vcpu_init(struct kvm_vcpu *vcpu)
                pmu->pmc[i].idx = i;
 }
 
-/**
- * kvm_pmu_vcpu_reset - reset pmu state for cpu
- * @vcpu: The vcpu pointer
- *
- */
-void kvm_pmu_vcpu_reset(struct kvm_vcpu *vcpu)
-{
-       unsigned long mask = kvm_pmu_implemented_counter_mask(vcpu);
-       int i;
-
-       for_each_set_bit(i, &mask, 32)
-               kvm_pmu_stop_counter(kvm_vcpu_idx_to_pmc(vcpu, i));
-}
-
 /**
  * kvm_pmu_vcpu_destroy - free perf event of PMU for cpu
  * @vcpu: The vcpu pointer
index 803e11b0dc8f5eb74b07b0ad745b0c4f666713d5..f82fcc614e13681960cb3379466a99b198a2fa43 100644 (file)
@@ -196,9 +196,6 @@ void kvm_reset_vcpu(struct kvm_vcpu *vcpu)
        vcpu->arch.reset_state.reset = false;
        spin_unlock(&vcpu->arch.mp_state_lock);
 
-       /* Reset PMU outside of the non-preemptible section */
-       kvm_pmu_vcpu_reset(vcpu);
-
        preempt_disable();
        loaded = (vcpu->cpu != -1);
        if (loaded)
index 727579acc7f6007e7afc5e14f0050a850f31abc1..14f66c7a4545af767818648868c00682d8a011e2 100644 (file)
@@ -4480,6 +4480,9 @@ void kvm_reset_sys_regs(struct kvm_vcpu *vcpu)
        }
 
        set_bit(KVM_ARCH_FLAG_ID_REGS_INITIALIZED, &kvm->arch.flags);
+
+       if (kvm_vcpu_has_pmu(vcpu))
+               kvm_make_request(KVM_REQ_RELOAD_PMU, vcpu);
 }
 
 /**
index a045284b3fd46b0d20cd74f04b1ef5cac7f74c96..7eaac08e47f56015b4f1edb159ca3af3e1452500 100644 (file)
@@ -52,7 +52,6 @@ u64 kvm_pmu_implemented_counter_mask(struct kvm_vcpu *vcpu);
 u64 kvm_pmu_accessible_counter_mask(struct kvm_vcpu *vcpu);
 u64 kvm_pmu_get_pmceid(struct kvm_vcpu *vcpu, bool pmceid1);
 void kvm_pmu_vcpu_init(struct kvm_vcpu *vcpu);
-void kvm_pmu_vcpu_reset(struct kvm_vcpu *vcpu);
 void kvm_pmu_vcpu_destroy(struct kvm_vcpu *vcpu);
 void kvm_pmu_reprogram_counter_mask(struct kvm_vcpu *vcpu, u64 val);
 void kvm_pmu_flush_hwstate(struct kvm_vcpu *vcpu);
@@ -127,7 +126,6 @@ static inline u64 kvm_pmu_accessible_counter_mask(struct kvm_vcpu *vcpu)
        return 0;
 }
 static inline void kvm_pmu_vcpu_init(struct kvm_vcpu *vcpu) {}
-static inline void kvm_pmu_vcpu_reset(struct kvm_vcpu *vcpu) {}
 static inline void kvm_pmu_vcpu_destroy(struct kvm_vcpu *vcpu) {}
 static inline void kvm_pmu_reprogram_counter_mask(struct kvm_vcpu *vcpu, u64 val) {}
 static inline void kvm_pmu_flush_hwstate(struct kvm_vcpu *vcpu) {}