KVM: arm64: PMU: Assume PMU presence in pmu-emul.c
author    Akihiko Odaki <akihiko.odaki@daynix.com>
          Sat, 15 Mar 2025 09:12:11 +0000 (18:12 +0900)
committer Oliver Upton <oliver.upton@linux.dev>
          Mon, 17 Mar 2025 17:42:22 +0000 (10:42 -0700)
Many functions in pmu-emul.c check kvm_vcpu_has_pmu(vcpu). A favorable
interpretation is that this is defensive programming, but it also has
downsides:

- It is confusing as it implies these functions may be called without a
  PMU, although most of them are only ever called when a PMU is
  present.

- It makes the semantics of these functions fuzzy. For example, calling
  kvm_pmu_disable_counter_mask() without a PMU may plausibly be a no-op
  since there are no enabled counters, but it is unclear what
  kvm_pmu_get_counter_value() should return when there is no PMU (see
  the sketch after this list).

- It lets callers omit the kvm_vcpu_has_pmu(vcpu) check themselves, but
  it is often wrong to call these functions without a PMU.

- It is error-prone to duplicate kvm_vcpu_has_pmu(vcpu) checks across
  multiple functions. Many of these functions are called for system
  register accesses, and the system register infrastructure already
  employs less error-prone, comprehensive checks.
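
As a minimal sketch of that ambiguity (simplified from the old
pmu-emul.c code, not a verbatim quote), a counter read without a PMU
silently returned 0, which a caller could not tell apart from a real
counter value of 0:

	u64 kvm_pmu_get_counter_value(struct kvm_vcpu *vcpu, u64 select_idx)
	{
		/* Old behaviour: the callee guards itself... */
		if (!kvm_vcpu_has_pmu(vcpu))
			return 0;	/* ...and invents a value. */

		return kvm_pmu_get_pmc_value(kvm_vcpu_idx_to_pmc(vcpu, select_idx));
	}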

Check kvm_vcpu_has_pmu(vcpu) in the callers of these functions instead,
and remove the obsolete checks from pmu-emul.c. The only exceptions are
the functions that implement ioctls, as they have well-defined
semantics even when the PMU is not present.
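
To illustrate the new shape with a fragment modeled on the
kvm_arch_vcpu_ioctl_run() hunk below, the caller now makes the
precondition explicit and the callee can assume a PMU is present:

	/* New behaviour: the caller decides whether PMU code runs at all. */
	if (kvm_vcpu_has_pmu(vcpu))
		kvm_pmu_flush_hwstate(vcpu);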

Signed-off-by: Akihiko Odaki <akihiko.odaki@daynix.com>
Reviewed-by: Marc Zyngier <maz@kernel.org>
Link: https://lore.kernel.org/r/20250315-pmc-v5-2-ecee87dab216@daynix.com
Signed-off-by: Oliver Upton <oliver.upton@linux.dev>
arch/arm64/kvm/arm.c
arch/arm64/kvm/emulate-nested.c
arch/arm64/kvm/pmu-emul.c
arch/arm64/kvm/sys_regs.c

diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c
index 0160b492435113b8ba2167dc95ab1a114f5435c6..caa1357fa3673ab2ea07168d267f58f5eab284be 100644
@@ -835,9 +835,11 @@ int kvm_arch_vcpu_run_pid_change(struct kvm_vcpu *vcpu)
        if (ret)
                return ret;
 
-       ret = kvm_arm_pmu_v3_enable(vcpu);
-       if (ret)
-               return ret;
+       if (kvm_vcpu_has_pmu(vcpu)) {
+               ret = kvm_arm_pmu_v3_enable(vcpu);
+               if (ret)
+                       return ret;
+       }
 
        if (is_protected_kvm_enabled()) {
                ret = pkvm_create_hyp_vm(kvm);
@@ -1148,7 +1150,8 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
                 */
                preempt_disable();
 
-               kvm_pmu_flush_hwstate(vcpu);
+               if (kvm_vcpu_has_pmu(vcpu))
+                       kvm_pmu_flush_hwstate(vcpu);
 
                local_irq_disable();
 
@@ -1167,7 +1170,8 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
                if (ret <= 0 || kvm_vcpu_exit_request(vcpu, &ret)) {
                        vcpu->mode = OUTSIDE_GUEST_MODE;
                        isb(); /* Ensure work in x_flush_hwstate is committed */
-                       kvm_pmu_sync_hwstate(vcpu);
+                       if (kvm_vcpu_has_pmu(vcpu))
+                               kvm_pmu_sync_hwstate(vcpu);
                        if (unlikely(!irqchip_in_kernel(vcpu->kvm)))
                                kvm_timer_sync_user(vcpu);
                        kvm_vgic_sync_hwstate(vcpu);
@@ -1197,7 +1201,8 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
                 * that the vgic can properly sample the updated state of the
                 * interrupt line.
                 */
-               kvm_pmu_sync_hwstate(vcpu);
+               if (kvm_vcpu_has_pmu(vcpu))
+                       kvm_pmu_sync_hwstate(vcpu);
 
                /*
                 * Sync the vgic state before syncing the timer state because
diff --git a/arch/arm64/kvm/emulate-nested.c b/arch/arm64/kvm/emulate-nested.c
index 607d37bab70b48993af7bc9509ac40cc5cd5c8b0..9293fb078fc62130b2901e96a128991f90bcb6f6 100644
@@ -2516,7 +2516,8 @@ void kvm_emulate_nested_eret(struct kvm_vcpu *vcpu)
        kvm_arch_vcpu_load(vcpu, smp_processor_id());
        preempt_enable();
 
-       kvm_pmu_nested_transition(vcpu);
+       if (kvm_vcpu_has_pmu(vcpu))
+               kvm_pmu_nested_transition(vcpu);
 }
 
 static void kvm_inject_el2_exception(struct kvm_vcpu *vcpu, u64 esr_el2,
@@ -2599,7 +2600,8 @@ static int kvm_inject_nested(struct kvm_vcpu *vcpu, u64 esr_el2,
        kvm_arch_vcpu_load(vcpu, smp_processor_id());
        preempt_enable();
 
-       kvm_pmu_nested_transition(vcpu);
+       if (kvm_vcpu_has_pmu(vcpu))
+               kvm_pmu_nested_transition(vcpu);
 
        return 1;
 }
diff --git a/arch/arm64/kvm/pmu-emul.c b/arch/arm64/kvm/pmu-emul.c
index 6c5950b9ceac884e30fae742b6c20b1c1a2b22f5..98fdc65f5b24bcd58860181769faba803e665be0 100644
@@ -150,9 +150,6 @@ static u64 kvm_pmu_get_pmc_value(struct kvm_pmc *pmc)
  */
 u64 kvm_pmu_get_counter_value(struct kvm_vcpu *vcpu, u64 select_idx)
 {
-       if (!kvm_vcpu_has_pmu(vcpu))
-               return 0;
-
        return kvm_pmu_get_pmc_value(kvm_vcpu_idx_to_pmc(vcpu, select_idx));
 }
 
@@ -191,9 +188,6 @@ static void kvm_pmu_set_pmc_value(struct kvm_pmc *pmc, u64 val, bool force)
  */
 void kvm_pmu_set_counter_value(struct kvm_vcpu *vcpu, u64 select_idx, u64 val)
 {
-       if (!kvm_vcpu_has_pmu(vcpu))
-               return;
-
        kvm_pmu_set_pmc_value(kvm_vcpu_idx_to_pmc(vcpu, select_idx), val, false);
 }
 
@@ -350,7 +344,7 @@ void kvm_pmu_reprogram_counter_mask(struct kvm_vcpu *vcpu, u64 val)
 {
        int i;
 
-       if (!kvm_vcpu_has_pmu(vcpu) || !val)
+       if (!val)
                return;
 
        for (i = 0; i < KVM_ARMV8_PMU_MAX_COUNTERS; i++) {
@@ -401,9 +395,6 @@ static void kvm_pmu_update_state(struct kvm_vcpu *vcpu)
        struct kvm_pmu *pmu = &vcpu->arch.pmu;
        bool overflow;
 
-       if (!kvm_vcpu_has_pmu(vcpu))
-               return;
-
        overflow = kvm_pmu_overflow_status(vcpu);
        if (pmu->irq_level == overflow)
                return;
@@ -599,9 +590,6 @@ void kvm_pmu_handle_pmcr(struct kvm_vcpu *vcpu, u64 val)
 {
        int i;
 
-       if (!kvm_vcpu_has_pmu(vcpu))
-               return;
-
        /* Fixup PMCR_EL0 to reconcile the PMU version and the LP bit */
        if (!kvm_has_feat(vcpu->kvm, ID_AA64DFR0_EL1, PMUVer, V3P5))
                val &= ~ARMV8_PMU_PMCR_LP;
@@ -766,9 +754,6 @@ void kvm_pmu_set_counter_event_type(struct kvm_vcpu *vcpu, u64 data,
        struct kvm_pmc *pmc = kvm_vcpu_idx_to_pmc(vcpu, select_idx);
        u64 reg;
 
-       if (!kvm_vcpu_has_pmu(vcpu))
-               return;
-
        reg = counter_index_to_evtreg(pmc->idx);
        __vcpu_sys_reg(vcpu, reg) = data & kvm_pmu_evtyper_mask(vcpu->kvm);
 
@@ -848,9 +833,6 @@ u64 kvm_pmu_get_pmceid(struct kvm_vcpu *vcpu, bool pmceid1)
        u64 val, mask = 0;
        int base, i, nr_events;
 
-       if (!kvm_vcpu_has_pmu(vcpu))
-               return 0;
-
        if (!pmceid1) {
                val = read_sysreg(pmceid0_el0);
                /* always support CHAIN */
@@ -900,9 +882,6 @@ void kvm_vcpu_reload_pmu(struct kvm_vcpu *vcpu)
 
 int kvm_arm_pmu_v3_enable(struct kvm_vcpu *vcpu)
 {
-       if (!kvm_vcpu_has_pmu(vcpu))
-               return 0;
-
        if (!vcpu->arch.pmu.created)
                return -EINVAL;
 
@@ -1231,9 +1210,6 @@ void kvm_pmu_nested_transition(struct kvm_vcpu *vcpu)
        unsigned long mask;
        int i;
 
-       if (!kvm_vcpu_has_pmu(vcpu))
-               return;
-
        mask = __vcpu_sys_reg(vcpu, PMCNTENSET_EL0);
        for_each_set_bit(i, &mask, 32) {
                struct kvm_pmc *pmc = kvm_vcpu_idx_to_pmc(vcpu, i);
diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c
index ffee72fd1273efe96ab4ee658716b2677715efeb..e8e9c781a9290ffebde91477598bdcdd7de76cff 100644
@@ -1853,12 +1853,14 @@ static int set_id_aa64dfr0_el1(struct kvm_vcpu *vcpu,
 static u64 read_sanitised_id_dfr0_el1(struct kvm_vcpu *vcpu,
                                      const struct sys_reg_desc *rd)
 {
-       u8 perfmon = pmuver_to_perfmon(kvm_arm_pmu_get_pmuver_limit());
+       u8 perfmon;
        u64 val = read_sanitised_ftr_reg(SYS_ID_DFR0_EL1);
 
        val &= ~ID_DFR0_EL1_PerfMon_MASK;
-       if (kvm_vcpu_has_pmu(vcpu))
+       if (kvm_vcpu_has_pmu(vcpu)) {
+               perfmon = pmuver_to_perfmon(kvm_arm_pmu_get_pmuver_limit());
                val |= SYS_FIELD_PREP(ID_DFR0_EL1, PerfMon, perfmon);
+       }
 
        val = ID_REG_LIMIT_FIELD_ENUM(val, ID_DFR0_EL1, CopDbg, Debugv8p8);