return counter;
}

-/**
- * kvm_pmu_set_counter_value - set PMU counter value
- * @vcpu: The vcpu pointer
- * @select_idx: The counter index
- * @val: The counter value
- */
-void kvm_pmu_set_counter_value(struct kvm_vcpu *vcpu, u64 select_idx, u64 val)
+static void kvm_pmu_set_counter(struct kvm_vcpu *vcpu, u64 select_idx, u64 val,
+ bool force)
{
u64 reg;

kvm_pmu_release_perf_event(&vcpu->arch.pmu.pmc[select_idx]);

reg = counter_index_to_reg(select_idx);
+
+ if (vcpu_mode_is_32bit(vcpu) && select_idx != ARMV8_PMU_CYCLE_IDX &&
+ !force) {
+ /*
+ * Even with PMUv3p5, AArch32 cannot write to the top
+ * 32bit of the counters. The only possible course of
+ * action is to use PMCR.P, which will reset them to
+ * 0 (the only use of the 'force' parameter).
+ */
+ val = lower_32_bits(val) |
+ (__vcpu_sys_reg(vcpu, reg) & GENMASK(63, 32));
+ }
+
__vcpu_sys_reg(vcpu, reg) = val;

/* Recreate the perf event to reflect the updated sample_period */
kvm_pmu_create_perf_event(vcpu, select_idx);
}

+/**
+ * kvm_pmu_set_counter_value - set PMU counter value
+ * @vcpu: The vcpu pointer
+ * @select_idx: The counter index
+ * @val: The counter value
+ */
+void kvm_pmu_set_counter_value(struct kvm_vcpu *vcpu, u64 select_idx, u64 val)
+{
+ kvm_pmu_set_counter(vcpu, select_idx, val, false);
+}
+
/**
* kvm_pmu_release_perf_event - remove the perf event
* @pmc: The PMU counter pointer
unsigned long mask = kvm_pmu_valid_counter_mask(vcpu);

mask &= ~BIT(ARMV8_PMU_CYCLE_IDX);
for_each_set_bit(i, &mask, 32)
- kvm_pmu_set_counter_value(vcpu, i, 0);
+ kvm_pmu_set_counter(vcpu, i, 0, true);
}
}
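
For illustration, a minimal userspace sketch of the merge now performed in the
AArch32 path of kvm_pmu_set_counter(): the low 32 bits come from the guest's
write, the top 32 bits are kept from the current counter value.
GENMASK_ULL_DEMO() and lower_32_bits_demo() are stand-ins for the kernel's
GENMASK() and lower_32_bits(); they are not part of the patch.

#include <stdint.h>
#include <stdio.h>

/* Stand-in for the kernel's GENMASK_ULL(h, l). */
#define GENMASK_ULL_DEMO(h, l) ((~0ULL << (l)) & (~0ULL >> (63 - (h))))

/* Stand-in for the kernel's lower_32_bits(). */
static uint64_t lower_32_bits_demo(uint64_t v)
{
	return v & 0xffffffffULL;
}

int main(void)
{
	uint64_t counter = 0x00000005deadbeefULL; /* current 64-bit counter   */
	uint64_t wval = 0xffffffff12345678ULL;    /* incoming write; only the */
						  /* low 32 bits matter      */

	/* Replace only the bottom 32 bits; the top 32 bits are preserved. */
	uint64_t merged = lower_32_bits_demo(wval) |
			  (counter & GENMASK_ULL_DEMO(63, 32));

	printf("merged = 0x%016llx\n", (unsigned long long)merged);
	/* Prints: merged = 0x0000000512345678 */
	return 0;
}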