www.infradead.org Git - users/dwmw2/linux.git/commitdiff
KVM: arm64: Handle counter access early in non-HYP context
author: Marc Zyngier <maz@kernel.org>
Tue, 17 Dec 2024 14:23:15 +0000 (14:23 +0000)
committer: Marc Zyngier <maz@kernel.org>
Thu, 2 Jan 2025 19:19:10 +0000 (19:19 +0000)
We already deal with CNTPCT_EL0 accesses in non-HYP context.
Let's add CNTVCT_EL0 as a good measure.

This is also an opportunity to simplify things and make it
plain that this code is only for non-HYP context handling.

Acked-by: Oliver Upton <oliver.upton@linux.dev>
Link: https://lore.kernel.org/r/20241217142321.763801-8-maz@kernel.org
Signed-off-by: Marc Zyngier <maz@kernel.org>
arch/arm64/kvm/hyp/include/hyp/switch.h

index 30e572de28749e23d1b718dd2539ddc8d81403cc..719479b42b32909e45d391332d54e95d25cae537 100644 (file)
@@ -506,7 +506,7 @@ static inline u64 compute_counter_value(struct arch_timer_context *ctxt)
        return arch_timer_read_cntpct_el0() - timer_get_offset(ctxt);
 }
 
-static bool kvm_hyp_handle_cntpct(struct kvm_vcpu *vcpu)
+static bool kvm_handle_cntxct(struct kvm_vcpu *vcpu)
 {
        struct arch_timer_context *ctxt;
        u32 sysreg;
@@ -516,18 +516,19 @@ static bool kvm_hyp_handle_cntpct(struct kvm_vcpu *vcpu)
         * We only get here for 64bit guests, 32bit guests will hit
         * the long and winding road all the way to the standard
         * handling. Yes, it sucks to be irrelevant.
+        *
+        * Also, we only deal with non-hypervisor context here (either
+        * an EL1 guest, or a non-HYP context of an EL2 guest).
         */
+       if (is_hyp_ctxt(vcpu))
+               return false;
+
        sysreg = esr_sys64_to_sysreg(kvm_vcpu_get_esr(vcpu));
 
        switch (sysreg) {
        case SYS_CNTPCT_EL0:
        case SYS_CNTPCTSS_EL0:
                if (vcpu_has_nv(vcpu)) {
-                       if (is_hyp_ctxt(vcpu)) {
-                               ctxt = vcpu_hptimer(vcpu);
-                               break;
-                       }
-
                        /* Check for guest hypervisor trapping */
                        val = __vcpu_sys_reg(vcpu, CNTHCTL_EL2);
                        if (!vcpu_el2_e2h_is_set(vcpu))
@@ -539,16 +540,23 @@ static bool kvm_hyp_handle_cntpct(struct kvm_vcpu *vcpu)
 
                ctxt = vcpu_ptimer(vcpu);
                break;
+       case SYS_CNTVCT_EL0:
+       case SYS_CNTVCTSS_EL0:
+               if (vcpu_has_nv(vcpu)) {
+                       /* Check for guest hypervisor trapping */
+                       val = __vcpu_sys_reg(vcpu, CNTHCTL_EL2);
+
+                       if (val & CNTHCTL_EL1TVCT)
+                               return false;
+               }
+
+               ctxt = vcpu_vtimer(vcpu);
+               break;
        default:
                return false;
        }
 
-       val = arch_timer_read_cntpct_el0();
-
-       if (ctxt->offset.vm_offset)
-               val -= *kern_hyp_va(ctxt->offset.vm_offset);
-       if (ctxt->offset.vcpu_offset)
-               val -= *kern_hyp_va(ctxt->offset.vcpu_offset);
+       val = compute_counter_value(ctxt);
 
        vcpu_set_reg(vcpu, kvm_vcpu_sys_get_rt(vcpu), val);
        __kvm_skip_instr(vcpu);
@@ -593,7 +601,7 @@ static bool kvm_hyp_handle_sysreg(struct kvm_vcpu *vcpu, u64 *exit_code)
            __vgic_v3_perform_cpuif_access(vcpu) == 1)
                return true;
 
-       if (kvm_hyp_handle_cntpct(vcpu))
+       if (kvm_handle_cntxct(vcpu))
                return true;
 
        return false;