www.infradead.org Git - users/dwmw2/linux.git/commitdiff
KVM: arm64: Calculate cptr_el2 traps on activating traps
authorFuad Tabba <tabba@google.com>
Mon, 16 Dec 2024 10:50:52 +0000 (10:50 +0000)
committerMarc Zyngier <maz@kernel.org>
Fri, 20 Dec 2024 13:53:57 +0000 (13:53 +0000)
Similar to VHE, calculate the value of cptr_el2 from scratch when
activating traps. This removes the need to store cptr_el2 in every
vcpu structure. Moreover, some traps, such as those that depend on
whether the guest owns the fp registers, need to be set on every
vcpu run.

Reported-by: James Clark <james.clark@linaro.org>
Fixes: 5294afdbf45a ("KVM: arm64: Exclude FP ownership from kvm_vcpu_arch")
Signed-off-by: Fuad Tabba <tabba@google.com>
Link: https://lore.kernel.org/r/20241216105057.579031-13-tabba@google.com
Signed-off-by: Marc Zyngier <maz@kernel.org>
arch/arm64/include/asm/kvm_host.h
arch/arm64/kvm/arm.c
arch/arm64/kvm/hyp/nvhe/pkvm.c
arch/arm64/kvm/hyp/nvhe/switch.c

index d3230a22846f91b3b78db5f7423496603d188f33..ba9536c3076d23d7cbeb6432b2cdd18e828a20b8 100644 (file)
@@ -708,7 +708,6 @@ struct kvm_vcpu_arch {
        u64 hcr_el2;
        u64 hcrx_el2;
        u64 mdcr_el2;
-       u64 cptr_el2;
 
        /* Exception Information */
        struct kvm_vcpu_fault_info fault;
index b295218cdc24476c5035b7fcaf715f77334fe2f0..8a3d02cf0a7a28e6d2a2fe2b6d63a9dbbe2d4916 100644 (file)
@@ -1546,7 +1546,6 @@ static int kvm_arch_vcpu_ioctl_vcpu_init(struct kvm_vcpu *vcpu,
        }
 
        vcpu_reset_hcr(vcpu);
-       vcpu->arch.cptr_el2 = kvm_get_reset_cptr_el2(vcpu);
 
        /*
         * Handle the "start in power-off" case.
index 2beab633d721ca3e10d5b5fc9d64a28325621c80..4ef08fb3bca444c5de7e85909c543fe2457fcf35 100644 (file)
@@ -83,44 +83,6 @@ static void pvm_init_traps_hcr(struct kvm_vcpu *vcpu)
        vcpu->arch.hcr_el2 = val;
 }
 
-static void pvm_init_traps_cptr(struct kvm_vcpu *vcpu)
-{
-       struct kvm *kvm = vcpu->kvm;
-       u64 val = vcpu->arch.cptr_el2;
-
-       if (!has_hvhe()) {
-               val |= CPTR_NVHE_EL2_RES1;
-               val &= ~(CPTR_NVHE_EL2_RES0);
-       }
-
-       if (!kvm_has_feat(kvm, ID_AA64PFR0_EL1, AMU, IMP))
-               val |= CPTR_EL2_TAM;
-
-       /* SVE can be disabled by userspace even if supported. */
-       if (!vcpu_has_sve(vcpu)) {
-               if (has_hvhe())
-                       val &= ~(CPACR_ELx_ZEN);
-               else
-                       val |= CPTR_EL2_TZ;
-       }
-
-       /* No SME support in KVM. */
-       BUG_ON(kvm_has_feat(kvm, ID_AA64PFR1_EL1, SME, IMP));
-       if (has_hvhe())
-               val &= ~(CPACR_ELx_SMEN);
-       else
-               val |= CPTR_EL2_TSM;
-
-       if (!kvm_has_feat(kvm, ID_AA64DFR0_EL1, TraceVer, IMP)) {
-               if (has_hvhe())
-                       val |= CPACR_EL1_TTA;
-               else
-                       val |= CPTR_EL2_TTA;
-       }
-
-       vcpu->arch.cptr_el2 = val;
-}
-
 static void pvm_init_traps_mdcr(struct kvm_vcpu *vcpu)
 {
        struct kvm *kvm = vcpu->kvm;
@@ -191,7 +153,6 @@ static int pkvm_vcpu_init_traps(struct pkvm_hyp_vcpu *hyp_vcpu)
        struct kvm_vcpu *vcpu = &hyp_vcpu->vcpu;
        int ret;
 
-       vcpu->arch.cptr_el2 = kvm_get_reset_cptr_el2(vcpu);
        vcpu->arch.mdcr_el2 = 0;
 
        pkvm_vcpu_reset_hcr(vcpu);
@@ -204,7 +165,6 @@ static int pkvm_vcpu_init_traps(struct pkvm_hyp_vcpu *hyp_vcpu)
                return ret;
 
        pvm_init_traps_hcr(vcpu);
-       pvm_init_traps_cptr(vcpu);
        pvm_init_traps_mdcr(vcpu);
 
        return 0;
@@ -644,8 +604,6 @@ unlock:
                return ret;
        }
 
-       hyp_vcpu->vcpu.arch.cptr_el2 = kvm_get_reset_cptr_el2(&hyp_vcpu->vcpu);
-
        return 0;
 }
 
index 7786a83d0fa8d62cd2de536ff56918ef6de6d0e8..0ebf84a9f9e2715793bcd08c494539be25b6870e 100644 (file)
@@ -35,33 +35,46 @@ DEFINE_PER_CPU(unsigned long, kvm_hyp_vector);
 
 extern void kvm_nvhe_prepare_backtrace(unsigned long fp, unsigned long pc);
 
-static void __activate_traps(struct kvm_vcpu *vcpu)
+static void __activate_cptr_traps(struct kvm_vcpu *vcpu)
 {
-       u64 val;
+       u64 val = CPTR_EL2_TAM; /* Same bit irrespective of E2H */
 
-       ___activate_traps(vcpu, vcpu->arch.hcr_el2);
-       __activate_traps_common(vcpu);
+       if (has_hvhe()) {
+               val |= CPACR_ELx_TTA;
 
-       val = vcpu->arch.cptr_el2;
-       val |= CPTR_EL2_TAM;    /* Same bit irrespective of E2H */
-       val |= has_hvhe() ? CPACR_EL1_TTA : CPTR_EL2_TTA;
-       if (cpus_have_final_cap(ARM64_SME)) {
-               if (has_hvhe())
-                       val &= ~CPACR_ELx_SMEN;
-               else
-                       val |= CPTR_EL2_TSM;
-       }
+               if (guest_owns_fp_regs()) {
+                       val |= CPACR_ELx_FPEN;
+                       if (vcpu_has_sve(vcpu))
+                               val |= CPACR_ELx_ZEN;
+               }
+       } else {
+               val |= CPTR_EL2_TTA | CPTR_NVHE_EL2_RES1;
 
-       if (!guest_owns_fp_regs()) {
-               if (has_hvhe())
-                       val &= ~(CPACR_ELx_FPEN | CPACR_ELx_ZEN);
-               else
-                       val |= CPTR_EL2_TFP | CPTR_EL2_TZ;
+               /*
+                * Always trap SME since it's not supported in KVM.
+                * TSM is RES1 if SME isn't implemented.
+                */
+               val |= CPTR_EL2_TSM;
 
-               __activate_traps_fpsimd32(vcpu);
+               if (!vcpu_has_sve(vcpu) || !guest_owns_fp_regs())
+                       val |= CPTR_EL2_TZ;
+
+               if (!guest_owns_fp_regs())
+                       val |= CPTR_EL2_TFP;
        }
 
+       if (!guest_owns_fp_regs())
+               __activate_traps_fpsimd32(vcpu);
+
        kvm_write_cptr_el2(val);
+}
+
+static void __activate_traps(struct kvm_vcpu *vcpu)
+{
+       ___activate_traps(vcpu, vcpu->arch.hcr_el2);
+       __activate_traps_common(vcpu);
+       __activate_cptr_traps(vcpu);
+
        write_sysreg(__this_cpu_read(kvm_hyp_vector), vbar_el2);
 
        if (cpus_have_final_cap(ARM64_WORKAROUND_SPECULATIVE_AT)) {