www.infradead.org Git - users/dwmw2/linux.git/commitdiff
Merge remote-tracking branch 'arm64/for-next/cpufeature' into kvm-arm64/pkvm-fixed...
author Marc Zyngier <maz@kernel.org>
Mon, 6 Jan 2025 17:48:19 +0000 (17:48 +0000)
committer Marc Zyngier <maz@kernel.org>
Sat, 11 Jan 2025 14:55:18 +0000 (14:55 +0000)
Merge arm64/for-next/cpufeature to solve extensive conflicts
caused by the CPACR_ELx->CPACR_EL1 repainting.

Signed-off-by: Marc Zyngier <maz@kernel.org>
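
For context: the "repainting" mentioned above is a straight rename of the CPACR_ELx_* field macros to CPACR_EL1_*, which is why the conflicted hunks below only change the prefix of the constants they touch. A minimal sketch, using only constants that appear in those hunks and a hypothetical helper name (pkvm_guest_fp_traps is illustrative, not code from this merge):

/*
 * Illustration only -- hypothetical helper, not part of this merge.
 * The repainting maps, among others:
 *
 *      CPACR_ELx_TTA  -> CPACR_EL1_TTA
 *      CPACR_ELx_FPEN -> CPACR_EL1_FPEN
 *      CPACR_ELx_ZEN  -> CPACR_EL1_ZEN
 *      CPACR_ELx_SMEN -> CPACR_EL1_SMEN
 *
 * so the KVM side of each conflict only needs its spelling updated.
 */
static inline u64 pkvm_guest_fp_traps(struct kvm_vcpu *vcpu)
{
        u64 val = CPACR_EL1_FPEN;               /* was CPACR_ELx_FPEN */

        if (vcpu_has_sve(vcpu))
                val |= CPACR_EL1_ZEN;           /* was CPACR_ELx_ZEN */

        return val;
}
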
arch/arm64/include/asm/el2_setup.h
arch/arm64/include/asm/kvm_arm.h
arch/arm64/include/asm/kvm_emulate.h
arch/arm64/kvm/at.c
arch/arm64/kvm/hyp/nvhe/switch.c

arch/arm64/include/asm/el2_setup.h: Simple merge
arch/arm64/include/asm/kvm_arm.h: Simple merge

arch/arm64/include/asm/kvm_emulate.h
index 2d91fb88298a263dcd73a4269318f8edf1379650,4f1d99725f6b3b6c14e110c5b6e967de9ae41812..55ddc135237360957168138e1a2ba1b1a71e2265
@@@ -629,12 -628,12 +629,12 @@@ static __always_inline void __kvm_reset
                if (cpus_have_final_cap(ARM64_SME))
                        val |= CPACR_EL1_SMEN_EL1EN;
        } else if (has_hvhe()) {
-               val = CPACR_ELx_FPEN;
+               val = CPACR_EL1_FPEN;
  
 -              if (!vcpu_has_sve(vcpu) || !guest_owns_fp_regs())
 +              if (!kvm_has_sve(kvm) || !guest_owns_fp_regs())
-                       val |= CPACR_ELx_ZEN;
+                       val |= CPACR_EL1_ZEN;
                if (cpus_have_final_cap(ARM64_SME))
-                       val |= CPACR_ELx_SMEN;
+                       val |= CPACR_EL1_SMEN;
        } else {
                val = CPTR_NVHE_EL2_RES1;
  
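
For readability, here is the resolved else-if branch from the hunk above with the combined-diff column markers stripped; this is only a convenience rendering of the lines already shown, not additional code:

                if (cpus_have_final_cap(ARM64_SME))
                        val |= CPACR_EL1_SMEN_EL1EN;
        } else if (has_hvhe()) {
                val = CPACR_EL1_FPEN;

                if (!kvm_has_sve(kvm) || !guest_owns_fp_regs())
                        val |= CPACR_EL1_ZEN;
                if (cpus_have_final_cap(ARM64_SME))
                        val |= CPACR_EL1_SMEN;
        } else {
                val = CPTR_NVHE_EL2_RES1;
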
arch/arm64/kvm/at.c: Simple merge

arch/arm64/kvm/hyp/nvhe/switch.c
index 0ebf84a9f9e2715793bcd08c494539be25b6870e,0f6b01b3da5cfd4bfa7d75f901416abed0a5c7b2..6c846d033d24ad715f615252baf66ba28f14992b
@@@ -35,46 -36,33 +35,46 @@@ DEFINE_PER_CPU(unsigned long, kvm_hyp_v
  
  extern void kvm_nvhe_prepare_backtrace(unsigned long fp, unsigned long pc);
  
 -static void __activate_traps(struct kvm_vcpu *vcpu)
 +static void __activate_cptr_traps(struct kvm_vcpu *vcpu)
  {
 -      u64 val;
 +      u64 val = CPTR_EL2_TAM; /* Same bit irrespective of E2H */
  
 -      ___activate_traps(vcpu, vcpu->arch.hcr_el2);
 -      __activate_traps_common(vcpu);
 +      if (has_hvhe()) {
-               val |= CPACR_ELx_TTA;
++              val |= CPACR_EL1_TTA;
  
 -      val = vcpu->arch.cptr_el2;
 -      val |= CPTR_EL2_TAM;    /* Same bit irrespective of E2H */
 -      val |= has_hvhe() ? CPACR_EL1_TTA : CPTR_EL2_TTA;
 -      if (cpus_have_final_cap(ARM64_SME)) {
 -              if (has_hvhe())
 -                      val &= ~CPACR_EL1_SMEN;
 -              else
 -                      val |= CPTR_EL2_TSM;
 -      }
 +              if (guest_owns_fp_regs()) {
-                       val |= CPACR_ELx_FPEN;
++                      val |= CPACR_EL1_FPEN;
 +                      if (vcpu_has_sve(vcpu))
-                               val |= CPACR_ELx_ZEN;
++                              val |= CPACR_EL1_ZEN;
 +              }
 +      } else {
 +              val |= CPTR_EL2_TTA | CPTR_NVHE_EL2_RES1;
  
 -      if (!guest_owns_fp_regs()) {
 -              if (has_hvhe())
 -                      val &= ~(CPACR_EL1_FPEN | CPACR_EL1_ZEN);
 -              else
 -                      val |= CPTR_EL2_TFP | CPTR_EL2_TZ;
 +              /*
 +               * Always trap SME since it's not supported in KVM.
 +               * TSM is RES1 if SME isn't implemented.
 +               */
 +              val |= CPTR_EL2_TSM;
  
 -              __activate_traps_fpsimd32(vcpu);
 +              if (!vcpu_has_sve(vcpu) || !guest_owns_fp_regs())
 +                      val |= CPTR_EL2_TZ;
 +
 +              if (!guest_owns_fp_regs())
 +                      val |= CPTR_EL2_TFP;
        }
  
 +      if (!guest_owns_fp_regs())
 +              __activate_traps_fpsimd32(vcpu);
 +
        kvm_write_cptr_el2(val);
 +}
 +
 +static void __activate_traps(struct kvm_vcpu *vcpu)
 +{
 +      ___activate_traps(vcpu, vcpu->arch.hcr_el2);
 +      __activate_traps_common(vcpu);
 +      __activate_cptr_traps(vcpu);
 +
        write_sysreg(__this_cpu_read(kvm_hyp_vector), vbar_el2);
  
        if (cpus_have_final_cap(ARM64_WORKAROUND_SPECULATIVE_AT)) {