www.infradead.org Git - users/dwmw2/linux.git/commitdiff
KVM: arm64: Set protected VM traps based on its view of feature registers
author: Fuad Tabba <tabba@google.com>
Mon, 16 Dec 2024 10:50:48 +0000 (10:50 +0000)
committer: Marc Zyngier <maz@kernel.org>
Fri, 20 Dec 2024 13:53:00 +0000 (13:53 +0000)
Now that the VM's feature id registers are initialized with the
values of the supported features, use those values to determine
which traps to set using kvm_has_feature().

Signed-off-by: Fuad Tabba <tabba@google.com>
Link: https://lore.kernel.org/r/20241216105057.579031-9-tabba@google.com
Signed-off-by: Marc Zyngier <maz@kernel.org>
arch/arm64/kvm/hyp/nvhe/pkvm.c
arch/arm64/kvm/hyp/nvhe/sys_regs.c

index 6eddd29b264b334277fca644f5c95d1c033c63a2..23afc63cac559a70b8e23f3f72b150d7a85c2371 100644 (file)
@@ -52,9 +52,7 @@ static void pkvm_vcpu_reset_hcr(struct kvm_vcpu *vcpu)
 
 static void pvm_init_traps_hcr(struct kvm_vcpu *vcpu)
 {
-       const u64 id_aa64pfr0 = pvm_read_id_reg(vcpu, SYS_ID_AA64PFR0_EL1);
-       const u64 id_aa64pfr1 = pvm_read_id_reg(vcpu, SYS_ID_AA64PFR1_EL1);
-       const u64 id_aa64mmfr1 = pvm_read_id_reg(vcpu, SYS_ID_AA64MMFR1_EL1);
+       struct kvm *kvm = vcpu->kvm;
        u64 val = vcpu->arch.hcr_el2;
 
        /* No support for AArch32. */
@@ -70,24 +68,20 @@ static void pvm_init_traps_hcr(struct kvm_vcpu *vcpu)
         */
        val |= HCR_TACR | HCR_TIDCP | HCR_TID3 | HCR_TID1;
 
-       /* Trap RAS */
-       if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_RAS), id_aa64pfr0)) {
+       if (!kvm_has_feat(kvm, ID_AA64PFR0_EL1, RAS, IMP)) {
                val |= HCR_TERR | HCR_TEA;
                val &= ~(HCR_FIEN);
        }
 
-       /* Trap AMU */
-       if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_AMU), id_aa64pfr0))
+       if (!kvm_has_feat(kvm, ID_AA64PFR0_EL1, AMU, IMP))
                val &= ~(HCR_AMVOFFEN);
 
-       /* Memory Tagging: Trap and Treat as Untagged if not supported. */
-       if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR1_EL1_MTE), id_aa64pfr1)) {
+       if (!kvm_has_feat(kvm, ID_AA64PFR1_EL1, MTE, IMP)) {
                val |= HCR_TID5;
                val &= ~(HCR_DCT | HCR_ATA);
        }
 
-       /* Trap LOR */
-       if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64MMFR1_EL1_LO), id_aa64mmfr1))
+       if (!kvm_has_feat(kvm, ID_AA64MMFR1_EL1, LO, IMP))
                val |= HCR_TLOR;
 
        vcpu->arch.hcr_el2 = val;
@@ -95,9 +89,7 @@ static void pvm_init_traps_hcr(struct kvm_vcpu *vcpu)
 
 static void pvm_init_traps_cptr(struct kvm_vcpu *vcpu)
 {
-       const u64 id_aa64pfr0 = pvm_read_id_reg(vcpu, SYS_ID_AA64PFR0_EL1);
-       const u64 id_aa64pfr1 = pvm_read_id_reg(vcpu, SYS_ID_AA64PFR1_EL1);
-       const u64 id_aa64dfr0 = pvm_read_id_reg(vcpu, SYS_ID_AA64DFR0_EL1);
+       struct kvm *kvm = vcpu->kvm;
        u64 val = vcpu->arch.cptr_el2;
 
        if (!has_hvhe()) {
@@ -105,12 +97,11 @@ static void pvm_init_traps_cptr(struct kvm_vcpu *vcpu)
                val &= ~(CPTR_NVHE_EL2_RES0);
        }
 
-       /* Trap AMU */
-       if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_AMU), id_aa64pfr0))
+       if (!kvm_has_feat(kvm, ID_AA64PFR0_EL1, AMU, IMP))
                val |= CPTR_EL2_TAM;
 
-       /* Trap SVE */
-       if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_SVE), id_aa64pfr0)) {
+       /* SVE can be disabled by userspace even if supported. */
+       if (!vcpu_has_sve(vcpu)) {
                if (has_hvhe())
                        val &= ~(CPACR_ELx_ZEN);
                else
@@ -118,14 +109,13 @@ static void pvm_init_traps_cptr(struct kvm_vcpu *vcpu)
        }
 
        /* No SME support in KVM. */
-       BUG_ON(FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR1_EL1_SME), id_aa64pfr1));
+       BUG_ON(kvm_has_feat(kvm, ID_AA64PFR1_EL1, SME, IMP));
        if (has_hvhe())
                val &= ~(CPACR_ELx_SMEN);
        else
                val |= CPTR_EL2_TSM;
 
-       /* Trap Trace */
-       if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_TraceVer), id_aa64dfr0)) {
+       if (!kvm_has_feat(kvm, ID_AA64DFR0_EL1, TraceVer, IMP)) {
                if (has_hvhe())
                        val |= CPACR_EL1_TTA;
                else
@@ -137,40 +127,33 @@ static void pvm_init_traps_cptr(struct kvm_vcpu *vcpu)
 
 static void pvm_init_traps_mdcr(struct kvm_vcpu *vcpu)
 {
-       const u64 id_aa64dfr0 = pvm_read_id_reg(vcpu, SYS_ID_AA64DFR0_EL1);
-       const u64 id_aa64mmfr0 = pvm_read_id_reg(vcpu, SYS_ID_AA64MMFR0_EL1);
+       struct kvm *kvm = vcpu->kvm;
        u64 val = vcpu->arch.mdcr_el2;
 
-       /* Trap/constrain PMU */
-       if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_PMUVer), id_aa64dfr0)) {
+       if (!kvm_has_feat(kvm, ID_AA64DFR0_EL1, PMUVer, IMP)) {
                val |= MDCR_EL2_TPM | MDCR_EL2_TPMCR;
                val &= ~(MDCR_EL2_HPME | MDCR_EL2_MTPME | MDCR_EL2_HPMN_MASK);
        }
 
-       /* Trap Debug */
-       if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_DebugVer), id_aa64dfr0))
+       if (!kvm_has_feat(kvm, ID_AA64DFR0_EL1, DebugVer, IMP))
                val |= MDCR_EL2_TDRA | MDCR_EL2_TDA;
 
-       /* Trap OS Double Lock */
-       if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_DoubleLock), id_aa64dfr0))
+       if (!kvm_has_feat(kvm, ID_AA64DFR0_EL1, DoubleLock, IMP))
                val |= MDCR_EL2_TDOSA;
 
-       /* Trap SPE */
-       if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_PMSVer), id_aa64dfr0)) {
+       if (!kvm_has_feat(kvm, ID_AA64DFR0_EL1, PMSVer, IMP)) {
                val |= MDCR_EL2_TPMS;
                val &= ~MDCR_EL2_E2PB_MASK;
        }
 
-       /* Trap Trace Filter */
-       if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_TraceFilt), id_aa64dfr0))
+       if (!kvm_has_feat(kvm, ID_AA64DFR0_EL1, TraceFilt, IMP))
                val |= MDCR_EL2_TTRF;
 
-       /* Trap External Trace */
-       if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_ExtTrcBuff), id_aa64dfr0))
+       if (!kvm_has_feat(kvm, ID_AA64DFR0_EL1, ExtTrcBuff, IMP))
                val |= MDCR_EL2_E2TB_MASK;
 
        /* Trap Debug Communications Channel registers */
-       if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64MMFR0_EL1_FGT), id_aa64mmfr0))
+       if (!kvm_has_feat(kvm, ID_AA64MMFR0_EL1, FGT, IMP))
                val |= MDCR_EL2_TDCC;
 
        vcpu->arch.mdcr_el2 = val;
@@ -182,31 +165,24 @@ static void pvm_init_traps_mdcr(struct kvm_vcpu *vcpu)
  */
 static int pkvm_check_pvm_cpu_features(struct kvm_vcpu *vcpu)
 {
-       /*
-        * PAuth is allowed if supported by the system and the vcpu.
-        * Properly checking for PAuth requires checking various fields in
-        * ID_AA64ISAR1_EL1 and ID_AA64ISAR2_EL1. The way that fixed config
-        * is controlled now in pKVM does not easily allow that. This will
-        * change later to follow the changes upstream wrt fixed configuration
-        * and nested virt.
-        */
-       BUILD_BUG_ON(!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64ISAR1_EL1_GPI),
-                               PVM_ID_AA64ISAR1_ALLOW));
+       struct kvm *kvm = vcpu->kvm;
 
        /* Protected KVM does not support AArch32 guests. */
-       BUILD_BUG_ON(FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_EL0),
-               PVM_ID_AA64PFR0_ALLOW) != ID_AA64PFR0_EL1_EL0_IMP);
-       BUILD_BUG_ON(FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_EL1),
-               PVM_ID_AA64PFR0_ALLOW) != ID_AA64PFR0_EL1_EL1_IMP);
+       if (kvm_has_feat(kvm, ID_AA64PFR0_EL1, EL0, AARCH32) ||
+           kvm_has_feat(kvm, ID_AA64PFR0_EL1, EL1, AARCH32))
+               return -EINVAL;
 
        /*
         * Linux guests assume support for floating-point and Advanced SIMD. Do
         * not change the trapping behavior for these from the KVM default.
         */
-       BUILD_BUG_ON(!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_FP),
-                               PVM_ID_AA64PFR0_ALLOW));
-       BUILD_BUG_ON(!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_AdvSIMD),
-                               PVM_ID_AA64PFR0_ALLOW));
+       if (!kvm_has_feat(kvm, ID_AA64PFR0_EL1, FP, IMP) ||
+           !kvm_has_feat(kvm, ID_AA64PFR0_EL1, AdvSIMD, IMP))
+               return -EINVAL;
+
+       /* No SME support in KVM right now. Check to catch if it changes. */
+       if (kvm_has_feat(kvm, ID_AA64PFR1_EL1, SME, IMP))
+               return -EINVAL;
 
        return 0;
 }
index 2aea44c911bdd2fb503412d9abb6ee09473e93c6..398563d3a2663438cb0fbfab7113aada6c2069b4 100644 (file)
@@ -286,13 +286,6 @@ static bool pvm_access_id_aarch32(struct kvm_vcpu *vcpu,
                return false;
        }
 
-       /*
-        * No support for AArch32 guests, therefore, pKVM has no sanitized copy
-        * of AArch32 feature id registers.
-        */
-       BUILD_BUG_ON(FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_EL1),
-                    PVM_ID_AA64PFR0_ALLOW) > ID_AA64PFR0_EL1_EL1_IMP);
-
        return pvm_access_raz_wi(vcpu, p, r);
 }