www.infradead.org Git - users/dwmw2/linux.git/commitdiff
KVM: arm64: Consolidate allowed and restricted VM feature checks
author: Marc Zyngier <maz@kernel.org>
Fri, 20 Dec 2024 11:33:05 +0000 (11:33 +0000)
committer: Marc Zyngier <maz@kernel.org>
Fri, 20 Dec 2024 13:39:10 +0000 (13:39 +0000)
The definitions for features allowed and allowed with
restrictions for protected guests, which are based on feature
registers, were defined and checked for separately, even though
they are handled in the same way. This could result in missing
checks for certain features, e.g., pointer authentication,
causing traps for allowed features.

Consolidate the definitions into one. Use that new definition to
construct the guest view of the feature registers for
consistency.

Signed-off-by: Fuad Tabba <tabba@google.com>
Link: https://lore.kernel.org/r/20241216105057.579031-2-tabba@google.com
Signed-off-by: Marc Zyngier <maz@kernel.org>
arch/arm64/kvm/hyp/include/nvhe/fixed_config.h
arch/arm64/kvm/hyp/nvhe/pkvm.c
arch/arm64/kvm/hyp/nvhe/sys_regs.c

index f957890c7e381efa30fe49a82c8c1b0b68ac6833..d1e59b88ff663688089f23dcaecd8bbff35fbb7e 100644 (file)
  * guest virtual machines, depending on the mode KVM is running in and on the
  * type of guest that is running.
  *
- * The ALLOW masks represent a bitmask of feature fields that are allowed
- * without any restrictions as long as they are supported by the system.
- *
- * The RESTRICT_UNSIGNED masks, if present, represent unsigned fields for
- * features that are restricted to support at most the specified feature.
+ * Each field in the masks represents the highest supported *unsigned* value for
+ * the feature, if supported by the system.
  *
  * If a feature field is not present in either, then it is not supported.
  *
  * - Floating-point and Advanced SIMD
  * - Data Independent Timing
  * - Spectre/Meltdown Mitigation
- */
-#define PVM_ID_AA64PFR0_ALLOW (\
-       ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_FP) | \
-       ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_AdvSIMD) | \
-       ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_DIT) | \
-       ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_CSV2) | \
-       ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_CSV3) \
-       )
-
-/*
+ *
  * Restrict to the following *unsigned* features for protected VMs:
  * - AArch64 guests only (no support for AArch32 guests):
  *     AArch32 adds complexity in trap handling, emulation, condition codes,
  * - RAS (v1)
  *     Supported by KVM
  */
-#define PVM_ID_AA64PFR0_RESTRICT_UNSIGNED (\
+#define PVM_ID_AA64PFR0_ALLOW (\
+       ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_FP) | \
+       ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_AdvSIMD) | \
+       ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_DIT) | \
+       ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_CSV2) | \
+       ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_CSV3) | \
        SYS_FIELD_PREP_ENUM(ID_AA64PFR0_EL1, EL0, IMP) | \
        SYS_FIELD_PREP_ENUM(ID_AA64PFR0_EL1, EL1, IMP) | \
        SYS_FIELD_PREP_ENUM(ID_AA64PFR0_EL1, EL2, IMP) | \
  * - Distinction between Secure and Non-secure Memory
  * - Mixed-endian at EL0 only
  * - Non-context synchronizing exception entry and exit
+ *
+ * Restrict to the following *unsigned* features for protected VMs:
+ * - 40-bit IPA
+ * - 16-bit ASID
  */
 #define PVM_ID_AA64MMFR0_ALLOW (\
        ARM64_FEATURE_MASK(ID_AA64MMFR0_EL1_BIGEND) | \
        ARM64_FEATURE_MASK(ID_AA64MMFR0_EL1_SNSMEM) | \
        ARM64_FEATURE_MASK(ID_AA64MMFR0_EL1_BIGENDEL0) | \
-       ARM64_FEATURE_MASK(ID_AA64MMFR0_EL1_EXS) \
-       )
-
-/*
- * Restrict to the following *unsigned* features for protected VMs:
- * - 40-bit IPA
- * - 16-bit ASID
- */
-#define PVM_ID_AA64MMFR0_RESTRICT_UNSIGNED (\
+       ARM64_FEATURE_MASK(ID_AA64MMFR0_EL1_EXS) | \
        FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64MMFR0_EL1_PARANGE), ID_AA64MMFR0_EL1_PARANGE_40) | \
        FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64MMFR0_EL1_ASIDBITS), ID_AA64MMFR0_EL1_ASIDBITS_16) \
        )
        )
 
 /* Restrict pointer authentication to the basic version. */
-#define PVM_ID_AA64ISAR1_RESTRICT_UNSIGNED (\
-       FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64ISAR1_EL1_APA), ID_AA64ISAR1_EL1_APA_PAuth) | \
-       FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64ISAR1_EL1_API), ID_AA64ISAR1_EL1_API_PAuth) \
-       )
-
-#define PVM_ID_AA64ISAR2_RESTRICT_UNSIGNED (\
-       FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64ISAR2_EL1_APA3), ID_AA64ISAR2_EL1_APA3_PAuth) \
-       )
-
 #define PVM_ID_AA64ISAR1_ALLOW (\
        ARM64_FEATURE_MASK(ID_AA64ISAR1_EL1_DPB) | \
        ARM64_FEATURE_MASK(ID_AA64ISAR1_EL1_JSCVT) | \
        ARM64_FEATURE_MASK(ID_AA64ISAR1_EL1_SPECRES) | \
        ARM64_FEATURE_MASK(ID_AA64ISAR1_EL1_BF16) | \
        ARM64_FEATURE_MASK(ID_AA64ISAR1_EL1_DGH) | \
-       ARM64_FEATURE_MASK(ID_AA64ISAR1_EL1_I8MM) \
+       ARM64_FEATURE_MASK(ID_AA64ISAR1_EL1_I8MM) | \
+       FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64ISAR1_EL1_APA), ID_AA64ISAR1_EL1_APA_PAuth) | \
+       FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64ISAR1_EL1_API), ID_AA64ISAR1_EL1_API_PAuth) \
        )
 
 #define PVM_ID_AA64ISAR2_ALLOW (\
        ARM64_FEATURE_MASK(ID_AA64ISAR2_EL1_ATS1A)| \
        ARM64_FEATURE_MASK(ID_AA64ISAR2_EL1_GPA3) | \
-       ARM64_FEATURE_MASK(ID_AA64ISAR2_EL1_MOPS) \
+       ARM64_FEATURE_MASK(ID_AA64ISAR2_EL1_MOPS) | \
+       FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64ISAR2_EL1_APA3), ID_AA64ISAR2_EL1_APA3_PAuth) \
        )
 
 u64 pvm_read_id_reg(const struct kvm_vcpu *vcpu, u32 id);
index 071993c16de81ca0b0181c56d0598b1b026ae018..42b4d6e3b15c0be6629b706780110c75b2b53eea 100644 (file)
@@ -36,9 +36,9 @@ static void pvm_init_traps_aa64pfr0(struct kvm_vcpu *vcpu)
 
        /* Protected KVM does not support AArch32 guests. */
        BUILD_BUG_ON(FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_EL0),
-               PVM_ID_AA64PFR0_RESTRICT_UNSIGNED) != ID_AA64PFR0_EL1_EL0_IMP);
+               PVM_ID_AA64PFR0_ALLOW) != ID_AA64PFR0_EL1_EL0_IMP);
        BUILD_BUG_ON(FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_EL1),
-               PVM_ID_AA64PFR0_RESTRICT_UNSIGNED) != ID_AA64PFR0_EL1_EL1_IMP);
+               PVM_ID_AA64PFR0_ALLOW) != ID_AA64PFR0_EL1_EL1_IMP);
 
        /*
         * Linux guests assume support for floating-point and Advanced SIMD. Do
@@ -362,8 +362,8 @@ static void pkvm_init_features_from_host(struct pkvm_hyp_vm *hyp_vm, const struc
        if (FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_SVE), PVM_ID_AA64PFR0_ALLOW))
                set_bit(KVM_ARM_VCPU_SVE, allowed_features);
 
-       if (FIELD_GET(ARM64_FEATURE_MASK(ID_AA64ISAR1_EL1_API), PVM_ID_AA64ISAR1_RESTRICT_UNSIGNED) &&
-           FIELD_GET(ARM64_FEATURE_MASK(ID_AA64ISAR1_EL1_APA), PVM_ID_AA64ISAR1_RESTRICT_UNSIGNED))
+       if (FIELD_GET(ARM64_FEATURE_MASK(ID_AA64ISAR1_EL1_API), PVM_ID_AA64ISAR1_ALLOW) &&
+           FIELD_GET(ARM64_FEATURE_MASK(ID_AA64ISAR1_EL1_APA), PVM_ID_AA64ISAR1_ALLOW))
                set_bit(KVM_ARM_VCPU_PTRAUTH_ADDRESS, allowed_features);
 
        if (FIELD_GET(ARM64_FEATURE_MASK(ID_AA64ISAR1_EL1_GPI), PVM_ID_AA64ISAR1_ALLOW) &&
index 2860548d4250854c68da2ba8d6ce0e98ec19f0fd..59fb2f0561774b2c1b2491d4efbf0836a1aa3f93 100644 (file)
@@ -89,7 +89,7 @@ static u64 get_pvm_id_aa64pfr0(const struct kvm_vcpu *vcpu)
        u64 allow_mask = PVM_ID_AA64PFR0_ALLOW;
 
        set_mask |= get_restricted_features_unsigned(id_aa64pfr0_el1_sys_val,
-               PVM_ID_AA64PFR0_RESTRICT_UNSIGNED);
+               PVM_ID_AA64PFR0_ALLOW);
 
        return (id_aa64pfr0_el1_sys_val & allow_mask) | set_mask;
 }
@@ -189,7 +189,7 @@ static u64 get_pvm_id_aa64mmfr0(const struct kvm_vcpu *vcpu)
        u64 set_mask;
 
        set_mask = get_restricted_features_unsigned(id_aa64mmfr0_el1_sys_val,
-               PVM_ID_AA64MMFR0_RESTRICT_UNSIGNED);
+               PVM_ID_AA64MMFR0_ALLOW);
 
        return (id_aa64mmfr0_el1_sys_val & PVM_ID_AA64MMFR0_ALLOW) | set_mask;
 }
@@ -276,7 +276,7 @@ static bool pvm_access_id_aarch32(struct kvm_vcpu *vcpu,
         * of AArch32 feature id registers.
         */
        BUILD_BUG_ON(FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_EL1),
-                    PVM_ID_AA64PFR0_RESTRICT_UNSIGNED) > ID_AA64PFR0_EL1_EL1_IMP);
+                    PVM_ID_AA64PFR0_ALLOW) > ID_AA64PFR0_EL1_EL1_IMP);
 
        return pvm_access_raz_wi(vcpu, p, r);
 }