www.infradead.org Git - users/jedix/linux-maple.git/commitdiff
KVM: arm64: Make ID_AA64PFR1_EL1.RAS_frac writable
author: Marc Zyngier <maz@kernel.org>
Sun, 17 Aug 2025 20:21:57 +0000 (21:21 +0100)
committer: Oliver Upton <oliver.upton@linux.dev>
Thu, 21 Aug 2025 23:31:56 +0000 (16:31 -0700)
Allow userspace to write to RAS_frac, under the condition that
the host supports RASv1p1 with RAS_frac==1. Other configurations
will result in RAS_frac being exposed as 0, and therefore implicitly
not writable.

To avoid the clutter, the ID_AA64PFR1_EL1 sanitisation is moved to
its own function.

Signed-off-by: Marc Zyngier <maz@kernel.org>
Reviewed-by: Cornelia Huck <cohuck@redhat.com>
Link: https://lore.kernel.org/r/20250817202158.395078-6-maz@kernel.org
Signed-off-by: Oliver Upton <oliver.upton@linux.dev>
arch/arm64/kvm/sys_regs.c

index bf160693963c814f3da28b380a27bcc153c150a1..5abe4db6c008418979fc00084830defa7589ee7f 100644 (file)
@@ -1584,6 +1584,7 @@ static u8 pmuver_to_perfmon(u8 pmuver)
 }
 
 static u64 sanitise_id_aa64pfr0_el1(const struct kvm_vcpu *vcpu, u64 val);
+static u64 sanitise_id_aa64pfr1_el1(const struct kvm_vcpu *vcpu, u64 val);
 static u64 sanitise_id_aa64dfr0_el1(const struct kvm_vcpu *vcpu, u64 val);
 
 /* Read a sanitised cpufeature ID register by sys_reg_desc */
@@ -1606,19 +1607,7 @@ static u64 __kvm_read_sanitised_id_reg(const struct kvm_vcpu *vcpu,
                val = sanitise_id_aa64pfr0_el1(vcpu, val);
                break;
        case SYS_ID_AA64PFR1_EL1:
-               if (!kvm_has_mte(vcpu->kvm)) {
-                       val &= ~ARM64_FEATURE_MASK(ID_AA64PFR1_EL1_MTE);
-                       val &= ~ARM64_FEATURE_MASK(ID_AA64PFR1_EL1_MTE_frac);
-               }
-
-               val &= ~ARM64_FEATURE_MASK(ID_AA64PFR1_EL1_SME);
-               val &= ~ARM64_FEATURE_MASK(ID_AA64PFR1_EL1_RNDR_trap);
-               val &= ~ARM64_FEATURE_MASK(ID_AA64PFR1_EL1_NMI);
-               val &= ~ARM64_FEATURE_MASK(ID_AA64PFR1_EL1_GCS);
-               val &= ~ARM64_FEATURE_MASK(ID_AA64PFR1_EL1_THE);
-               val &= ~ARM64_FEATURE_MASK(ID_AA64PFR1_EL1_MTEX);
-               val &= ~ARM64_FEATURE_MASK(ID_AA64PFR1_EL1_PFAR);
-               val &= ~ARM64_FEATURE_MASK(ID_AA64PFR1_EL1_MPAM_frac);
+               val = sanitise_id_aa64pfr1_el1(vcpu, val);
                break;
        case SYS_ID_AA64PFR2_EL1:
                /* We only expose FPMR */
@@ -1834,6 +1823,31 @@ static u64 sanitise_id_aa64pfr0_el1(const struct kvm_vcpu *vcpu, u64 val)
        return val;
 }
 
+/*
+ * Compute the sanitised, guest-visible value of ID_AA64PFR1_EL1 for a
+ * vcpu: clear feature fields the VM configuration doesn't support and
+ * fields KVM never exposes. ID_AA64PFR0_EL1 is consulted because
+ * RAS_frac is only meaningful relative to the base RAS field there.
+ */
+static u64 sanitise_id_aa64pfr1_el1(const struct kvm_vcpu *vcpu, u64 val)
+{
+       u64 pfr0 = read_sanitised_ftr_reg(SYS_ID_AA64PFR0_EL1);
+
+       /* Hide MTE (and its frac field) unless MTE is enabled for this VM */
+       if (!kvm_has_mte(vcpu->kvm)) {
+               val &= ~ARM64_FEATURE_MASK(ID_AA64PFR1_EL1_MTE);
+               val &= ~ARM64_FEATURE_MASK(ID_AA64PFR1_EL1_MTE_frac);
+       }
+
+       /*
+        * Only expose RAS_frac (as a writable field) when the host has
+        * RASv1p1 with RAS==IMP, i.e. the RASv1p1 support is advertised
+        * via RAS_frac==1 rather than via RAS itself. In any other
+        * configuration RAS_frac reads as 0 and is implicitly not
+        * writable by userspace.
+        */
+       if (!(cpus_have_final_cap(ARM64_HAS_RASV1P1_EXTN) &&
+             SYS_FIELD_GET(ID_AA64PFR0_EL1, RAS, pfr0) == ID_AA64PFR0_EL1_RAS_IMP))
+               val &= ~ARM64_FEATURE_MASK(ID_AA64PFR1_EL1_RAS_frac);
+
+       /* Fields unconditionally hidden from the guest */
+       val &= ~ARM64_FEATURE_MASK(ID_AA64PFR1_EL1_SME);
+       val &= ~ARM64_FEATURE_MASK(ID_AA64PFR1_EL1_RNDR_trap);
+       val &= ~ARM64_FEATURE_MASK(ID_AA64PFR1_EL1_NMI);
+       val &= ~ARM64_FEATURE_MASK(ID_AA64PFR1_EL1_GCS);
+       val &= ~ARM64_FEATURE_MASK(ID_AA64PFR1_EL1_THE);
+       val &= ~ARM64_FEATURE_MASK(ID_AA64PFR1_EL1_MTEX);
+       val &= ~ARM64_FEATURE_MASK(ID_AA64PFR1_EL1_PFAR);
+       val &= ~ARM64_FEATURE_MASK(ID_AA64PFR1_EL1_MPAM_frac);
+
+       return val;
+}
+
+
 static u64 sanitise_id_aa64dfr0_el1(const struct kvm_vcpu *vcpu, u64 val)
 {
        val = ID_REG_LIMIT_FIELD_ENUM(val, ID_AA64DFR0_EL1, DebugVer, V8P8);
@@ -2952,7 +2966,6 @@ static const struct sys_reg_desc sys_reg_descs[] = {
                                       ID_AA64PFR1_EL1_SME |
                                       ID_AA64PFR1_EL1_RES0 |
                                       ID_AA64PFR1_EL1_MPAM_frac |
-                                      ID_AA64PFR1_EL1_RAS_frac |
                                       ID_AA64PFR1_EL1_MTE)),
        ID_WRITABLE(ID_AA64PFR2_EL1, ID_AA64PFR2_EL1_FPMR),
        ID_UNALLOCATED(4,3),