KVM: arm64: Get rid of ARM64_FEATURE_MASK()
author Marc Zyngier <maz@kernel.org>
Sun, 17 Aug 2025 20:21:58 +0000 (21:21 +0100)
committer Oliver Upton <oliver.upton@linux.dev>
Thu, 21 Aug 2025 23:31:56 +0000 (16:31 -0700)
The ARM64_FEATURE_MASK() macro was a hack introduced whilst the
automatic generation of sysreg encodings was first being brought in
and was still too unreliable to be entirely trusted.

We are in a better place now, and we could really do without this
macro. Get rid of it altogether.
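
As the diff below shows, the macro merely pasted a "_MASK" suffix onto
its argument, so call sites can now pass the field constant straight to
FIELD_GET()/FIELD_PREP(). A minimal before/after sketch, with
illustrative GENMASK() definitions standing in for the generated
sysreg header:

	/* Illustrative definitions only; the real values come from the
	 * generated sysreg header (ID_AA64PFR0_EL1.EL0 shown here as the
	 * bottom 4-bit field). */
	#define ID_AA64PFR0_EL1_EL0		GENMASK(3, 0)
	#define ID_AA64PFR0_EL1_EL0_MASK	GENMASK(3, 0)

	/* The compatibility macro simply appended "_MASK": */
	#define ARM64_FEATURE_MASK(x)		(x##_MASK)

	/* Before: */
	el0 = FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_EL0), val);
	/* After: */
	el0 = FIELD_GET(ID_AA64PFR0_EL1_EL0, val);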

Signed-off-by: Marc Zyngier <maz@kernel.org>
Link: https://lore.kernel.org/r/20250817202158.395078-7-maz@kernel.org
Signed-off-by: Oliver Upton <oliver.upton@linux.dev>
arch/arm64/include/asm/sysreg.h
arch/arm64/kvm/arm.c
arch/arm64/kvm/sys_regs.c
tools/arch/arm64/include/asm/sysreg.h
tools/testing/selftests/kvm/arm64/aarch32_id_regs.c
tools/testing/selftests/kvm/arm64/debug-exceptions.c
tools/testing/selftests/kvm/arm64/no-vgic-v3.c
tools/testing/selftests/kvm/arm64/page_fault_test.c
tools/testing/selftests/kvm/arm64/set_id_regs.c
tools/testing/selftests/kvm/arm64/vpmu_counter_access.c
tools/testing/selftests/kvm/lib/arm64/processor.c

diff --git a/arch/arm64/include/asm/sysreg.h b/arch/arm64/include/asm/sysreg.h
index 948007cd368459a1804857377bd95cec51de7284..845875991eb86a235733060c1c2ebb5853079399 100644
 
 #define ARM64_FEATURE_FIELD_BITS       4
 
-/* Defined for compatibility only, do not add new users. */
-#define ARM64_FEATURE_MASK(x)  (x##_MASK)
-
 #ifdef __ASSEMBLY__
 
        .macro  mrs_s, rt, sreg
diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c
index 7a1a8210ff91852ce995135670d02a4c2ec0dcb7..d60d2a644391cd263e74638b6c9e43de4052749a 100644
@@ -2404,12 +2404,12 @@ static u64 get_hyp_id_aa64pfr0_el1(void)
         */
        u64 val = read_sanitised_ftr_reg(SYS_ID_AA64PFR0_EL1);
 
-       val &= ~(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_CSV2) |
-                ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_CSV3));
+       val &= ~(ID_AA64PFR0_EL1_CSV2 |
+                ID_AA64PFR0_EL1_CSV3);
 
-       val |= FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_CSV2),
+       val |= FIELD_PREP(ID_AA64PFR0_EL1_CSV2,
                          arm64_get_spectre_v2_state() == SPECTRE_UNAFFECTED);
-       val |= FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_CSV3),
+       val |= FIELD_PREP(ID_AA64PFR0_EL1_CSV3,
                          arm64_get_meltdown_state() == SPECTRE_UNAFFECTED);
 
        return val;
diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c
index 5abe4db6c008418979fc00084830defa7589ee7f..e387d1dfed1e64b06340f2f9bdbb94b565a9df45 100644
@@ -1615,18 +1615,18 @@ static u64 __kvm_read_sanitised_id_reg(const struct kvm_vcpu *vcpu,
                break;
        case SYS_ID_AA64ISAR1_EL1:
                if (!vcpu_has_ptrauth(vcpu))
-                       val &= ~(ARM64_FEATURE_MASK(ID_AA64ISAR1_EL1_APA) |
-                                ARM64_FEATURE_MASK(ID_AA64ISAR1_EL1_API) |
-                                ARM64_FEATURE_MASK(ID_AA64ISAR1_EL1_GPA) |
-                                ARM64_FEATURE_MASK(ID_AA64ISAR1_EL1_GPI));
+                       val &= ~(ID_AA64ISAR1_EL1_APA |
+                                ID_AA64ISAR1_EL1_API |
+                                ID_AA64ISAR1_EL1_GPA |
+                                ID_AA64ISAR1_EL1_GPI);
                break;
        case SYS_ID_AA64ISAR2_EL1:
                if (!vcpu_has_ptrauth(vcpu))
-                       val &= ~(ARM64_FEATURE_MASK(ID_AA64ISAR2_EL1_APA3) |
-                                ARM64_FEATURE_MASK(ID_AA64ISAR2_EL1_GPA3));
+                       val &= ~(ID_AA64ISAR2_EL1_APA3 |
+                                ID_AA64ISAR2_EL1_GPA3);
                if (!cpus_have_final_cap(ARM64_HAS_WFXT) ||
                    has_broken_cntvoff())
-                       val &= ~ARM64_FEATURE_MASK(ID_AA64ISAR2_EL1_WFxT);
+                       val &= ~ID_AA64ISAR2_EL1_WFxT;
                break;
        case SYS_ID_AA64ISAR3_EL1:
                val &= ID_AA64ISAR3_EL1_FPRCVT | ID_AA64ISAR3_EL1_FAMINMAX;
@@ -1642,7 +1642,7 @@ static u64 __kvm_read_sanitised_id_reg(const struct kvm_vcpu *vcpu,
                       ID_AA64MMFR3_EL1_S1PIE;
                break;
        case SYS_ID_MMFR4_EL1:
-               val &= ~ARM64_FEATURE_MASK(ID_MMFR4_EL1_CCIDX);
+               val &= ~ID_MMFR4_EL1_CCIDX;
                break;
        }
 
@@ -1828,22 +1828,22 @@ static u64 sanitise_id_aa64pfr1_el1(const struct kvm_vcpu *vcpu, u64 val)
        u64 pfr0 = read_sanitised_ftr_reg(SYS_ID_AA64PFR0_EL1);
 
        if (!kvm_has_mte(vcpu->kvm)) {
-               val &= ~ARM64_FEATURE_MASK(ID_AA64PFR1_EL1_MTE);
-               val &= ~ARM64_FEATURE_MASK(ID_AA64PFR1_EL1_MTE_frac);
+               val &= ~ID_AA64PFR1_EL1_MTE;
+               val &= ~ID_AA64PFR1_EL1_MTE_frac;
        }
 
        if (!(cpus_have_final_cap(ARM64_HAS_RASV1P1_EXTN) &&
              SYS_FIELD_GET(ID_AA64PFR0_EL1, RAS, pfr0) == ID_AA64PFR0_EL1_RAS_IMP))
-               val &= ~ARM64_FEATURE_MASK(ID_AA64PFR1_EL1_RAS_frac);
-
-       val &= ~ARM64_FEATURE_MASK(ID_AA64PFR1_EL1_SME);
-       val &= ~ARM64_FEATURE_MASK(ID_AA64PFR1_EL1_RNDR_trap);
-       val &= ~ARM64_FEATURE_MASK(ID_AA64PFR1_EL1_NMI);
-       val &= ~ARM64_FEATURE_MASK(ID_AA64PFR1_EL1_GCS);
-       val &= ~ARM64_FEATURE_MASK(ID_AA64PFR1_EL1_THE);
-       val &= ~ARM64_FEATURE_MASK(ID_AA64PFR1_EL1_MTEX);
-       val &= ~ARM64_FEATURE_MASK(ID_AA64PFR1_EL1_PFAR);
-       val &= ~ARM64_FEATURE_MASK(ID_AA64PFR1_EL1_MPAM_frac);
+               val &= ~ID_AA64PFR1_EL1_RAS_frac;
+
+       val &= ~ID_AA64PFR1_EL1_SME;
+       val &= ~ID_AA64PFR1_EL1_RNDR_trap;
+       val &= ~ID_AA64PFR1_EL1_NMI;
+       val &= ~ID_AA64PFR1_EL1_GCS;
+       val &= ~ID_AA64PFR1_EL1_THE;
+       val &= ~ID_AA64PFR1_EL1_MTEX;
+       val &= ~ID_AA64PFR1_EL1_PFAR;
+       val &= ~ID_AA64PFR1_EL1_MPAM_frac;
 
        return val;
 }
diff --git a/tools/arch/arm64/include/asm/sysreg.h b/tools/arch/arm64/include/asm/sysreg.h
index 690b6ebd118f4f5dbbe582ee54751ee6534b1d00..65f2759ea27a35cfce57f7459d8f16f1c002f0af 100644
 
 #define ARM64_FEATURE_FIELD_BITS       4
 
-/* Defined for compatibility only, do not add new users. */
-#define ARM64_FEATURE_MASK(x)  (x##_MASK)
-
 #ifdef __ASSEMBLY__
 
        .macro  mrs_s, rt, sreg
diff --git a/tools/testing/selftests/kvm/arm64/aarch32_id_regs.c b/tools/testing/selftests/kvm/arm64/aarch32_id_regs.c
index cef8f7323ceb8842145d6678f5106fe839c7fd8e..713005b6f508e800c1c62b0e8ed8c4f2dab9de71 100644
@@ -146,7 +146,7 @@ static bool vcpu_aarch64_only(struct kvm_vcpu *vcpu)
 
        val = vcpu_get_reg(vcpu, KVM_ARM64_SYS_REG(SYS_ID_AA64PFR0_EL1));
 
-       el0 = FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_EL0), val);
+       el0 = FIELD_GET(ID_AA64PFR0_EL1_EL0, val);
        return el0 == ID_AA64PFR0_EL1_EL0_IMP;
 }
 
diff --git a/tools/testing/selftests/kvm/arm64/debug-exceptions.c b/tools/testing/selftests/kvm/arm64/debug-exceptions.c
index c7fb55c9135ba45d90356c050c10d2aeb0c607b5..521991a89ad94a0cc11a3bc46ea8ed47225f4386 100644
@@ -116,12 +116,12 @@ static void reset_debug_state(void)
 
        /* Reset all bcr/bvr/wcr/wvr registers */
        dfr0 = read_sysreg(id_aa64dfr0_el1);
-       brps = FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_BRPs), dfr0);
+       brps = FIELD_GET(ID_AA64DFR0_EL1_BRPs, dfr0);
        for (i = 0; i <= brps; i++) {
                write_dbgbcr(i, 0);
                write_dbgbvr(i, 0);
        }
-       wrps = FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_WRPs), dfr0);
+       wrps = FIELD_GET(ID_AA64DFR0_EL1_WRPs, dfr0);
        for (i = 0; i <= wrps; i++) {
                write_dbgwcr(i, 0);
                write_dbgwvr(i, 0);
@@ -418,7 +418,7 @@ static void guest_code_ss(int test_cnt)
 
 static int debug_version(uint64_t id_aa64dfr0)
 {
-       return FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_DebugVer), id_aa64dfr0);
+       return FIELD_GET(ID_AA64DFR0_EL1_DebugVer, id_aa64dfr0);
 }
 
 static void test_guest_debug_exceptions(uint8_t bpn, uint8_t wpn, uint8_t ctx_bpn)
@@ -539,14 +539,14 @@ void test_guest_debug_exceptions_all(uint64_t aa64dfr0)
        int b, w, c;
 
        /* Number of breakpoints */
-       brp_num = FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_BRPs), aa64dfr0) + 1;
+       brp_num = FIELD_GET(ID_AA64DFR0_EL1_BRPs, aa64dfr0) + 1;
        __TEST_REQUIRE(brp_num >= 2, "At least two breakpoints are required");
 
        /* Number of watchpoints */
-       wrp_num = FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_WRPs), aa64dfr0) + 1;
+       wrp_num = FIELD_GET(ID_AA64DFR0_EL1_WRPs, aa64dfr0) + 1;
 
        /* Number of context aware breakpoints */
-       ctx_brp_num = FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_CTX_CMPs), aa64dfr0) + 1;
+       ctx_brp_num = FIELD_GET(ID_AA64DFR0_EL1_CTX_CMPs, aa64dfr0) + 1;
 
        pr_debug("%s brp_num:%d, wrp_num:%d, ctx_brp_num:%d\n", __func__,
                 brp_num, wrp_num, ctx_brp_num);
diff --git a/tools/testing/selftests/kvm/arm64/no-vgic-v3.c b/tools/testing/selftests/kvm/arm64/no-vgic-v3.c
index ebd70430c89ded6b00b402f9615dc380c66e8ce3..f222538e608418065d745c6df07378269486550b 100644
@@ -54,7 +54,7 @@ static void guest_code(void)
         * Check that we advertise that ID_AA64PFR0_EL1.GIC == 0, having
         * hidden the feature at runtime without any other userspace action.
         */
-       __GUEST_ASSERT(FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_GIC),
+       __GUEST_ASSERT(FIELD_GET(ID_AA64PFR0_EL1_GIC,
                                 read_sysreg(id_aa64pfr0_el1)) == 0,
                       "GICv3 wrongly advertised");
 
@@ -165,7 +165,7 @@ int main(int argc, char *argv[])
 
        vm = vm_create_with_one_vcpu(&vcpu, NULL);
        pfr0 = vcpu_get_reg(vcpu, KVM_ARM64_SYS_REG(SYS_ID_AA64PFR0_EL1));
-       __TEST_REQUIRE(FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_GIC), pfr0),
+       __TEST_REQUIRE(FIELD_GET(ID_AA64PFR0_EL1_GIC, pfr0),
                       "GICv3 not supported.");
        kvm_vm_free(vm);
 
diff --git a/tools/testing/selftests/kvm/arm64/page_fault_test.c b/tools/testing/selftests/kvm/arm64/page_fault_test.c
index dc6559dad9d863373d966fbe02c8c9ce34c9a53e..4ccbd389d13369f687b66bb0c177385fbe4fdf5b 100644
@@ -95,14 +95,14 @@ static bool guest_check_lse(void)
        uint64_t isar0 = read_sysreg(id_aa64isar0_el1);
        uint64_t atomic;
 
-       atomic = FIELD_GET(ARM64_FEATURE_MASK(ID_AA64ISAR0_EL1_ATOMIC), isar0);
+       atomic = FIELD_GET(ID_AA64ISAR0_EL1_ATOMIC, isar0);
        return atomic >= 2;
 }
 
 static bool guest_check_dc_zva(void)
 {
        uint64_t dczid = read_sysreg(dczid_el0);
-       uint64_t dzp = FIELD_GET(ARM64_FEATURE_MASK(DCZID_EL0_DZP), dczid);
+       uint64_t dzp = FIELD_GET(DCZID_EL0_DZP, dczid);
 
        return dzp == 0;
 }
@@ -195,7 +195,7 @@ static bool guest_set_ha(void)
        uint64_t hadbs, tcr;
 
        /* Skip if HA is not supported. */
-       hadbs = FIELD_GET(ARM64_FEATURE_MASK(ID_AA64MMFR1_EL1_HAFDBS), mmfr1);
+       hadbs = FIELD_GET(ID_AA64MMFR1_EL1_HAFDBS, mmfr1);
        if (hadbs == 0)
                return false;
 
diff --git a/tools/testing/selftests/kvm/arm64/set_id_regs.c b/tools/testing/selftests/kvm/arm64/set_id_regs.c
index d3bf9204409c358cd6b7bc3713bc4448c78915a2..36d40c267b994950455dcb640f427ebb3cf89f24 100644
@@ -594,8 +594,8 @@ static void test_user_set_mte_reg(struct kvm_vcpu *vcpu)
         */
        val = vcpu_get_reg(vcpu, KVM_ARM64_SYS_REG(SYS_ID_AA64PFR1_EL1));
 
-       mte = FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR1_EL1_MTE), val);
-       mte_frac = FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR1_EL1_MTE_frac), val);
+       mte = FIELD_GET(ID_AA64PFR1_EL1_MTE, val);
+       mte_frac = FIELD_GET(ID_AA64PFR1_EL1_MTE_frac, val);
        if (mte != ID_AA64PFR1_EL1_MTE_MTE2 ||
            mte_frac != ID_AA64PFR1_EL1_MTE_frac_NI) {
                ksft_test_result_skip("MTE_ASYNC or MTE_ASYMM are supported, nothing to test\n");
@@ -612,7 +612,7 @@ static void test_user_set_mte_reg(struct kvm_vcpu *vcpu)
        }
 
        val = vcpu_get_reg(vcpu, KVM_ARM64_SYS_REG(SYS_ID_AA64PFR1_EL1));
-       mte_frac = FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR1_EL1_MTE_frac), val);
+       mte_frac = FIELD_GET(ID_AA64PFR1_EL1_MTE_frac, val);
        if (mte_frac == ID_AA64PFR1_EL1_MTE_frac_NI)
                ksft_test_result_pass("ID_AA64PFR1_EL1.MTE_frac=0 accepted and still 0xF\n");
        else
@@ -774,7 +774,7 @@ int main(void)
 
        /* Check for AARCH64 only system */
        val = vcpu_get_reg(vcpu, KVM_ARM64_SYS_REG(SYS_ID_AA64PFR0_EL1));
-       el0 = FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_EL0), val);
+       el0 = FIELD_GET(ID_AA64PFR0_EL1_EL0, val);
        aarch64_only = (el0 == ID_AA64PFR0_EL1_EL0_IMP);
 
        ksft_print_header();
diff --git a/tools/testing/selftests/kvm/arm64/vpmu_counter_access.c b/tools/testing/selftests/kvm/arm64/vpmu_counter_access.c
index f16b3b27e32ed7ca57481f27d689d47783aa0345..a0c4ab8391559741a3e22e74869dcabd83c08393 100644
@@ -441,7 +441,7 @@ static void create_vpmu_vm(void *guest_code)
 
        /* Make sure that PMUv3 support is indicated in the ID register */
        dfr0 = vcpu_get_reg(vpmu_vm.vcpu, KVM_ARM64_SYS_REG(SYS_ID_AA64DFR0_EL1));
-       pmuver = FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_PMUVer), dfr0);
+       pmuver = FIELD_GET(ID_AA64DFR0_EL1_PMUVer, dfr0);
        TEST_ASSERT(pmuver != ID_AA64DFR0_EL1_PMUVer_IMP_DEF &&
                    pmuver >= ID_AA64DFR0_EL1_PMUVer_IMP,
                    "Unexpected PMUVER (0x%x) on the vCPU with PMUv3", pmuver);
diff --git a/tools/testing/selftests/kvm/lib/arm64/processor.c b/tools/testing/selftests/kvm/lib/arm64/processor.c
index 9d69904cb6084a8bbb2fa16546fad3846fd21f86..eb115123d74118035b6f09ecc52a4b51eca9be51 100644
@@ -573,15 +573,15 @@ void aarch64_get_supported_page_sizes(uint32_t ipa, uint32_t *ipa4k,
        err = ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg);
        TEST_ASSERT(err == 0, KVM_IOCTL_ERROR(KVM_GET_ONE_REG, vcpu_fd));
 
-       gran = FIELD_GET(ARM64_FEATURE_MASK(ID_AA64MMFR0_EL1_TGRAN4), val);
+       gran = FIELD_GET(ID_AA64MMFR0_EL1_TGRAN4, val);
        *ipa4k = max_ipa_for_page_size(ipa, gran, ID_AA64MMFR0_EL1_TGRAN4_NI,
                                        ID_AA64MMFR0_EL1_TGRAN4_52_BIT);
 
-       gran = FIELD_GET(ARM64_FEATURE_MASK(ID_AA64MMFR0_EL1_TGRAN64), val);
+       gran = FIELD_GET(ID_AA64MMFR0_EL1_TGRAN64, val);
        *ipa64k = max_ipa_for_page_size(ipa, gran, ID_AA64MMFR0_EL1_TGRAN64_NI,
                                        ID_AA64MMFR0_EL1_TGRAN64_IMP);
 
-       gran = FIELD_GET(ARM64_FEATURE_MASK(ID_AA64MMFR0_EL1_TGRAN16), val);
+       gran = FIELD_GET(ID_AA64MMFR0_EL1_TGRAN16, val);
        *ipa16k = max_ipa_for_page_size(ipa, gran, ID_AA64MMFR0_EL1_TGRAN16_NI,
                                        ID_AA64MMFR0_EL1_TGRAN16_52_BIT);