#ifndef __ASM_CACHE_H
 #define __ASM_CACHE_H
 
-#define CTR_L1IP_SHIFT         14
-#define CTR_L1IP_MASK          3
-#define CTR_DMINLINE_SHIFT     16
-#define CTR_IMINLINE_SHIFT     0
-#define CTR_IMINLINE_MASK      0xf
-#define CTR_ERG_SHIFT          20
-#define CTR_CWG_SHIFT          24
-#define CTR_CWG_MASK           15
-#define CTR_IDC_SHIFT          28
-#define CTR_DIC_SHIFT          29
-
-#define CTR_CACHE_MINLINE_MASK \
-       (0xf << CTR_DMINLINE_SHIFT | CTR_IMINLINE_MASK << CTR_IMINLINE_SHIFT)
-
-#define ICACHE_POLICY_VPIPT    0
-#define ICACHE_POLICY_VIPT     2
-#define ICACHE_POLICY_PIPT     3
-
 #define L1_CACHE_SHIFT         (6)
 #define L1_CACHE_BYTES         (1 << L1_CACHE_SHIFT)
 
 
 #include <asm/cputype.h>
 #include <asm/mte-def.h>
+#include <asm/sysreg.h>
 
 #ifdef CONFIG_KASAN_SW_TAGS
 #define ARCH_SLAB_MINALIGN     (1ULL << KASAN_SHADOW_SCALE_SHIFT)
 #define arch_slab_minalign() arch_slab_minalign()
 #endif
 
-#define CTR_L1IP(ctr)          (((ctr) >> CTR_L1IP_SHIFT) & CTR_L1IP_MASK)
+/* D/I min-line fields of CTR_EL0; note mixed-case field-name spellings
+ * (DminLine/IminLine) match the generated sysreg macro names used
+ * throughout the rest of this patch (CTR_EL0_DminLine_SHIFT etc.).
+ */
+#define CTR_CACHE_MINLINE_MASK \
+       (0xf << CTR_EL0_DminLine_SHIFT | \
+        CTR_EL0_IminLine_MASK << CTR_EL0_IminLine_SHIFT)
+
+#define CTR_L1IP(ctr)          SYS_FIELD_GET(CTR_EL0, L1Ip, ctr)
 
 #define ICACHEF_ALIASING       0
 #define ICACHEF_VPIPT          1
 
 static inline u32 cache_type_cwg(void)
 {
-       return (read_cpuid_cachetype() >> CTR_CWG_SHIFT) & CTR_CWG_MASK;
+       return (read_cpuid_cachetype() >> CTR_EL0_CWG_SHIFT) & CTR_EL0_CWG_MASK;
 }
 
 #define __read_mostly __section(".data..read_mostly")
 {
        u32 ctr = read_cpuid_cachetype();
 
-       if (!(ctr & BIT(CTR_IDC_SHIFT))) {
+       if (!(ctr & BIT(CTR_EL0_IDC_SHIFT))) {
                u64 clidr = read_sysreg(clidr_el1);
 
                if (CLIDR_LOC(clidr) == 0 ||
                    (CLIDR_LOUIS(clidr) == 0 && CLIDR_LOUU(clidr) == 0))
-                       ctr |= BIT(CTR_IDC_SHIFT);
+                       ctr |= BIT(CTR_EL0_IDC_SHIFT);
        }
 
        return ctr;
 
 #define MVFR2_FPMISC_SHIFT             4
 #define MVFR2_SIMDMISC_SHIFT           0
 
+#define CTR_EL0_L1Ip_VPIPT             0
+#define CTR_EL0_L1Ip_VIPT              2
+#define CTR_EL0_L1Ip_PIPT              3
+
+#define CTR_EL0_L1Ip_SHIFT             14
+#define CTR_EL0_L1Ip_MASK              3
+#define CTR_EL0_DminLine_SHIFT         16
+#define CTR_EL0_IminLine_SHIFT         0
+#define CTR_EL0_IminLine_MASK          0xf
+#define CTR_EL0_ERG_SHIFT              20
+#define CTR_EL0_CWG_SHIFT              24
+#define CTR_EL0_CWG_MASK               15
+#define CTR_EL0_IDC_SHIFT              28
+#define CTR_EL0_DIC_SHIFT              29
+
 #define DCZID_DZP_SHIFT                        4
 #define DCZID_BS_SHIFT                 0
 
 
 
        ctr_el0 = read_sanitised_ftr_reg(SYS_CTR_EL0);
        d_size = 4 << cpuid_feature_extract_unsigned_field(ctr_el0,
-                                                          CTR_DMINLINE_SHIFT);
+                                                          CTR_EL0_DminLine_SHIFT);
        cur = start & ~(d_size - 1);
        do {
                /*
 
                                int scope)
 {
        u32 midr = read_cpuid_id();
-       bool has_dic = read_cpuid_cachetype() & BIT(CTR_DIC_SHIFT);
+       bool has_dic = read_cpuid_cachetype() & BIT(CTR_EL0_DIC_SHIFT);
        const struct midr_range range = MIDR_ALL_VERSIONS(MIDR_NEOVERSE_N1);
 
        WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
 
 
 static const struct arm64_ftr_bits ftr_ctr[] = {
        ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_EXACT, 31, 1, 1), /* RES1 */
-       ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, CTR_DIC_SHIFT, 1, 1),
-       ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, CTR_IDC_SHIFT, 1, 1),
-       ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_HIGHER_OR_ZERO_SAFE, CTR_CWG_SHIFT, 4, 0),
-       ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_HIGHER_OR_ZERO_SAFE, CTR_ERG_SHIFT, 4, 0),
-       ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, CTR_DMINLINE_SHIFT, 4, 1),
+       ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, CTR_EL0_DIC_SHIFT, 1, 1),
+       ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, CTR_EL0_IDC_SHIFT, 1, 1),
+       ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_HIGHER_OR_ZERO_SAFE, CTR_EL0_CWG_SHIFT, 4, 0),
+       ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_HIGHER_OR_ZERO_SAFE, CTR_EL0_ERG_SHIFT, 4, 0),
+       ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, CTR_EL0_DminLine_SHIFT, 4, 1),
        /*
         * Linux can handle differing I-cache policies. Userspace JITs will
         * make use of *minLine.
         * If we have differing I-cache policies, report it as the weakest - VIPT.
         */
-       ARM64_FTR_BITS(FTR_VISIBLE, FTR_NONSTRICT, FTR_EXACT, CTR_L1IP_SHIFT, 2, ICACHE_POLICY_VIPT),   /* L1Ip */
-       ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, CTR_IMINLINE_SHIFT, 4, 0),
+       ARM64_FTR_BITS(FTR_VISIBLE, FTR_NONSTRICT, FTR_EXACT, CTR_EL0_L1Ip_SHIFT, 2, CTR_EL0_L1Ip_VIPT),        /* L1Ip */
+       ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, CTR_EL0_IminLine_SHIFT, 4, 0),
        ARM64_FTR_END,
 };
 
        else
                ctr = read_cpuid_effective_cachetype();
 
-       return ctr & BIT(CTR_IDC_SHIFT);
+       return ctr & BIT(CTR_EL0_IDC_SHIFT);
 }
 
 static void cpu_emulate_effective_ctr(const struct arm64_cpu_capabilities *__unused)
         * to the CTR_EL0 on this CPU and emulate it with the real/safe
         * value.
         */
-       if (!(read_cpuid_cachetype() & BIT(CTR_IDC_SHIFT)))
+       if (!(read_cpuid_cachetype() & BIT(CTR_EL0_IDC_SHIFT)))
                sysreg_clear_set(sctlr_el1, SCTLR_EL1_UCT, 0);
 }
 
        else
                ctr = read_cpuid_cachetype();
 
-       return ctr & BIT(CTR_DIC_SHIFT);
+       return ctr & BIT(CTR_EL0_DIC_SHIFT);
 }
 
 static bool __maybe_unused
 
 static inline const char *icache_policy_str(int l1ip)
 {
        switch (l1ip) {
-       case ICACHE_POLICY_VPIPT:
+       case CTR_EL0_L1Ip_VPIPT:
                return "VPIPT";
-       case ICACHE_POLICY_VIPT:
+       case CTR_EL0_L1Ip_VIPT:
                return "VIPT";
-       case ICACHE_POLICY_PIPT:
+       case CTR_EL0_L1Ip_PIPT:
                return "PIPT";
        default:
                return "RESERVED/UNKNOWN";
        u32 l1ip = CTR_L1IP(info->reg_ctr);
 
        switch (l1ip) {
-       case ICACHE_POLICY_PIPT:
+       case CTR_EL0_L1Ip_PIPT:
                break;
-       case ICACHE_POLICY_VPIPT:
+       case CTR_EL0_L1Ip_VPIPT:
                set_bit(ICACHEF_VPIPT, &__icache_flags);
                break;
-       case ICACHE_POLICY_VIPT:
+       case CTR_EL0_L1Ip_VIPT:
        default:
                /* Assume aliasing */
                set_bit(ICACHEF_ALIASING, &__icache_flags);
 
 
        if (cpus_have_const_cap(ARM64_WORKAROUND_1542419)) {
                /* Hide DIC so that we can trap the unnecessary maintenance...*/
-               val &= ~BIT(CTR_DIC_SHIFT);
+               val &= ~BIT(CTR_EL0_DIC_SHIFT);
 
                /* ... and fake IminLine to reduce the number of traps. */
-               val &= ~CTR_IMINLINE_MASK;
-               val |= (PAGE_SHIFT - 2) & CTR_IMINLINE_MASK;
+               val &= ~CTR_EL0_IminLine_MASK;
+               val |= (PAGE_SHIFT - 2) & CTR_EL0_IminLine_MASK;
        }
 
        pt_regs_write_reg(regs, rt, val);