 #ifndef __ASM_CPUFEATURE_H
 #define __ASM_CPUFEATURE_H
 
-#include <linux/jump_label.h>
-
 #include <asm/hwcap.h>
 #include <asm/sysreg.h>
 
 
 #ifndef __ASSEMBLY__
 
+#include <linux/bug.h>
+#include <linux/jump_label.h>
 #include <linux/kernel.h>
 
 /* CPU feature register tracking */
        return elf_hwcap & (1UL << num);
 }
 
+/*
+ * Capability check for caps whose index is a compile-time constant: uses
+ * the per-capability static key, which is enabled once the cap is detected,
+ * so the cpu_hwcaps bitmap is never touched at run time.
+ */
+static inline bool cpus_have_const_cap(int num)
+{
+       if (num >= ARM64_NCAPS)
+               return false;
+       return static_branch_unlikely(&cpu_hwcap_keys[num]);
+}
+
 static inline bool cpus_have_cap(unsigned int num)
 {
        if (num >= ARM64_NCAPS)
                return false;
-       if (__builtin_constant_p(num))
-               return static_branch_unlikely(&cpu_hwcap_keys[num]);
-       else
-               return test_bit(num, cpu_hwcaps);
+       return test_bit(num, cpu_hwcaps);
 }
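
For illustration, a minimal sketch of how callers are expected to pick between the two helpers after this change (ARM64_HAS_EXAMPLE_FEATURE and the wrappers below are hypothetical, not part of the patch): a compile-time constant cap index goes through the patched static branch, while an index only known at run time goes through the bitmap test.

/*
 * Hedged sketch, not part of the patch. ARM64_HAS_EXAMPLE_FEATURE stands in
 * for any compile-time constant from the real capability list.
 */
#include <asm/cpufeature.h>

static inline bool example_feature_enabled(void)
{
	/* Constant index: resolved through the patched static branch. */
	return cpus_have_const_cap(ARM64_HAS_EXAMPLE_FEATURE);
}

static bool example_cap_set(unsigned int num)
{
	/* Run-time index: tests the cpu_hwcaps bitmap instead. */
	return cpus_have_cap(num);
}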
 
 static inline void cpus_set_cap(unsigned int num)
 
 static inline bool system_supports_32bit_el0(void)
 {
-       return cpus_have_cap(ARM64_HAS_32BIT_EL0);
+       return cpus_have_const_cap(ARM64_HAS_32BIT_EL0);
 }
 
 static inline bool system_supports_mixed_endian_el0(void)
 
 static bool __maybe_unused
 cpufeature_pan_not_uao(const struct arm64_cpu_capabilities *entry, int __unused)
 {
-       return (cpus_have_cap(ARM64_HAS_PAN) && !cpus_have_cap(ARM64_HAS_UAO));
+       return (cpus_have_const_cap(ARM64_HAS_PAN) && !cpus_have_const_cap(ARM64_HAS_UAO));
 }
 
                memset(childregs, 0, sizeof(struct pt_regs));
                childregs->pstate = PSR_MODE_EL1h;
                if (IS_ENABLED(CONFIG_ARM64_UAO) &&
-                   cpus_have_cap(ARM64_HAS_UAO))
+                   cpus_have_const_cap(ARM64_HAS_UAO))
                        childregs->pstate |= PSR_UAO_BIT;
                p->thread.cpu_context.x19 = stack_start;
                p->thread.cpu_context.x20 = stk_sz;
 
 }
 
 #ifdef CONFIG_ARM64
-static DEFINE_STATIC_KEY_FALSE(is_cavium_thunderx);
 
 static u64 __maybe_unused gic_read_iar(void)
 {
-       if (static_branch_unlikely(&is_cavium_thunderx))
+       if (cpus_have_const_cap(ARM64_WORKAROUND_CAVIUM_23154))
                return gic_read_iar_cavium_thunderx();
        else
                return gic_read_iar_common();
        .select = gic_irq_domain_select,
 };
 
-static void gicv3_enable_quirks(void)
-{
-#ifdef CONFIG_ARM64
-       if (cpus_have_cap(ARM64_WORKAROUND_CAVIUM_23154))
-               static_branch_enable(&is_cavium_thunderx);
-#endif
-}
-
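
With gicv3_enable_quirks() gone, nothing in the GIC driver flips a static key any more; the assumption behind this hunk is that the cpufeature core enables the per-capability static key when it records a detected capability, roughly along these lines (a sketch under that assumption, not the literal kernel code):

/*
 * Sketch only: setting a capability updates both the run-time bitmap
 * (cpus_have_cap()) and the static key (cpus_have_const_cap()), which is
 * what makes the driver-local is_cavium_thunderx key redundant.
 */
static inline void example_cpus_set_cap(unsigned int num)
{
	if (num >= ARM64_NCAPS)
		return;
	__set_bit(num, cpu_hwcaps);			/* slow path: cpus_have_cap() */
	static_branch_enable(&cpu_hwcap_keys[num]);	/* fast path: cpus_have_const_cap() */
}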
 static int __init gic_init_bases(void __iomem *dist_base,
                                 struct redist_region *rdist_regs,
                                 u32 nr_redist_regions,
        gic_data.nr_redist_regions = nr_redist_regions;
        gic_data.redist_stride = redist_stride;
 
-       gicv3_enable_quirks();
-
        /*
         * Find out how many interrupts are supported.
         * The GIC only supports up to 1020 interrupt sources (SGI+PPI+SPI)