--- a/arch/x86/kernel/cpu/bugs.c
+++ b/arch/x86/kernel/cpu/bugs.c
 u64 x86_pred_cmd __ro_after_init = PRED_CMD_IBPB;
 EXPORT_SYMBOL_GPL(x86_pred_cmd);
 
-static u64 __ro_after_init ia32_cap;
+static u64 __ro_after_init x86_arch_cap_msr;
 
 static DEFINE_MUTEX(spec_ctrl_mutex);
 
                x86_spec_ctrl_base &= ~SPEC_CTRL_MITIGATIONS_MASK;
        }
 
-       ia32_cap = x86_read_arch_cap_msr();
+       x86_arch_cap_msr = x86_read_arch_cap_msr();
 
        /* Select the proper CPU mitigations before patching alternatives: */
        spectre_v1_select_mitigation();
         * On MDS_NO=1 CPUs if ARCH_CAP_TSX_CTRL_MSR is not set, microcode
         * update is required.
         */
-       if ( (ia32_cap & ARCH_CAP_MDS_NO) &&
-           !(ia32_cap & ARCH_CAP_TSX_CTRL_MSR))
+       if ( (x86_arch_cap_msr & ARCH_CAP_MDS_NO) &&
+           !(x86_arch_cap_msr & ARCH_CAP_TSX_CTRL_MSR))
                taa_mitigation = TAA_MITIGATION_UCODE_NEEDED;
 
        /*
         * be propagated to uncore buffers, clearing the Fill buffers on idle
         * is required irrespective of SMT state.
         */
-       if (!(ia32_cap & ARCH_CAP_FBSDP_NO))
+       if (!(x86_arch_cap_msr & ARCH_CAP_FBSDP_NO))
                static_branch_enable(&mds_idle_clear);
 
        /*
         * FB_CLEAR or by the presence of both MD_CLEAR and L1D_FLUSH on MDS
         * affected systems.
         */
-       if ((ia32_cap & ARCH_CAP_FB_CLEAR) ||
+       if ((x86_arch_cap_msr & ARCH_CAP_FB_CLEAR) ||
            (boot_cpu_has(X86_FEATURE_MD_CLEAR) &&
             boot_cpu_has(X86_FEATURE_FLUSH_L1D) &&
-            !(ia32_cap & ARCH_CAP_MDS_NO)))
+            !(x86_arch_cap_msr & ARCH_CAP_MDS_NO)))
                mmio_mitigation = MMIO_MITIGATION_VERW;
        else
                mmio_mitigation = MMIO_MITIGATION_UCODE_NEEDED;
        if (rfds_mitigation == RFDS_MITIGATION_OFF)
                return;
 
-       if (ia32_cap & ARCH_CAP_RFDS_CLEAR)
+       if (x86_arch_cap_msr & ARCH_CAP_RFDS_CLEAR)
                setup_force_cpu_cap(X86_FEATURE_CLEAR_CPU_BUF);
        else
                rfds_mitigation = RFDS_MITIGATION_UCODE_NEEDED;
         * are only exposed to SRBDS when TSX is enabled or when CPU is affected
         * by Processor MMIO Stale Data vulnerability.
         */
-       if ((ia32_cap & ARCH_CAP_MDS_NO) && !boot_cpu_has(X86_FEATURE_RTM) &&
+       if ((x86_arch_cap_msr & ARCH_CAP_MDS_NO) && !boot_cpu_has(X86_FEATURE_RTM) &&
            !boot_cpu_has_bug(X86_BUG_MMIO_STALE_DATA))
                srbds_mitigation = SRBDS_MITIGATION_TSX_OFF;
        else if (boot_cpu_has(X86_FEATURE_HYPERVISOR))
        /* Will verify below that mitigation _can_ be disabled */
 
        /* No microcode */
-       if (!(ia32_cap & ARCH_CAP_GDS_CTRL)) {
+       if (!(x86_arch_cap_msr & ARCH_CAP_GDS_CTRL)) {
                if (gds_mitigation == GDS_MITIGATION_FORCE) {
                        /*
                         * This only needs to be done on the boot CPU so do it
 /* Disable in-kernel use of non-RSB RET predictors */
 static void __init spec_ctrl_disable_kernel_rrsba(void)
 {
-       u64 ia32_cap;
-
        if (!boot_cpu_has(X86_FEATURE_RRSBA_CTRL))
                return;
 
-       ia32_cap = x86_read_arch_cap_msr();
-
-       if (ia32_cap & ARCH_CAP_RRSBA) {
+       if (x86_arch_cap_msr & ARCH_CAP_RRSBA) {
                x86_spec_ctrl_base |= SPEC_CTRL_RRSBA_DIS_S;
                update_spec_ctrl(x86_spec_ctrl_base);
        }
        if (sched_smt_active()) {
                static_branch_enable(&mds_idle_clear);
        } else if (mmio_mitigation == MMIO_MITIGATION_OFF ||
-                  (ia32_cap & ARCH_CAP_FBSDP_NO)) {
+                  (x86_arch_cap_msr & ARCH_CAP_FBSDP_NO)) {
                static_branch_disable(&mds_idle_clear);
        }
 }
        else if  (boot_cpu_has(X86_FEATURE_CLEAR_BHB_LOOP))
                return "; BHI: SW loop, KVM: SW loop";
        else if (boot_cpu_has(X86_FEATURE_RETPOLINE) &&
-                !(ia32_cap & ARCH_CAP_RRSBA))
+                !(x86_arch_cap_msr & ARCH_CAP_RRSBA))
                return "; BHI: Retpoline";
        else if  (boot_cpu_has(X86_FEATURE_CLEAR_BHB_LOOP_ON_VMEXIT))
                return "; BHI: Syscall hardening, KVM: SW loop";
 
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
 u64 x86_read_arch_cap_msr(void)
 {
-       u64 ia32_cap = 0;
+       u64 x86_arch_cap_msr = 0;
 
        if (boot_cpu_has(X86_FEATURE_ARCH_CAPABILITIES))
-               rdmsrl(MSR_IA32_ARCH_CAPABILITIES, ia32_cap);
+               rdmsrl(MSR_IA32_ARCH_CAPABILITIES, x86_arch_cap_msr);
 
-       return ia32_cap;
+       return x86_arch_cap_msr;
 }
 
-static bool arch_cap_mmio_immune(u64 ia32_cap)
+static bool arch_cap_mmio_immune(u64 x86_arch_cap_msr)
 {
-       return (ia32_cap & ARCH_CAP_FBSDP_NO &&
-               ia32_cap & ARCH_CAP_PSDP_NO &&
-               ia32_cap & ARCH_CAP_SBDR_SSDP_NO);
+       return (x86_arch_cap_msr & ARCH_CAP_FBSDP_NO &&
+               x86_arch_cap_msr & ARCH_CAP_PSDP_NO &&
+               x86_arch_cap_msr & ARCH_CAP_SBDR_SSDP_NO);
 }
 
-static bool __init vulnerable_to_rfds(u64 ia32_cap)
+static bool __init vulnerable_to_rfds(u64 x86_arch_cap_msr)
 {
        /* The "immunity" bit trumps everything else: */
-       if (ia32_cap & ARCH_CAP_RFDS_NO)
+       if (x86_arch_cap_msr & ARCH_CAP_RFDS_NO)
                return false;
 
        /*
         * indicate that mitigation is needed because guest is running on a
         * vulnerable hardware or may migrate to such hardware:
         */
-       if (ia32_cap & ARCH_CAP_RFDS_CLEAR)
+       if (x86_arch_cap_msr & ARCH_CAP_RFDS_CLEAR)
                return true;
 
        /* Only consult the blacklist when there is no enumeration: */
 
 static void __init cpu_set_bug_bits(struct cpuinfo_x86 *c)
 {
-       u64 ia32_cap = x86_read_arch_cap_msr();
+       u64 x86_arch_cap_msr = x86_read_arch_cap_msr();
 
        /* Set ITLB_MULTIHIT bug if cpu is not in the whitelist and not mitigated */
        if (!cpu_matches(cpu_vuln_whitelist, NO_ITLB_MULTIHIT) &&
-           !(ia32_cap & ARCH_CAP_PSCHANGE_MC_NO))
+           !(x86_arch_cap_msr & ARCH_CAP_PSCHANGE_MC_NO))
                setup_force_cpu_bug(X86_BUG_ITLB_MULTIHIT);
 
        if (cpu_matches(cpu_vuln_whitelist, NO_SPECULATION))
                setup_force_cpu_bug(X86_BUG_SPECTRE_V2);
 
        if (!cpu_matches(cpu_vuln_whitelist, NO_SSB) &&
-           !(ia32_cap & ARCH_CAP_SSB_NO) &&
+           !(x86_arch_cap_msr & ARCH_CAP_SSB_NO) &&
           !cpu_has(c, X86_FEATURE_AMD_SSB_NO))
                setup_force_cpu_bug(X86_BUG_SPEC_STORE_BYPASS);
 
         * Don't use AutoIBRS when SNP is enabled because it degrades host
         * userspace indirect branch performance.
         */
-       if ((ia32_cap & ARCH_CAP_IBRS_ALL) ||
+       if ((x86_arch_cap_msr & ARCH_CAP_IBRS_ALL) ||
            (cpu_has(c, X86_FEATURE_AUTOIBRS) &&
             !cpu_feature_enabled(X86_FEATURE_SEV_SNP))) {
                setup_force_cpu_cap(X86_FEATURE_IBRS_ENHANCED);
                if (!cpu_matches(cpu_vuln_whitelist, NO_EIBRS_PBRSB) &&
-                   !(ia32_cap & ARCH_CAP_PBRSB_NO))
+                   !(x86_arch_cap_msr & ARCH_CAP_PBRSB_NO))
                        setup_force_cpu_bug(X86_BUG_EIBRS_PBRSB);
        }
 
        if (!cpu_matches(cpu_vuln_whitelist, NO_MDS) &&
-           !(ia32_cap & ARCH_CAP_MDS_NO)) {
+           !(x86_arch_cap_msr & ARCH_CAP_MDS_NO)) {
                setup_force_cpu_bug(X86_BUG_MDS);
                if (cpu_matches(cpu_vuln_whitelist, MSBDS_ONLY))
                        setup_force_cpu_bug(X86_BUG_MSBDS_ONLY);
         * TSX_CTRL check alone is not sufficient for cases when the microcode
         * update is not present or running as guest that don't get TSX_CTRL.
         */
-       if (!(ia32_cap & ARCH_CAP_TAA_NO) &&
+       if (!(x86_arch_cap_msr & ARCH_CAP_TAA_NO) &&
            (cpu_has(c, X86_FEATURE_RTM) ||
-            (ia32_cap & ARCH_CAP_TSX_CTRL_MSR)))
+            (x86_arch_cap_msr & ARCH_CAP_TSX_CTRL_MSR)))
                setup_force_cpu_bug(X86_BUG_TAA);
 
        /*
         * Set X86_BUG_MMIO_UNKNOWN for CPUs that are neither in the blacklist,
         * nor in the whitelist and also don't enumerate MSR ARCH_CAP MMIO bits.
         */
-       if (!arch_cap_mmio_immune(ia32_cap)) {
+       if (!arch_cap_mmio_immune(x86_arch_cap_msr)) {
                if (cpu_matches(cpu_vuln_blacklist, MMIO))
                        setup_force_cpu_bug(X86_BUG_MMIO_STALE_DATA);
                else if (!cpu_matches(cpu_vuln_whitelist, NO_MMIO))
        }
 
        if (!cpu_has(c, X86_FEATURE_BTC_NO)) {
-               if (cpu_matches(cpu_vuln_blacklist, RETBLEED) || (ia32_cap & ARCH_CAP_RSBA))
+               if (cpu_matches(cpu_vuln_blacklist, RETBLEED) || (x86_arch_cap_msr & ARCH_CAP_RSBA))
                        setup_force_cpu_bug(X86_BUG_RETBLEED);
        }
 
         * disabling AVX2. The only way to do this in HW is to clear XCR0[2],
         * which means that AVX will be disabled.
         */
-       if (cpu_matches(cpu_vuln_blacklist, GDS) && !(ia32_cap & ARCH_CAP_GDS_NO) &&
+       if (cpu_matches(cpu_vuln_blacklist, GDS) && !(x86_arch_cap_msr & ARCH_CAP_GDS_NO) &&
            boot_cpu_has(X86_FEATURE_AVX))
                setup_force_cpu_bug(X86_BUG_GDS);
 
-       if (vulnerable_to_rfds(ia32_cap))
+       if (vulnerable_to_rfds(x86_arch_cap_msr))
                setup_force_cpu_bug(X86_BUG_RFDS);
 
        /* When virtualized, eIBRS could be hidden, assume vulnerable */
-       if (!(ia32_cap & ARCH_CAP_BHI_NO) &&
+       if (!(x86_arch_cap_msr & ARCH_CAP_BHI_NO) &&
            !cpu_matches(cpu_vuln_whitelist, NO_BHI) &&
            (boot_cpu_has(X86_FEATURE_IBRS_ENHANCED) ||
             boot_cpu_has(X86_FEATURE_HYPERVISOR)))
                return;
 
        /* Rogue Data Cache Load? No! */
-       if (ia32_cap & ARCH_CAP_RDCL_NO)
+       if (x86_arch_cap_msr & ARCH_CAP_RDCL_NO)
                return;
 
        setup_force_cpu_bug(X86_BUG_CPU_MELTDOWN);