www.infradead.org Git - users/dwmw2/linux.git/commitdiff
x86/msr: Rename 'wrmsrl_safe()' to 'wrmsrq_safe()'
authorIngo Molnar <mingo@kernel.org>
Wed, 9 Apr 2025 20:28:57 +0000 (22:28 +0200)
committerIngo Molnar <mingo@kernel.org>
Thu, 10 Apr 2025 09:58:44 +0000 (11:58 +0200)
Suggested-by: "H. Peter Anvin" <hpa@zytor.com>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Juergen Gross <jgross@suse.com>
Cc: Dave Hansen <dave.hansen@intel.com>
Cc: Xin Li <xin@zytor.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
21 files changed:
arch/x86/events/core.c
arch/x86/events/intel/core.c
arch/x86/events/intel/knc.c
arch/x86/events/intel/lbr.c
arch/x86/events/intel/p4.c
arch/x86/events/intel/p6.c
arch/x86/include/asm/msr.h
arch/x86/kernel/cpu/amd.c
arch/x86/kernel/cpu/bus_lock.c
arch/x86/kernel/cpu/common.c
arch/x86/kernel/cpu/mce/inject.c
arch/x86/kernel/cpu/mce/intel.c
arch/x86/kernel/cpu/resctrl/core.c
arch/x86/kvm/svm/sev.c
arch/x86/kvm/x86.c
arch/x86/lib/msr.c
drivers/cpufreq/intel_pstate.c
drivers/platform/x86/intel/speed_select_if/isst_if_common.c
drivers/platform/x86/intel/turbo_max_3.c
drivers/powercap/intel_rapl_msr.c
drivers/thermal/intel/therm_throt.c

index 5a2eda65db0a0c96441f118ed332e665de7cd5e8..85b55c1dc1622838e65f0c9741a43b0b909bd35c 100644 (file)
@@ -317,7 +317,7 @@ bool check_hw_exists(struct pmu *pmu, unsigned long *cntr_mask,
        if (rdmsrq_safe(reg, &val))
                goto msr_fail;
        val ^= 0xffffUL;
-       ret = wrmsrl_safe(reg, val);
+       ret = wrmsrq_safe(reg, val);
        ret |= rdmsrq_safe(reg, &val_new);
        if (ret || val != val_new)
                goto msr_fail;
index 1e8c89f5f6edb43a007be19fb4d4c06604d40c1e..394fa83b537bbc6852e6cffffb63a1fb31944872 100644 (file)
@@ -2976,13 +2976,13 @@ static void intel_pmu_reset(void)
        pr_info("clearing PMU state on CPU#%d\n", smp_processor_id());
 
        for_each_set_bit(idx, cntr_mask, INTEL_PMC_MAX_GENERIC) {
-               wrmsrl_safe(x86_pmu_config_addr(idx), 0ull);
-               wrmsrl_safe(x86_pmu_event_addr(idx),  0ull);
+               wrmsrq_safe(x86_pmu_config_addr(idx), 0ull);
+               wrmsrq_safe(x86_pmu_event_addr(idx),  0ull);
        }
        for_each_set_bit(idx, fixed_cntr_mask, INTEL_PMC_MAX_FIXED) {
                if (fixed_counter_disabled(idx, cpuc->pmu))
                        continue;
-               wrmsrl_safe(x86_pmu_fixed_ctr_addr(idx), 0ull);
+               wrmsrq_safe(x86_pmu_fixed_ctr_addr(idx), 0ull);
        }
 
        if (ds)
@@ -5621,7 +5621,7 @@ static bool check_msr(unsigned long msr, u64 mask)
        if (is_lbr_from(msr))
                val_tmp = lbr_from_signext_quirk_wr(val_tmp);
 
-       if (wrmsrl_safe(msr, val_tmp) ||
+       if (wrmsrq_safe(msr, val_tmp) ||
            rdmsrq_safe(msr, &val_new))
                return false;
 
index afb7801bdeceacc2ddebf8b015e257386eb08e99..425f6e6eed89fbf4c7e4517df3efeaad3fea6852 100644 (file)
@@ -182,7 +182,7 @@ knc_pmu_disable_event(struct perf_event *event)
        val = hwc->config;
        val &= ~ARCH_PERFMON_EVENTSEL_ENABLE;
 
-       (void)wrmsrl_safe(hwc->config_base + hwc->idx, val);
+       (void)wrmsrq_safe(hwc->config_base + hwc->idx, val);
 }
 
 static void knc_pmu_enable_event(struct perf_event *event)
@@ -193,7 +193,7 @@ static void knc_pmu_enable_event(struct perf_event *event)
        val = hwc->config;
        val |= ARCH_PERFMON_EVENTSEL_ENABLE;
 
-       (void)wrmsrl_safe(hwc->config_base + hwc->idx, val);
+       (void)wrmsrq_safe(hwc->config_base + hwc->idx, val);
 }
 
 static inline u64 knc_pmu_get_status(void)
index b55c59ab53d2fd4e88a4930e97cc78da2587ef38..48894f7a099f52b6963036b87dc76fb90c2bfa20 100644 (file)
@@ -1602,7 +1602,7 @@ void __init intel_pmu_arch_lbr_init(void)
                goto clear_arch_lbr;
 
        /* Apply the max depth of Arch LBR */
-       if (wrmsrl_safe(MSR_ARCH_LBR_DEPTH, lbr_nr))
+       if (wrmsrq_safe(MSR_ARCH_LBR_DEPTH, lbr_nr))
                goto clear_arch_lbr;
 
        x86_pmu.lbr_depth_mask = eax.split.lbr_depth_mask;
index 36d4cd23182c0e17b22bca583138a1bb789371e6..24d811a9608a521a44b7410172143ee4b03d769a 100644 (file)
@@ -897,8 +897,8 @@ static void p4_pmu_disable_pebs(void)
         * So at moment let leave metrics turned on forever -- it's
         * ok for now but need to be revisited!
         *
-        * (void)wrmsrl_safe(MSR_IA32_PEBS_ENABLE, 0);
-        * (void)wrmsrl_safe(MSR_P4_PEBS_MATRIX_VERT, 0);
+        * (void)wrmsrq_safe(MSR_IA32_PEBS_ENABLE, 0);
+        * (void)wrmsrq_safe(MSR_P4_PEBS_MATRIX_VERT, 0);
         */
 }
 
@@ -911,7 +911,7 @@ static inline void p4_pmu_disable_event(struct perf_event *event)
         * state we need to clear P4_CCCR_OVF, otherwise interrupt get
         * asserted again and again
         */
-       (void)wrmsrl_safe(hwc->config_base,
+       (void)wrmsrq_safe(hwc->config_base,
                p4_config_unpack_cccr(hwc->config) & ~P4_CCCR_ENABLE & ~P4_CCCR_OVF & ~P4_CCCR_RESERVED);
 }
 
@@ -944,8 +944,8 @@ static void p4_pmu_enable_pebs(u64 config)
 
        bind = &p4_pebs_bind_map[idx];
 
-       (void)wrmsrl_safe(MSR_IA32_PEBS_ENABLE, (u64)bind->metric_pebs);
-       (void)wrmsrl_safe(MSR_P4_PEBS_MATRIX_VERT,      (u64)bind->metric_vert);
+       (void)wrmsrq_safe(MSR_IA32_PEBS_ENABLE, (u64)bind->metric_pebs);
+       (void)wrmsrq_safe(MSR_P4_PEBS_MATRIX_VERT,      (u64)bind->metric_vert);
 }
 
 static void __p4_pmu_enable_event(struct perf_event *event)
@@ -979,8 +979,8 @@ static void __p4_pmu_enable_event(struct perf_event *event)
         */
        p4_pmu_enable_pebs(hwc->config);
 
-       (void)wrmsrl_safe(escr_addr, escr_conf);
-       (void)wrmsrl_safe(hwc->config_base,
+       (void)wrmsrq_safe(escr_addr, escr_conf);
+       (void)wrmsrq_safe(hwc->config_base,
                                (cccr & ~P4_CCCR_RESERVED) | P4_CCCR_ENABLE);
 }
 
@@ -1398,7 +1398,7 @@ __init int p4_pmu_init(void)
         */
        for_each_set_bit(i, x86_pmu.cntr_mask, X86_PMC_IDX_MAX) {
                reg = x86_pmu_config_addr(i);
-               wrmsrl_safe(reg, 0ULL);
+               wrmsrq_safe(reg, 0ULL);
        }
 
        return 0;
index 6a1fdbc6d20fefc8b9e9a2c98d25c0c6ff1305ef..35917a776bec1d11af5ca42321e66687e8881762 100644 (file)
@@ -163,7 +163,7 @@ p6_pmu_disable_event(struct perf_event *event)
        struct hw_perf_event *hwc = &event->hw;
        u64 val = P6_NOP_EVENT;
 
-       (void)wrmsrl_safe(hwc->config_base, val);
+       (void)wrmsrq_safe(hwc->config_base, val);
 }
 
 static void p6_pmu_enable_event(struct perf_event *event)
@@ -180,7 +180,7 @@ static void p6_pmu_enable_event(struct perf_event *event)
         * to actually enable the events.
         */
 
-       (void)wrmsrl_safe(hwc->config_base, val);
+       (void)wrmsrq_safe(hwc->config_base, val);
 }
 
 PMU_FORMAT_ATTR(event, "config:0-7"    );
index 58554b5e79112f19258e4651964f14d2601167c0..2317575542361582cf4d43e600020363fe838a76 100644 (file)
@@ -316,7 +316,7 @@ static __always_inline void wrmsrns(u32 msr, u64 val)
 /*
  * 64-bit version of wrmsr_safe():
  */
-static inline int wrmsrl_safe(u32 msr, u64 val)
+static inline int wrmsrq_safe(u32 msr, u64 val)
 {
        return wrmsr_safe(msr, (u32)val,  (u32)(val >> 32));
 }
@@ -385,7 +385,7 @@ static inline int rdmsrl_safe_on_cpu(unsigned int cpu, u32 msr_no, u64 *q)
 }
 static inline int wrmsrl_safe_on_cpu(unsigned int cpu, u32 msr_no, u64 q)
 {
-       return wrmsrl_safe(msr_no, q);
+       return wrmsrq_safe(msr_no, q);
 }
 static inline int rdmsr_safe_regs_on_cpu(unsigned int cpu, u32 regs[8])
 {
index 3c49a92931e8c388f4a31a239b3320460d2297cc..8937923c9f813bfd3caeba240a370ec38c56a2de 100644 (file)
@@ -612,7 +612,7 @@ static void early_init_amd(struct cpuinfo_x86 *c)
        if (!cpu_has(c, X86_FEATURE_HYPERVISOR) && !cpu_has(c, X86_FEATURE_IBPB_BRTYPE)) {
                if (c->x86 == 0x17 && boot_cpu_has(X86_FEATURE_AMD_IBPB))
                        setup_force_cpu_cap(X86_FEATURE_IBPB_BRTYPE);
-               else if (c->x86 >= 0x19 && !wrmsrl_safe(MSR_IA32_PRED_CMD, PRED_CMD_SBPB)) {
+               else if (c->x86 >= 0x19 && !wrmsrq_safe(MSR_IA32_PRED_CMD, PRED_CMD_SBPB)) {
                        setup_force_cpu_cap(X86_FEATURE_IBPB_BRTYPE);
                        setup_force_cpu_cap(X86_FEATURE_SBPB);
                }
@@ -790,7 +790,7 @@ static void init_amd_bd(struct cpuinfo_x86 *c)
        if ((c->x86_model >= 0x02) && (c->x86_model < 0x20)) {
                if (!rdmsrq_safe(MSR_F15H_IC_CFG, &value) && !(value & 0x1E)) {
                        value |= 0x1E;
-                       wrmsrl_safe(MSR_F15H_IC_CFG, value);
+                       wrmsrq_safe(MSR_F15H_IC_CFG, value);
                }
        }
 
@@ -840,7 +840,7 @@ void init_spectral_chicken(struct cpuinfo_x86 *c)
        if (!cpu_has(c, X86_FEATURE_HYPERVISOR)) {
                if (!rdmsrq_safe(MSR_ZEN2_SPECTRAL_CHICKEN, &value)) {
                        value |= MSR_ZEN2_SPECTRAL_CHICKEN_BIT;
-                       wrmsrl_safe(MSR_ZEN2_SPECTRAL_CHICKEN, value);
+                       wrmsrq_safe(MSR_ZEN2_SPECTRAL_CHICKEN, value);
                }
        }
 #endif
index 18bd8a876bb8d8a26f4e0ead48c95e0da5ff5869..a18d0f2ea8326e9ad97de4f20ecf5d6c05715273 100644 (file)
@@ -101,7 +101,7 @@ static bool split_lock_verify_msr(bool on)
                ctrl |= MSR_TEST_CTRL_SPLIT_LOCK_DETECT;
        else
                ctrl &= ~MSR_TEST_CTRL_SPLIT_LOCK_DETECT;
-       if (wrmsrl_safe(MSR_TEST_CTRL, ctrl))
+       if (wrmsrq_safe(MSR_TEST_CTRL, ctrl))
                return false;
        rdmsrq(MSR_TEST_CTRL, tmp);
        return ctrl == tmp;
index dfccea1f120b8e1a953addf786e5e14109d82e42..bb986ba8b90ca02edb2bd1eb6269ccaba33686be 100644 (file)
@@ -158,7 +158,7 @@ static void ppin_init(struct cpuinfo_x86 *c)
 
        /* If PPIN is disabled, try to enable */
        if (!(val & 2UL)) {
-               wrmsrl_safe(info->msr_ppin_ctl,  val | 2UL);
+               wrmsrq_safe(info->msr_ppin_ctl,  val | 2UL);
                rdmsrq_safe(info->msr_ppin_ctl, &val);
        }
 
@@ -2114,15 +2114,15 @@ static inline void idt_syscall_init(void)
                 * This does not cause SYSENTER to jump to the wrong location, because
                 * AMD doesn't allow SYSENTER in long mode (either 32- or 64-bit).
                 */
-               wrmsrl_safe(MSR_IA32_SYSENTER_CS, (u64)__KERNEL_CS);
-               wrmsrl_safe(MSR_IA32_SYSENTER_ESP,
+               wrmsrq_safe(MSR_IA32_SYSENTER_CS, (u64)__KERNEL_CS);
+               wrmsrq_safe(MSR_IA32_SYSENTER_ESP,
                            (unsigned long)(cpu_entry_stack(smp_processor_id()) + 1));
-               wrmsrl_safe(MSR_IA32_SYSENTER_EIP, (u64)entry_SYSENTER_compat);
+               wrmsrq_safe(MSR_IA32_SYSENTER_EIP, (u64)entry_SYSENTER_compat);
        } else {
                wrmsrl_cstar((unsigned long)entry_SYSCALL32_ignore);
-               wrmsrl_safe(MSR_IA32_SYSENTER_CS, (u64)GDT_ENTRY_INVALID_SEG);
-               wrmsrl_safe(MSR_IA32_SYSENTER_ESP, 0ULL);
-               wrmsrl_safe(MSR_IA32_SYSENTER_EIP, 0ULL);
+               wrmsrq_safe(MSR_IA32_SYSENTER_CS, (u64)GDT_ENTRY_INVALID_SEG);
+               wrmsrq_safe(MSR_IA32_SYSENTER_ESP, 0ULL);
+               wrmsrq_safe(MSR_IA32_SYSENTER_EIP, 0ULL);
        }
 
        /*
index 75ff605c8accf86869131d595c933e6768b34225..5226f8f7fc98c4b47716b52ab63474d1a2d823ce 100644 (file)
@@ -747,9 +747,9 @@ static void check_hw_inj_possible(void)
 
                toggle_hw_mce_inject(cpu, true);
 
-               wrmsrl_safe(mca_msr_reg(bank, MCA_STATUS), status);
+               wrmsrq_safe(mca_msr_reg(bank, MCA_STATUS), status);
                rdmsrq_safe(mca_msr_reg(bank, MCA_STATUS), &status);
-               wrmsrl_safe(mca_msr_reg(bank, MCA_STATUS), 0);
+               wrmsrq_safe(mca_msr_reg(bank, MCA_STATUS), 0);
 
                if (!status) {
                        hw_injection_possible = false;
index 9b9ef7d421dcc5e0eb5a13af98242fa5c3264cd9..efcf21e9552e33ccf5f229bf6f9604f9a282fda6 100644 (file)
@@ -463,7 +463,7 @@ static void intel_imc_init(struct cpuinfo_x86 *c)
                if (rdmsrq_safe(MSR_ERROR_CONTROL, &error_control))
                        return;
                error_control |= 2;
-               wrmsrl_safe(MSR_ERROR_CONTROL, error_control);
+               wrmsrq_safe(MSR_ERROR_CONTROL, error_control);
                break;
        }
 }
index 6d408f78873cc98056e6b7942658f79f9e6644f0..280d6900726becbed4f57f21f57ad7015fe691a7 100644 (file)
@@ -145,7 +145,7 @@ static inline void cache_alloc_hsw_probe(void)
        struct rdt_resource *r  = &hw_res->r_resctrl;
        u64 max_cbm = BIT_ULL_MASK(20) - 1, l3_cbm_0;
 
-       if (wrmsrl_safe(MSR_IA32_L3_CBM_BASE, max_cbm))
+       if (wrmsrq_safe(MSR_IA32_L3_CBM_BASE, max_cbm))
                return;
 
        rdmsrq(MSR_IA32_L3_CBM_BASE, l3_cbm_0);
index 0bc708ee278877ffa00a26a21e4c948214b4bc8e..a4aabd666665d26d70beab8bfd95afbd8f41b697 100644 (file)
@@ -3119,7 +3119,7 @@ static void sev_flush_encrypted_page(struct kvm_vcpu *vcpu, void *va)
         * back to WBINVD if this faults so as not to make any problems worse
         * by leaving stale encrypted data in the cache.
         */
-       if (WARN_ON_ONCE(wrmsrl_safe(MSR_AMD64_VM_PAGE_FLUSH, addr | asid)))
+       if (WARN_ON_ONCE(wrmsrq_safe(MSR_AMD64_VM_PAGE_FLUSH, addr | asid)))
                goto do_wbinvd;
 
        return;
index 134c4a186e9ca8a294bdf58ef09d7e07b57ceced..b922428236aaa775d76a7a5424fbc926c5dd200e 100644 (file)
@@ -593,7 +593,7 @@ static int kvm_probe_user_return_msr(u32 msr)
        ret = rdmsrq_safe(msr, &val);
        if (ret)
                goto out;
-       ret = wrmsrl_safe(msr, val);
+       ret = wrmsrq_safe(msr, val);
 out:
        preempt_enable();
        return ret;
@@ -644,7 +644,7 @@ int kvm_set_user_return_msr(unsigned slot, u64 value, u64 mask)
        value = (value & mask) | (msrs->values[slot].host & ~mask);
        if (value == msrs->values[slot].curr)
                return 0;
-       err = wrmsrl_safe(kvm_uret_msrs_list[slot], value);
+       err = wrmsrq_safe(kvm_uret_msrs_list[slot], value);
        if (err)
                return 1;
 
@@ -13654,7 +13654,7 @@ int kvm_spec_ctrl_test_value(u64 value)
 
        if (rdmsrq_safe(MSR_IA32_SPEC_CTRL, &saved_value))
                ret = 1;
-       else if (wrmsrl_safe(MSR_IA32_SPEC_CTRL, value))
+       else if (wrmsrq_safe(MSR_IA32_SPEC_CTRL, value))
                ret = 1;
        else
                wrmsrq(MSR_IA32_SPEC_CTRL, saved_value);
index d533881ce9a0c435391959b1f8b78a7ca356eda7..4ef7c6dcbea68fef8b677b573ac7f860585ae4f6 100644 (file)
@@ -58,7 +58,7 @@ static int msr_read(u32 msr, struct msr *m)
  */
 static int msr_write(u32 msr, struct msr *m)
 {
-       return wrmsrl_safe(msr, m->q);
+       return wrmsrq_safe(msr, m->q);
 }
 
 static inline int __flip_bit(u32 msr, u8 bit, bool set)
index a57f494f10326657e64cbbeaa2f8e8d82fd24271..83a43c64c4048b6e6a864e4a80ce586c41924586 100644 (file)
@@ -1894,7 +1894,7 @@ void notify_hwp_interrupt(void)
        return;
 
 ack_intr:
-       wrmsrl_safe(MSR_HWP_STATUS, 0);
+       wrmsrq_safe(MSR_HWP_STATUS, 0);
        raw_spin_unlock_irqrestore(&hwp_notify_lock, flags);
 }
 
index 1ebf5624139a72f8af2f7c3e5e72368fe6e12fc2..4368d598beac35adda21a0d44cd3547fe149b753 100644 (file)
@@ -211,7 +211,7 @@ static void isst_restore_msr_local(int cpu)
                hash_for_each_possible(isst_hash, sst_cmd, hnode,
                                       punit_msr_white_list[i]) {
                        if (!sst_cmd->mbox_cmd_type && sst_cmd->cpu == cpu)
-                               wrmsrl_safe(sst_cmd->cmd, sst_cmd->data);
+                               wrmsrq_safe(sst_cmd->cmd, sst_cmd->data);
                }
        }
        mutex_unlock(&isst_hash_lock);
index c411ec53145e63182db230caa827f4435b54b7d4..7e538bbd5b5088b8a48f96e0d13ba9e9a1579386 100644 (file)
@@ -41,7 +41,7 @@ static int get_oc_core_priority(unsigned int cpu)
        value = cmd << MSR_OC_MAILBOX_CMD_OFFSET;
        /* Set the busy bit to indicate OS is trying to issue command */
        value |=  BIT_ULL(MSR_OC_MAILBOX_BUSY_BIT);
-       ret = wrmsrl_safe(MSR_OC_MAILBOX, value);
+       ret = wrmsrq_safe(MSR_OC_MAILBOX, value);
        if (ret) {
                pr_debug("cpu %d OC mailbox write failed\n", cpu);
                return ret;
index 9100300ff868c54049551f500e54ffb940f8a65b..c7f15b01fad878455a7368a60c3bae037d43b309 100644 (file)
@@ -123,7 +123,7 @@ static void rapl_msr_update_func(void *info)
        val &= ~ra->mask;
        val |= ra->value;
 
-       ra->err = wrmsrl_safe(ra->reg.msr, val);
+       ra->err = wrmsrq_safe(ra->reg.msr, val);
 }
 
 static int rapl_msr_write_raw(int cpu, struct reg_action *ra)
index c4f3f13cd9e804bda09199246805546318387ff9..debc94e2dc169775752cb88b6aaa5fa308f09902 100644 (file)
@@ -643,7 +643,7 @@ static void notify_thresholds(__u64 msr_val)
 
 void __weak notify_hwp_interrupt(void)
 {
-       wrmsrl_safe(MSR_HWP_STATUS, 0);
+       wrmsrq_safe(MSR_HWP_STATUS, 0);
 }
 
 /* Thermal transition interrupt handler */