Move the handful of MSR_IA32_VMX_MISC bit defines that are currently in
msr-index.h to vmx.h so that all of the VMX_MISC defines and wrappers can
be found in a single location.

Opportunistically use BIT_ULL() instead of open coding hex values, add
defines for feature bits that are architecturally defined, and move the
defines down in the file so that they are colocated with the helpers for
getting fields from VMX_MISC.

No functional change intended.

Cc: Shan Kang <shan.kang@intel.com>
Cc: Kai Huang <kai.huang@intel.com>
Signed-off-by: Xin Li <xin3.li@intel.com>
[sean: split to separate patch, write changelog]
Reviewed-by: Zhao Liu <zhao1.liu@intel.com>
Reviewed-by: Kai Huang <kai.huang@intel.com>
Reviewed-by: Xiaoyao Li <xiaoyao.li@intel.com>
Link: https://lore.kernel.org/r/20240605231918.2915961-9-seanjc@google.com
Signed-off-by: Sean Christopherson <seanjc@google.com>
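
(Editor's note, not part of the patch: a minimal userspace sanity-check sketch
showing that the BIT_ULL()/GENMASK_ULL() forms added below are bit-for-bit
identical to the open-coded hex values being removed.  BIT_ULL() and
GENMASK_ULL() here are local stand-ins mirroring the kernel's <linux/bits.h>
macros; main() and the assertion strings are illustrative only.)

	#include <assert.h>

	/* Local stand-ins mirroring the kernel's <linux/bits.h> helpers. */
	#define BIT_ULL(n)         (1ULL << (n))
	#define GENMASK_ULL(h, l)  (((~0ULL) >> (63 - (h))) & ((~0ULL) << (l)))

	int main(void)
	{
		/* New bit-position forms on the left, old open-coded values on the right. */
		static_assert(GENMASK_ULL(4, 0) == 0x0000001fULL, "preemption timer rate mask");
		static_assert(BIT_ULL(5)  == 0x00000020ULL, "save EFER.LMA on VM-exit");
		static_assert(BIT_ULL(6)  == 0x00000040ULL, "HLT activity state");
		static_assert(BIT_ULL(8)  == 0x00000100ULL, "wait-for-SIPI activity state");
		static_assert(BIT_ULL(14) == (1ULL << 14),  "Intel PT in VMX operation");
		static_assert(BIT_ULL(29) == (1ULL << 29),  "VMWRITE to shadow RO fields");
		static_assert(BIT_ULL(30) == 0x40000000ULL, "zero-length instruction injection");
		return 0;
	}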
 
 #define MSR_IA32_SMBA_BW_BASE          0xc0000280
 #define MSR_IA32_EVT_CFG_BASE          0xc0000400
 
-/* MSR_IA32_VMX_MISC bits */
-#define MSR_IA32_VMX_MISC_INTEL_PT                 (1ULL << 14)
-#define MSR_IA32_VMX_MISC_VMWRITE_SHADOW_RO_FIELDS (1ULL << 29)
-#define MSR_IA32_VMX_MISC_PREEMPTION_TIMER_SCALE   0x1F
-
 /* AMD-V MSRs */
 #define MSR_VM_CR                       0xc0010114
 #define MSR_VM_IGNNE                    0xc0010115
 
 
 #define VM_ENTRY_ALWAYSON_WITHOUT_TRUE_MSR     0x000011ff
 
-#define VMX_MISC_PREEMPTION_TIMER_RATE_MASK    0x0000001f
-#define VMX_MISC_SAVE_EFER_LMA                 0x00000020
-#define VMX_MISC_ACTIVITY_HLT                  0x00000040
-#define VMX_MISC_ACTIVITY_WAIT_SIPI            0x00000100
-#define VMX_MISC_ZERO_LEN_INS                  0x40000000
-#define VMX_MISC_MSR_LIST_MULTIPLIER           512
-
 /* VMFUNC functions */
 #define VMFUNC_CONTROL_BIT(x)  BIT((VMX_FEATURE_##x & 0x1f) - 28)
 
        return revision | ((u64)size << 32) | ((u64)memtype << 50);
 }
 
+#define VMX_MISC_PREEMPTION_TIMER_RATE_MASK    GENMASK_ULL(4, 0)
+#define VMX_MISC_SAVE_EFER_LMA                 BIT_ULL(5)
+#define VMX_MISC_ACTIVITY_HLT                  BIT_ULL(6)
+#define VMX_MISC_ACTIVITY_SHUTDOWN             BIT_ULL(7)
+#define VMX_MISC_ACTIVITY_WAIT_SIPI            BIT_ULL(8)
+#define VMX_MISC_INTEL_PT                      BIT_ULL(14)
+#define VMX_MISC_RDMSR_IN_SMM                  BIT_ULL(15)
+#define VMX_MISC_VMXOFF_BLOCK_SMI              BIT_ULL(28)
+#define VMX_MISC_VMWRITE_SHADOW_RO_FIELDS      BIT_ULL(29)
+#define VMX_MISC_ZERO_LEN_INS                  BIT_ULL(30)
+#define VMX_MISC_MSR_LIST_MULTIPLIER           512
+
 static inline int vmx_misc_preemption_timer_rate(u64 vmx_misc)
 {
        return vmx_misc & VMX_MISC_PREEMPTION_TIMER_RATE_MASK;
 
 static inline bool cpu_has_vmx_shadow_vmcs(void)
 {
        /* check if the cpu supports writing r/o exit information fields */
-       if (!(vmcs_config.misc & MSR_IA32_VMX_MISC_VMWRITE_SHADOW_RO_FIELDS))
+       if (!(vmcs_config.misc & VMX_MISC_VMWRITE_SHADOW_RO_FIELDS))
                return false;
 
        return vmcs_config.cpu_based_2nd_exec_ctrl &
 
 static inline bool cpu_has_vmx_intel_pt(void)
 {
-       return (vmcs_config.misc & MSR_IA32_VMX_MISC_INTEL_PT) &&
+       return (vmcs_config.misc & VMX_MISC_INTEL_PT) &&
                (vmcs_config.cpu_based_2nd_exec_ctrl & SECONDARY_EXEC_PT_USE_GPA) &&
                (vmcs_config.vmentry_ctrl & VM_ENTRY_LOAD_IA32_RTIT_CTL);
 }
 
 {
        msrs->misc_low = (u32)vmcs_conf->misc & VMX_MISC_SAVE_EFER_LMA;
        msrs->misc_low |=
-               MSR_IA32_VMX_MISC_VMWRITE_SHADOW_RO_FIELDS |
+               VMX_MISC_VMWRITE_SHADOW_RO_FIELDS |
                VMX_MISC_EMULATED_PREEMPTION_TIMER_RATE |
                VMX_MISC_ACTIVITY_HLT |
                VMX_MISC_ACTIVITY_WAIT_SIPI;
 
 static inline bool nested_cpu_has_vmwrite_any_field(struct kvm_vcpu *vcpu)
 {
        return to_vmx(vcpu)->nested.msrs.misc_low &
-               MSR_IA32_VMX_MISC_VMWRITE_SHADOW_RO_FIELDS;
+               VMX_MISC_VMWRITE_SHADOW_RO_FIELDS;
 }
 
 static inline bool nested_cpu_has_zero_length_injection(struct kvm_vcpu *vcpu)