The upstream commit
52817587e706 ('x86/cpufeatures: Disentangle SSBD enumeration')
disentangles SSBD enumeration. We did not backport that commit because
there was nothing to disentangle on UEK4: our cpufeature was already
synthetic.
That commit also renames X86_FEATURE_AMD_SSBD to X86_FEATURE_LS_CFG_SSBD. We
need this rename to avoid a conflicting cpu feature name when backporting
upstream commit
6ac2f49edb1e ('x86/bugs: Add AMD's SPEC_CTRL MSR usage')
which introduces SSBD control via the SPEC_CTRL MSR, the preferred method.
Orabug: 28870524
CVE: CVE-2018-3639
Signed-off-by: Mihai Carabas <mihai.carabas@oracle.com>
Reviewed-by: Darren Kenny <darren.kenny@oracle.com>
Reviewed-by: Boris Ostrovsky <boris.ostrovsky@oracle.com>
Signed-off-by: Brian Maly <brian.maly@oracle.com>
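
The rename frees the X86_FEATURE_AMD_SSBD name for 6ac2f49edb1e, which
reuses it for the CPUID-enumerated SPEC_CTRL-based bit (CPUID 0x80000008
EBX bit 24). For illustration only, a sketch of how the three SSBD
flavors map to MSR writes once the whole series is in; the MSR constants
are architectural, the helper is hypothetical and not part of this patch:

	/* Hypothetical helper, illustration only. */
	static void ssbd_enable_sketch(void)
	{
		if (boot_cpu_has(X86_FEATURE_AMD_SSBD))		/* from 6ac2f49edb1e */
			wrmsrl(MSR_IA32_SPEC_CTRL, SPEC_CTRL_SSBD);	/* MSR 0x48, bit 2 */
		else if (boot_cpu_has(X86_FEATURE_VIRT_SSBD))	/* paravirt interface */
			wrmsrl(MSR_AMD64_VIRT_SPEC_CTRL, SPEC_CTRL_SSBD); /* MSR 0xc001011f */
		else if (boot_cpu_has(X86_FEATURE_LS_CFG_SSBD))	/* bare-metal AMD */
			wrmsrl(MSR_AMD64_LS_CFG,			/* MSR 0xc0011020 */
			       x86_amd_ls_cfg_base | x86_amd_ls_cfg_ssbd_mask);
	}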
#define X86_FEATURE_INTEL_PT ( 7*32+15) /* Intel Processor Trace */
#define X86_FEATURE_VIRT_SSBD ( 7*32+16) /* Virtualized Speculative Store Bypass Disable */
#define X86_FEATURE_ZEN ( 7*32+17) /* "" CPU is AMD family 0x17 (Zen) */
-#define X86_FEATURE_AMD_SSBD ( 7*32+18) /* "" AMD RDS implementation */
+#define X86_FEATURE_LS_CFG_SSBD ( 7*32+18) /* "" AMD SSBD implementation via LS_CFG MSR */
#define X86_FEATURE_RSB_CTXSW ( 7*32+19) /* "" Fill RSB on context switches */
#define X86_FEATURE_IBRS ( 7*32+20) /* Indirect Branch Restricted Speculation */
#define X86_FEATURE_STIBP ( 7*32+21) /* Single Thread Indirect Branch Predictors */
*/
if (!rdmsrl_safe(MSR_AMD64_LS_CFG, &x86_amd_ls_cfg_base)) {
setup_force_cpu_cap(X86_FEATURE_SSBD);
- setup_force_cpu_cap(X86_FEATURE_AMD_SSBD);
+ setup_force_cpu_cap(X86_FEATURE_LS_CFG_SSBD);
x86_amd_ls_cfg_ssbd_mask = 1ULL << bit;
}
}
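
For reference (not part of this hunk), the bit shifted into
x86_amd_ls_cfg_ssbd_mask comes from a per-family switch earlier in the
upstream function; a sketch of that context, which UEK4 may wrap
differently:

	switch (c->x86) {
	case 0x15: bit = 54; break;	/* Bulldozer line */
	case 0x16: bit = 33; break;	/* Jaguar line */
	case 0x17: bit = 10; break;	/* Zen */
	}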
/* AMD CPUs don't reset SS attributes on SYSRET */
set_cpu_bug(c, X86_BUG_SYSRET_SS_ATTRS);
- if (boot_cpu_has(X86_FEATURE_AMD_SSBD) ||
+ if (boot_cpu_has(X86_FEATURE_LS_CFG_SSBD) ||
cpu_has(c, X86_FEATURE_VIRT_SSBD)) {
set_cpu_cap(c, X86_FEATURE_SSBD);
- set_cpu_cap(c, X86_FEATURE_AMD_SSBD);
+ set_cpu_cap(c, X86_FEATURE_LS_CFG_SSBD);
}
}
* If SSBD is not handled in MSR_SPEC_CTRL on AMD, update
* MSR_AMD64_LS_CFG or MSR_VIRT_SPEC_CTRL if supported.
*/
- if (!static_cpu_has(X86_FEATURE_AMD_SSBD) &&
+ if (!static_cpu_has(X86_FEATURE_LS_CFG_SSBD) &&
!static_cpu_has(X86_FEATURE_VIRT_SSBD))
return;
if (boot_cpu_has(X86_FEATURE_VIRT_SSBD))
wrmsrl(MSR_AMD64_VIRT_SPEC_CTRL, SPEC_CTRL_SSBD);
- else if (boot_cpu_has(X86_FEATURE_AMD_SSBD))
+ else if (boot_cpu_has(X86_FEATURE_LS_CFG_SSBD))
wrmsrl(MSR_AMD64_LS_CFG, msrval);
}
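
For reference, msrval on the LS_CFG leg is composed just above this hunk
from the cached boot value and the per-family disable bit; upstream it
reads:

	u64 msrval = x86_amd_ls_cfg_base | x86_amd_ls_cfg_ssbd_mask;

SPEC_CTRL_SSBD itself is bit 2, the same bit position the SPEC_CTRL MSR
uses, which is what lets MSR_AMD64_VIRT_SPEC_CTRL take the value
directly.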
{
if (static_cpu_has(X86_FEATURE_VIRT_SSBD))
amd_set_ssb_virt_state(tifn);
- else if (static_cpu_has(X86_FEATURE_AMD_SSBD))
+ else if (static_cpu_has(X86_FEATURE_LS_CFG_SSBD))
amd_set_core_ssb_state(tifn);
else
intel_set_ssb_state(tifn);
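
The three setters push the same per-task TIF_SSBD state to whichever MSR
the CPU supports; upstream they look roughly like this (UEK4 helper names
may differ):

	static __always_inline void amd_set_ssb_virt_state(unsigned long tifn)
	{
		/* Paravirt interface: write the SSBD bit as-is. */
		wrmsrl(MSR_AMD64_VIRT_SPEC_CTRL, ssbd_tif_to_spec_ctrl(tifn));
	}

	static __always_inline void amd_set_core_ssb_state(unsigned long tifn)
	{
		/* Bare metal: merge the per-family bit into the cached LS_CFG base. */
		u64 msr = x86_amd_ls_cfg_base | ssbd_tif_to_amd_ls_cfg(tifn);

		wrmsrl(MSR_AMD64_LS_CFG, msr);
	}

	static __always_inline void intel_set_ssb_state(unsigned long tifn)
	{
		/* SPEC_CTRL: merge the SSBD bit into the cached base value. */
		u64 msr = x86_spec_ctrl_base | ssbd_tif_to_spec_ctrl(tifn);

		wrmsrl(MSR_IA32_SPEC_CTRL, msr);
	}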
if ( !boot_cpu_has(X86_FEATURE_IBPB) )
entry->ebx &= ~(1u << KVM_CPUID_BIT_IBPB);
- if (boot_cpu_has(X86_FEATURE_AMD_SSBD))
+ if (boot_cpu_has(X86_FEATURE_LS_CFG_SSBD))
entry->ebx |= KF(VIRT_SSBD);
break;
}
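
KF(VIRT_SSBD) advertises bit 25 of CPUID 0x80000008 EBX to the guest (bit
12 is IBPB). A hypothetical userspace check a guest could run to confirm
the bit is visible; not part of the patch:

	#include <cpuid.h>
	#include <stdio.h>

	int main(void)
	{
		unsigned int eax, ebx, ecx, edx;

		/* CPUID leaf 0x80000008 EBX: bit 12 = IBPB, bit 25 = VIRT_SSBD. */
		if (__get_cpuid(0x80000008, &eax, &ebx, &ecx, &edx))
			printf("VIRT_SSBD %savailable\n",
			       (ebx & (1u << 25)) ? "" : "not ");
		return 0;
	}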