Expose the new virtualized architectural mechanism, VIRT_SSBD, for using
speculative store bypass disable (SSBD) under SVM. This will allow guests
to use SSBD on hardware that uses non-architectural mechanisms for enabling
SSBD.
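For context only (not part of this patch): a guest that sees CPUID 0x80000008 EBX bit 25 (VIRT_SSBD) set can request SSBD by setting bit 2 (SPEC_CTRL_SSBD) of MSR_AMD64_VIRT_SPEC_CTRL (0xc001011f), which the svm_set_msr change below accepts. The following is a hypothetical guest-side sketch, assuming a Linux guest with the msr module loaded and root privileges; it only illustrates how a guest could consume the interface, it is not the KVM-side mechanism this patch adds.

/*
 * Illustrative guest-side sketch (assumption: Linux guest, msr module
 * loaded, run as root). Checks CPUID 0x80000008 EBX bit 25 (VIRT_SSBD)
 * and then requests SSBD by writing SPEC_CTRL_SSBD (bit 2) to
 * MSR_AMD64_VIRT_SPEC_CTRL via /dev/cpu/0/msr.
 */
#include <cpuid.h>
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

#define MSR_AMD64_VIRT_SPEC_CTRL 0xc001011f
#define SPEC_CTRL_SSBD           (1ULL << 2)

int main(void)
{
	unsigned int eax, ebx, ecx, edx;
	uint64_t val = SPEC_CTRL_SSBD;
	int fd;

	/* CPUID 0x80000008 EBX bit 25 advertises VIRT_SSBD */
	if (!__get_cpuid(0x80000008, &eax, &ebx, &ecx, &edx) ||
	    !(ebx & (1u << 25))) {
		fprintf(stderr, "VIRT_SSBD not enumerated\n");
		return 1;
	}

	/* The msr driver uses the file offset as the MSR address */
	fd = open("/dev/cpu/0/msr", O_WRONLY);
	if (fd < 0 || pwrite(fd, &val, sizeof(val),
			     MSR_AMD64_VIRT_SPEC_CTRL) != sizeof(val)) {
		perror("MSR_AMD64_VIRT_SPEC_CTRL write");
		return 1;
	}
	close(fd);
	return 0;
}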
[ tglx: Folded the migration fixup from Paolo Bonzini ]
Signed-off-by: Tom Lendacky <thomas.lendacky@amd.com>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Orabug: 28063992
CVE: CVE-2018-3639
(cherry picked from commit bc226f07)
Signed-off-by: Mihai Carabas <mihai.carabas@oracle.com>
Reviewed-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
Reviewed-by: Darren Kenny <darren.kenny@oracle.com>
Signed-off-by: Brian Maly <brian.maly@oracle.com>
Conflicts:
arch/x86/include/asm/kvm_host.h
arch/x86/kernel/cpu/common.c
arch/x86/kvm/cpuid.c
arch/x86/kvm/svm.c
arch/x86/kvm/vmx.c
arch/x86/kvm/x86.c
[We did not have a cpu_has_high_real_mode_segbase entry at all.
Also, msr_info is not in this patchset; I will take care of it
in Orabug: 28069548 in a future patchset.]
Signed-off-by: Brian Maly <brian.maly@oracle.com>
int (*hardware_setup)(void); /* __init */
void (*hardware_unsetup)(void); /* __exit */
bool (*cpu_has_accelerated_tpr)(void);
+ bool (*has_emulated_msr)(int index);
void (*cpuid_update)(struct kvm_vcpu *vcpu);
/* Create, but do not attach this VCPU */
/* AMD CPUs don't reset SS attributes on SYSRET */
set_cpu_bug(c, X86_BUG_SYSRET_SS_ATTRS);
- if (boot_cpu_has(X86_FEATURE_AMD_SSBD)) {
+ if (boot_cpu_has(X86_FEATURE_AMD_SSBD) ||
+ cpu_has(c, X86_FEATURE_VIRT_SSBD)) {
set_cpu_cap(c, X86_FEATURE_SSBD);
set_cpu_cap(c, X86_FEATURE_AMD_SSBD);
}
/* CPUID[eax=0x80000008].ebx */
#define KVM_CPUID_BIT_IBPB_SUPPORT 12
+#define KVM_CPUID_BIT_VIRT_SSBD 25
#define KF(x) bit(KVM_CPUID_BIT_##x)
/* cpuid 0x80000008.ebx */
const u32 kvm_cpuid_80000008_ebx_x86_features =
- KF(IBPB_SUPPORT);
+ KF(IBPB_SUPPORT) | KF(VIRT_SSBD);
/* all calls to cpuid_count() should be made on the same cpu */
get_cpu();
g_phys_as = phys_as;
entry->eax = g_phys_as | (virt_as << 8);
entry->edx = 0;
+
+ if (boot_cpu_has(X86_FEATURE_VIRT_SSBD))
+ entry->ebx |= KF(VIRT_SSBD);
+
entry->ebx &= kvm_cpuid_80000008_ebx_x86_features;
+
if ( !boot_cpu_has(X86_FEATURE_IBPB) )
- entry->ebx &= !(1u << KVM_CPUID_BIT_IBPB_SUPPORT);
+ entry->ebx &= ~(1u << KVM_CPUID_BIT_IBPB_SUPPORT);
+
+ if (boot_cpu_has(X86_FEATURE_AMD_SSBD))
+ entry->ebx |= KF(VIRT_SSBD);
break;
}
case 0x80000019:
case MSR_IA32_SPEC_CTRL:
*data = svm->spec_ctrl;
break;
+ case MSR_AMD64_VIRT_SPEC_CTRL:
+ *data = svm->virt_spec_ctrl;
+ break;
case MSR_IA32_UCODE_REV:
*data = 0x01000065;
break;
case MSR_IA32_SPEC_CTRL:
svm->spec_ctrl = data;
break;
+ case MSR_AMD64_VIRT_SPEC_CTRL:
+ if (data & ~SPEC_CTRL_SSBD)
+ return 1;
+
+ svm->virt_spec_ctrl = data;
+ break;
default:
return kvm_set_msr_common(vcpu, msr);
}
return false;
}
+static bool svm_has_emulated_msr(int index)
+{
+ return true;
+}
+
static u64 svm_get_mt_mask(struct kvm_vcpu *vcpu, gfn_t gfn, bool is_mmio)
{
return 0;
.hardware_enable = svm_hardware_enable,
.hardware_disable = svm_hardware_disable,
.cpu_has_accelerated_tpr = svm_cpu_has_accelerated_tpr,
+ .has_emulated_msr = svm_has_emulated_msr,
.vcpu_create = svm_create_vcpu,
.vcpu_free = svm_free_vcpu,
local_irq_enable();
}
+static bool vmx_has_emulated_msr(int index)
+{
+ switch (index) {
+ case MSR_AMD64_VIRT_SPEC_CTRL:
+ /* This is AMD only. */
+ return false;
+ default:
+ return true;
+ }
+}
+
static bool vmx_mpx_supported(void)
{
return (vmcs_config.vmexit_ctrl & VM_EXIT_CLEAR_BNDCFGS) &&
.hardware_enable = hardware_enable,
.hardware_disable = hardware_disable,
.cpu_has_accelerated_tpr = report_flexpriority,
+ .has_emulated_msr = vmx_has_emulated_msr,
.vcpu_create = vmx_create_vcpu,
.vcpu_free = vmx_free_vcpu,