.cpuid_needs_ecx = true, .cpuid_ecx = 1,
.cpuid_reg = R_EAX,
.tcg_features = 0,
- .unmigratable_flags = FEAT_XSAVES,
},
};
#define MSR_VM_HSAVE_PA 0xc0010117
#define MSR_IA32_BNDCFGS 0x00000d90
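+/* MSR_IA32_XSS: bitmap of supervisor state components enabled for XSAVES/XRSTORS */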
+#define MSR_IA32_XSS 0x00000da0
#define XSTATE_FP (1ULL << 0)
#define XSTATE_SSE (1ULL << 1)
uint64_t xstate_bv;
uint64_t xcr0;
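+ /* guest MSR_IA32_XSS value, migrated via the "cpu/xss" subsection */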
+ uint64_t xss;
TPRAccess tpr_access_type;
} CPUX86State;
static bool has_msr_hv_vapic;
static bool has_msr_hv_tsc;
static bool has_msr_mtrr;
+static bool has_msr_xss;
static bool has_msr_architectural_pmu;
static uint32_t num_architectural_pmu_counters;
has_msr_bndcfgs = true;
continue;
}
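+ /* note whether the kernel's MSR list includes MSR_IA32_XSS, so we
+ * only save/restore it on kernels that support it */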
+ if (kvm_msr_list->indices[i] == MSR_IA32_XSS) {
+ has_msr_xss = true;
+ continue;
+ }
}
}
if (has_msr_bndcfgs) {
kvm_msr_entry_set(&msrs[n++], MSR_IA32_BNDCFGS, env->msr_bndcfgs);
}
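+ /* write MSR_IA32_XSS only when the kernel advertised it */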
+ if (has_msr_xss) {
+ kvm_msr_entry_set(&msrs[n++], MSR_IA32_XSS, env->xss);
+ }
#ifdef TARGET_X86_64
if (lm_capable_kernel) {
kvm_msr_entry_set(&msrs[n++], MSR_CSTAR, env->cstar);
if (has_msr_bndcfgs) {
msrs[n++].index = MSR_IA32_BNDCFGS;
}
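+ /* request MSR_IA32_XSS in the read list when available */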
+ if (has_msr_xss) {
+ msrs[n++].index = MSR_IA32_XSS;
+ }
+
if (!env->tsc_valid) {
msrs[n++].index = MSR_IA32_TSC;
case MSR_IA32_BNDCFGS:
env->msr_bndcfgs = msrs[i].data;
break;
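+ /* read back the guest's supervisor state mask */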
+ case MSR_IA32_XSS:
+ env->xss = msrs[i].data;
+ break;
default:
if (msrs[i].index >= MSR_MC0_CTL &&
msrs[i].index < MSR_MC0_CTL + (env->mcg_cap & 0xff) * 4) {
}
};
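+/*
+ * The "cpu/xss" subsection is only sent when the guest has written a
+ * nonzero value, so migration to older QEMU remains possible for
+ * guests that never touch MSR_IA32_XSS.
+ */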
+static bool xss_needed(void *opaque)
+{
+ X86CPU *cpu = opaque;
+ CPUX86State *env = &cpu->env;
+
+ return env->xss != 0;
+}
+
+static const VMStateDescription vmstate_xss = {
+ .name = "cpu/xss",
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .fields = (VMStateField[]) {
+ VMSTATE_UINT64(env.xss, X86CPU),
+ VMSTATE_END_OF_LIST()
+ }
+};
+
VMStateDescription vmstate_x86_cpu = {
.name = "cpu",
.version_id = 12,
}, {
.vmsd = &vmstate_avx512,
.needed = avx512_needed,
+ }, {
+ .vmsd = &vmstate_xss,
+ .needed = xss_needed,
}, {
/* empty */
}