has_msr_hv_hypercall = true;
}
+ env->xen_vcpu_info_gpa = UINT64_MAX;
+ env->xen_vcpu_info_default_gpa = UINT64_MAX;
+
if (cs->kvm_state->xen_version) {
#ifdef CONFIG_XEN_EMU
struct kvm_cpuid_entry2 *xen_max_leaf;
kvm_arch_set_tsc_khz(cpu);
}
+#ifdef CONFIG_XEN_EMU
+ if (level == KVM_PUT_FULL_STATE) {
+ uint64_t gpa = x86_cpu->env.xen_vcpu_info_gpa;
+ if (gpa == UINT64_MAX) {
+ gpa = x86_cpu->env.xen_vcpu_info_default_gpa;
+ }
+
+ if (gpa != UINT64_MAX) {
+ ret = kvm_xen_set_vcpu_attr(cpu, KVM_XEN_VCPU_ATTR_TYPE_VCPU_INFO, gpa);
+ if (ret < 0) {
+ return ret;
+ }
+ }
+ }
+#endif
+
ret = kvm_getput_regs(x86_cpu, 1);
if (ret < 0) {
return ret;
return true;
}
+/*
+ * Set a per-vCPU Xen attribute (e.g. the vcpu_info GPA) via the
+ * KVM_XEN_VCPU_SET_ATTR ioctl. Returns 0 on success, -errno on failure.
+ */
+int kvm_xen_set_vcpu_attr(CPUState *cs, uint16_t type, uint64_t gpa)
+{
+    /*
+     * Zero-initialize so the struct's padding bytes (and any union
+     * members beyond u.gpa) are not passed to the kernel uninitialized.
+     */
+    struct kvm_xen_vcpu_attr xhsi = {
+        .type = type,
+        .u.gpa = gpa,
+    };
+
+    trace_kvm_xen_set_vcpu_attr(cs->cpu_index, type, gpa);
+
+    return kvm_vcpu_ioctl(cs, KVM_XEN_VCPU_SET_ATTR, &xhsi);
+}
+
+/* Runs on the target vCPU: record the default vcpu_info GPA and apply it. */
+static void do_set_vcpu_info_default_gpa(CPUState *cs, run_on_cpu_data data)
+{
+    CPUX86State *env = &X86_CPU(cs)->env;
+
+    env->xen_vcpu_info_default_gpa = data.host_ulong;
+
+    /* An explicitly registered vcpu_info always wins over the default. */
+    if (env->xen_vcpu_info_gpa == UINT64_MAX) {
+        kvm_xen_set_vcpu_attr(cs, KVM_XEN_VCPU_ATTR_TYPE_VCPU_INFO,
+                              env->xen_vcpu_info_default_gpa);
+    }
+}
+
+/* Runs on the target vCPU: record an explicit vcpu_info GPA and apply it. */
+static void do_set_vcpu_info_gpa(CPUState *cs, run_on_cpu_data data)
+{
+    CPUX86State *env = &X86_CPU(cs)->env;
+
+    env->xen_vcpu_info_gpa = data.host_ulong;
+
+    /* Unlike the default, an explicit GPA is unconditionally pushed to KVM. */
+    kvm_xen_set_vcpu_attr(cs, KVM_XEN_VCPU_ATTR_TYPE_VCPU_INFO,
+                          env->xen_vcpu_info_gpa);
+}
+
static int xen_set_shared_info(uint64_t gfn)
{
uint64_t gpa = gfn << TARGET_PAGE_BITS;
- int err;
+ int i, err;
/* The xen_overlay device tells KVM about it too, since it had to
* do that on migration load anyway (unless we're going to jump
trace_kvm_xen_set_shared_info(gfn);
+ for (i = 0; i < XEN_LEGACY_MAX_VCPUS; i++) {
+ CPUState *cpu = qemu_get_cpu(i);
+ if (cpu) {
+ async_run_on_cpu(cpu, do_set_vcpu_info_default_gpa, RUN_ON_CPU_HOST_ULONG(gpa));
+ }
+ gpa += sizeof(vcpu_info_t);
+ }
+
return err;
}
}
}
+/*
+ * Handle VCPUOP_register_vcpu_info: copy the guest's
+ * vcpu_register_vcpu_info struct from guest virtual memory, validate it,
+ * and schedule the vcpu_info GPA update on the target vCPU's own thread.
+ *
+ * @cs:     the vCPU that issued the hypercall (used for the GVA copy)
+ * @target: the vCPU whose vcpu_info is being registered, or NULL
+ * @arg:    guest virtual address of the vcpu_register_vcpu_info struct
+ *
+ * Returns 0 on success or a negative errno value.
+ */
+static int vcpuop_register_vcpu_info(CPUState *cs, CPUState *target,
+                                     uint64_t arg)
+{
+    struct vcpu_register_vcpu_info rvi;
+    uint64_t gpa;
+
+    /* No need for 32/64 compat handling */
+    qemu_build_assert(sizeof(rvi) == 16);
+    qemu_build_assert(sizeof(struct vcpu_info) == 64);
+
+    if (!target) {
+        return -ENOENT;
+    }
+
+    if (kvm_copy_from_gva(cs, arg, &rvi, sizeof(rvi))) {
+        return -EFAULT;
+    }
+
+    /* The 64-byte vcpu_info must fit entirely within one page. */
+    if (rvi.offset > TARGET_PAGE_SIZE - sizeof(struct vcpu_info)) {
+        return -EINVAL;
+    }
+
+    gpa = ((rvi.mfn << TARGET_PAGE_BITS) + rvi.offset);
+    async_run_on_cpu(target, do_set_vcpu_info_gpa, RUN_ON_CPU_HOST_ULONG(gpa));
+    return 0;
+}
+
static bool kvm_xen_hcall_vcpu_op(struct kvm_xen_exit *exit, X86CPU *cpu,
int cmd, int vcpu_id, uint64_t arg)
{
+ CPUState *dest = qemu_get_cpu(vcpu_id);
+ CPUState *cs = CPU(cpu);
int err;
switch (cmd) {
case VCPUOP_register_vcpu_info:
- /* no vcpu info placement for now */
- err = -ENOSYS;
+ err = vcpuop_register_vcpu_info(cs, dest, arg);
break;
default:
}
};
+/* Migrate the xen_vcpu subsection only if either GPA has ever been set. */
+static bool xen_vcpu_needed(void *opaque)
+{
+    X86CPU *cpu = opaque;
+    CPUX86State *env = &cpu->env;
+
+    return !(env->xen_vcpu_info_gpa == UINT64_MAX &&
+             env->xen_vcpu_info_default_gpa == UINT64_MAX);
+}
+
+/*
+ * Migration subsection carrying the per-vCPU Xen vcpu_info GPAs; only
+ * sent when xen_vcpu_needed() reports that either GPA has been set.
+ */
+static const VMStateDescription vmstate_xen_vcpu = {
+    .name = "cpu/xen_vcpu",
+    .version_id = 1,
+    .minimum_version_id = 1,
+    .needed = xen_vcpu_needed,
+    .fields = (VMStateField[]) {
+        VMSTATE_UINT64(env.xen_vcpu_info_gpa, X86CPU),
+        VMSTATE_UINT64(env.xen_vcpu_info_default_gpa, X86CPU),
+        VMSTATE_END_OF_LIST()
+    }
+};
#endif
static bool mcg_ext_ctl_needed(void *opaque)
#endif
#ifdef CONFIG_KVM
&vmstate_nested_state,
+ &vmstate_xen_vcpu,
#endif
&vmstate_msr_tsx_ctrl,
&vmstate_msr_intel_sgx,