int kvm_xen_init_vcpu(CPUState *cs)
{
+ X86CPU *cpu = X86_CPU(cs);
+ CPUX86State *env = &cpu->env;
int err;
/*
}
}
+ env->xen_vcpu_info_gpa = INVALID_GPA;
+ env->xen_vcpu_info_default_gpa = INVALID_GPA;
+
return 0;
}
return true;
}
+/*
+ * Set a per-vCPU Xen attribute in KVM via the KVM_XEN_VCPU_SET_ATTR ioctl.
+ * @type is a KVM_XEN_VCPU_ATTR_TYPE_* constant; @gpa is the guest physical
+ * address payload (INVALID_GPA is used by callers to tear the mapping down).
+ * Returns the raw ioctl result: 0 on success, negative errno on failure.
+ */
+static int kvm_xen_set_vcpu_attr(CPUState *cs, uint16_t type, uint64_t gpa)
+{
+ struct kvm_xen_vcpu_attr xhsi;
+
+ xhsi.type = type;
+ xhsi.u.gpa = gpa;
+
+ trace_kvm_xen_set_vcpu_attr(cs->cpu_index, type, gpa);
+
+ return kvm_vcpu_ioctl(cs, KVM_XEN_VCPU_SET_ATTR, &xhsi);
+}
+
+/*
+ * Run on the target vCPU via async_run_on_cpu(): record the default
+ * vcpu_info GPA (derived from the shared_info page layout by the caller)
+ * and, if the guest has not explicitly registered a vcpu_info, point KVM's
+ * VCPU_INFO attribute at that default.
+ */
+static void do_set_vcpu_info_default_gpa(CPUState *cs, run_on_cpu_data data)
+{
+ X86CPU *cpu = X86_CPU(cs);
+ CPUX86State *env = &cpu->env;
+
+ env->xen_vcpu_info_default_gpa = data.host_ulong;
+
+ /* Changing the default does nothing if a vcpu_info was explicitly set. */
+ if (env->xen_vcpu_info_gpa == INVALID_GPA) {
+ kvm_xen_set_vcpu_attr(cs, KVM_XEN_VCPU_ATTR_TYPE_VCPU_INFO,
+ env->xen_vcpu_info_default_gpa);
+ }
+}
+
+/*
+ * Run on the target vCPU via async_run_on_cpu(): record an explicitly
+ * registered vcpu_info GPA (from VCPUOP_register_vcpu_info) and tell KVM
+ * about it unconditionally — an explicit registration overrides the
+ * shared_info-derived default.
+ * NOTE(review): any error from kvm_xen_set_vcpu_attr() is dropped here,
+ * since async callbacks have no way to report it to the hypercall path.
+ */
+static void do_set_vcpu_info_gpa(CPUState *cs, run_on_cpu_data data)
+{
+ X86CPU *cpu = X86_CPU(cs);
+ CPUX86State *env = &cpu->env;
+
+ env->xen_vcpu_info_gpa = data.host_ulong;
+
+ kvm_xen_set_vcpu_attr(cs, KVM_XEN_VCPU_ATTR_TYPE_VCPU_INFO,
+ env->xen_vcpu_info_gpa);
+}
+
+/*
+ * Run on each vCPU via async_run_on_cpu() during Xen soft reset: forget
+ * both the explicit and the default vcpu_info GPAs and tear down KVM's
+ * VCPU_INFO mapping by setting it to INVALID_GPA.
+ */
+static void do_vcpu_soft_reset(CPUState *cs, run_on_cpu_data data)
+{
+ X86CPU *cpu = X86_CPU(cs);
+ CPUX86State *env = &cpu->env;
+
+ env->xen_vcpu_info_gpa = INVALID_GPA;
+ env->xen_vcpu_info_default_gpa = INVALID_GPA;
+
+ kvm_xen_set_vcpu_attr(cs, KVM_XEN_VCPU_ATTR_TYPE_VCPU_INFO, INVALID_GPA);
+}
+
static int xen_set_shared_info(uint64_t gfn)
{
uint64_t gpa = gfn << TARGET_PAGE_BITS;
- int err;
+ int i, err;
/*
* The xen_overlay device tells KVM about it too, since it had to
trace_kvm_xen_set_shared_info(gfn);
+ for (i = 0; i < XEN_LEGACY_MAX_VCPUS; i++) {
+ CPUState *cpu = qemu_get_cpu(i);
+ if (cpu) {
+ async_run_on_cpu(cpu, do_set_vcpu_info_default_gpa,
+ RUN_ON_CPU_HOST_ULONG(gpa));
+ }
+ gpa += sizeof(vcpu_info_t);
+ }
+
return err;
}
}
}
+/*
+ * Handle the VCPUOP_register_vcpu_info hypercall: the guest registers an
+ * explicit vcpu_info location for @target, given as a (mfn, offset) pair
+ * in a struct vcpu_register_vcpu_info at guest virtual address @arg
+ * (read via the calling vCPU @cs).
+ *
+ * Returns 0 on success; -ENOENT if the vCPU id did not resolve to a CPU,
+ * -EFAULT if the argument could not be copied from guest memory, -EINVAL
+ * if the vcpu_info would straddle a page boundary.
+ *
+ * The actual KVM attribute update is deferred to the target vCPU with
+ * async_run_on_cpu(), so a failure there is not reflected in the return
+ * value seen by the guest.
+ */
+static int vcpuop_register_vcpu_info(CPUState *cs, CPUState *target,
+ uint64_t arg)
+{
+ struct vcpu_register_vcpu_info rvi;
+ uint64_t gpa;
+
+ /* No need for 32/64 compat handling */
+ qemu_build_assert(sizeof(rvi) == 16);
+ qemu_build_assert(sizeof(struct vcpu_info) == 64);
+
+ if (!target) {
+ return -ENOENT;
+ }
+
+ if (kvm_copy_from_gva(cs, arg, &rvi, sizeof(rvi))) {
+ return -EFAULT;
+ }
+
+ /* Reject an offset that would make vcpu_info cross the page boundary. */
+ if (rvi.offset > TARGET_PAGE_SIZE - sizeof(struct vcpu_info)) {
+ return -EINVAL;
+ }
+
+ gpa = ((rvi.mfn << TARGET_PAGE_BITS) + rvi.offset);
+ async_run_on_cpu(target, do_set_vcpu_info_gpa, RUN_ON_CPU_HOST_ULONG(gpa));
+ return 0;
+}
+
static bool kvm_xen_hcall_vcpu_op(struct kvm_xen_exit *exit, X86CPU *cpu,
int cmd, int vcpu_id, uint64_t arg)
{
+ CPUState *dest = qemu_get_cpu(vcpu_id);
+ CPUState *cs = CPU(cpu);
int err;
switch (cmd) {
case VCPUOP_register_vcpu_info:
- /* no vcpu info placement for now */
- err = -ENOSYS;
+ err = vcpuop_register_vcpu_info(cs, dest, arg);
break;
default:
static int kvm_xen_soft_reset(void)
{
+ CPUState *cpu;
int err;
+ CPU_FOREACH(cpu) {
+ async_run_on_cpu(cpu, do_vcpu_soft_reset, RUN_ON_CPU_NULL);
+ }
+
err = xen_overlay_map_shinfo_page(INVALID_GFN);
if (err) {
return err;
exit->u.hcall.result);
return 0;
}
+
+/*
+ * Push this vCPU's Xen state back into KVM (e.g. after migration, when the
+ * kernel's attribute state must be reconstructed from env). An explicitly
+ * registered vcpu_info GPA takes precedence over the shared_info-derived
+ * default; if neither is set there is nothing to restore.
+ * Returns 0 on success or the negative errno from the SET_ATTR ioctl.
+ */
+int kvm_put_xen_state(CPUState *cs)
+{
+ X86CPU *cpu = X86_CPU(cs);
+ CPUX86State *env = &cpu->env;
+ uint64_t gpa;
+ int ret;
+
+ gpa = env->xen_vcpu_info_gpa;
+ if (gpa == INVALID_GPA) {
+ gpa = env->xen_vcpu_info_default_gpa;
+ }
+
+ if (gpa != INVALID_GPA) {
+ ret = kvm_xen_set_vcpu_attr(cs, KVM_XEN_VCPU_ATTR_TYPE_VCPU_INFO, gpa);
+ if (ret < 0) {
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+/*
+ * Sync this vCPU's Xen state out of KVM. There is nothing to read back;
+ * the only work is forcing the vcpu_info page to be treated as dirty so
+ * that migration copies it (see comment below). Always returns 0.
+ */
+int kvm_get_xen_state(CPUState *cs)
+{
+ X86CPU *cpu = X86_CPU(cs);
+ CPUX86State *env = &cpu->env;
+ uint64_t gpa;
+
+ /*
+ * The kernel does not mark vcpu_info as dirty when it delivers interrupts
+ * to it. It's up to userspace to *assume* that any page shared thus is
+ * always considered dirty. The shared_info page is different since it's
+ * an overlay and migrated separately anyway.
+ */
+ gpa = env->xen_vcpu_info_gpa;
+ if (gpa == INVALID_GPA) {
+ gpa = env->xen_vcpu_info_default_gpa;
+ }
+ if (gpa != INVALID_GPA) {
+ /* Resolve the GPA to a RAM region and dirty just the vcpu_info span. */
+ MemoryRegionSection mrs = memory_region_find(get_system_memory(),
+ gpa,
+ sizeof(struct vcpu_info));
+ if (mrs.mr && mrs.size >= sizeof(struct vcpu_info)) {
+ memory_region_set_dirty(mrs.mr, mrs.offset_within_region,
+ sizeof(struct vcpu_info));
+ }
+ }
+
+ return 0;
+}