i386/xen: handle register_vcpu_info
author Joao Martins <joao.m.martins@oracle.com>
Fri, 29 Jun 2018 14:54:50 +0000 (10:54 -0400)
committer Joao Martins <joao.m.martins@oracle.com>
Tue, 19 Feb 2019 14:00:57 +0000 (09:00 -0500)
Handle the hypercall to set a per-vCPU vcpu_info, as opposed to using
the shared_info equivalent. Guests may never call
VCPUOP_register_vcpu_info, yet event channel operations will fail if a
proper vcpu_info isn't set up in QEMU.

So derive the HVA from shared_info, which is where these structures are
located when the guest doesn't register a separate pointer.

Signed-off-by: Joao Martins <joao.m.martins@oracle.com>
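
For context, the guest-side registration that this handler now services
looks roughly like the following. This is a minimal sketch, not part of
the patch: it mirrors what a Linux PV/PVHVM guest does in its
xen_vcpu_setup() path, and assumes the Xen public headers plus the
kernel's HYPERVISOR_vcpu_op(), virt_to_gfn() and offset_in_page()
helpers.

/*
 * Register a per-vCPU vcpu_info at a guest-chosen address instead of
 * relying on the fixed slots inside shared_info.  Sketch only.
 */
#include <linux/percpu.h>
#include <xen/interface/vcpu.h>    /* VCPUOP_register_vcpu_info */
#include <xen/interface/xen.h>     /* struct vcpu_info          */
#include <xen/page.h>              /* virt_to_gfn()             */

static DEFINE_PER_CPU(struct vcpu_info, xen_vcpu_info);

static int register_vcpu_info(int cpu)
{
    struct vcpu_info *vi = &per_cpu(xen_vcpu_info, cpu);
    struct vcpu_register_vcpu_info info = {
        /* The ABI takes a frame number plus an offset into that frame,
         * which the handler in this patch recombines into a gpa. */
        .mfn    = virt_to_gfn(vi),
        .offset = offset_in_page(vi),
    };

    /* On success, event delivery for this vCPU switches from the
     * shared_info slot to the registered structure. */
    return HYPERVISOR_vcpu_op(VCPUOP_register_vcpu_info, cpu, &info);
}

If the guest never makes this call, xen_set_shared_info() below falls
back to pointing each vCPU's vcpu_info at its slot inside shared_info.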
linux-headers/linux/kvm.h
target/i386/cpu.h
target/i386/trace-events
target/i386/xen-proto.h
target/i386/xen.c

index 996689087f75157824361a3e243b93b83a6d919d..c6e9d97be8379618a19eabbc8f014217b0972df6 100644 (file)
@@ -1464,10 +1464,15 @@ struct kvm_xen_hvm_attr {
                struct {
                        __u64 gfn;
                } shared_info;
+               struct {
+                       __u32 vcpu;
+                       __u64 gpa;
+               } vcpu_attr;
        } u;
 };
 
 #define KVM_XEN_ATTR_TYPE_SHARED_INFO       0x0
+#define KVM_XEN_ATTR_TYPE_VCPU_INFO         0x1
 
 /* Secure Encrypted Virtualization command */
 enum sev_cmd_id {
index e3fc8ce131e21e4a25167fbc5dfe688decf4b11b..687b2b887962cdb7fcf587f43810ce2e6aaa361d 100644 (file)
@@ -1345,6 +1345,7 @@ typedef struct CPUX86State {
 #if defined(CONFIG_HVF)
     HVFX86EmulatorState *hvf_emul;
 #endif
+    struct XenCPUState xen_vcpu;
 
     uint64_t mcg_cap;
     uint64_t mcg_ctl;
index 444e33d53b592d3dcfe85f8c7232faf952cf1a6d..3599fd1151d187a13426ba774adf23c48bc3c8e8 100644 (file)
@@ -19,3 +19,4 @@ kvm_sev_launch_finish(void) ""
 # target/i386/xen.c
 kvm_xen_hypercall(int cpu, uint64_t input, uint64_t a0, uint64_t a1, uint64_t a2, uint64_t ret) "xen_hypercall: cpu %d input %" PRIu64 " a0 0x%" PRIx64 " a1 0x%" PRIx64 " a2 0x%" PRIx64" ret 0x%" PRIu64
 kvm_xen_set_shared_info(uint64_t gfn) "shared info at gfn 0x%" PRIx64
+kvm_xen_set_vcpu_attr(int cpu, int type, uint64_t gpa) "vcpu attr cpu %d type %d gpa 0x%" PRIu64
index c394909f54bd0892e59866893db34ee92c6a90a1..8f6ee4c17bef282b12acc6971c021cfc5f12c012 100644 (file)
@@ -15,5 +15,9 @@ typedef struct XenState {
     struct shared_info *shared_info;
 } XenState;
 
+typedef struct XenCPUState {
+   struct vcpu_info *info;
+} XenCPUState;
+
 #endif
 
index 5b02b5f6662af6cdc67ba13bb4b031d45e823e31..eeafe6c9b3e69546764db54fcf2fbdf14a491584 100644 (file)
@@ -16,6 +16,7 @@
 #include "cpu.h"
 #include "xen.h"
 #include "trace.h"
+#include "sysemu/sysemu.h"
 
 #include "standard-headers/xen/version.h"
 #include "standard-headers/xen/memory.h"
@@ -148,13 +149,23 @@ static int xen_set_shared_info(CPUState *cs, struct shared_info *shi,
     struct kvm_xen_hvm_attr xhsi;
     XenState *xen = cs->xen_state;
     KVMState *s = cs->kvm_state;
-    int err;
+    XenCPUState *xcpu;
+    CPUState *cpu;
+    int i, err;
 
     xhsi.type = KVM_XEN_ATTR_TYPE_SHARED_INFO;
     xhsi.u.shared_info.gfn = gfn;
     err = kvm_vm_ioctl(s, KVM_XEN_HVM_SET_ATTR, &xhsi);
     trace_kvm_xen_set_shared_info(gfn);
     xen->shared_info = shi;
+
+    for (i = 0; i < smp_cpus; i++) {
+        cpu = qemu_get_cpu(i);
+
+        xcpu = &X86_CPU(cpu)->env.xen_vcpu;
+        xcpu->info = &shi->vcpu_info[cpu->cpu_index];
+    }
+
     return err;
 }
 
@@ -212,19 +223,59 @@ static int kvm_xen_hcall_hvm_op(struct kvm_xen_exit *exit,
     return HCALL_ERR;
 }
 
-static int kvm_xen_hcall_vcpu_op(struct kvm_xen_exit *exit,
-                                 int cmd, uint64_t arg)
+static int xen_set_vcpu_attr(CPUState *cs, uint16_t type, uint64_t gpa)
 {
+    struct kvm_xen_hvm_attr xhsi;
+    KVMState *s = cs->kvm_state;
+
+    xhsi.type = type;
+    xhsi.u.vcpu_attr.vcpu = cs->cpu_index;
+    xhsi.u.vcpu_attr.gpa = gpa;
+
+    trace_kvm_xen_set_vcpu_attr(cs->cpu_index, type, gpa);
+
+    return kvm_vm_ioctl(s, KVM_XEN_HVM_SET_ATTR, &xhsi);
+}
+
+static int vcpuop_register_vcpu_info(CPUState *cs, CPUState *target,
+                                     uint64_t arg)
+{
+    XenCPUState *xt = &X86_CPU(target)->env.xen_vcpu;
+    struct vcpu_register_vcpu_info *rvi;
+    uint64_t gpa;
+    void *hva;
+
+    rvi = gva_to_hva(cs, arg);
+    if (!rvi) {
+        return -EFAULT;
+    }
+
+    gpa = ((rvi->mfn << PAGE_SHIFT) + rvi->offset);
+    hva = gpa_to_hva(gpa);
+    if (!hva) {
+        return -EFAULT;
+    }
+
+    xt->info = hva;
+    return xen_set_vcpu_attr(target, KVM_XEN_ATTR_TYPE_VCPU_INFO, gpa);
+}
+
+static int kvm_xen_hcall_vcpu_op(struct kvm_xen_exit *exit, X86CPU *cpu,
+                                 int cmd, int vcpu_id, uint64_t arg)
+{
+    CPUState *dest = qemu_get_cpu(vcpu_id);
+    CPUState *cs = CPU(cpu);
+    int err = -ENOSYS;
+
     switch (cmd) {
     case VCPUOP_register_vcpu_info: {
-            /* no vcpu info placement for now */
-            exit->u.hcall.result = -ENOSYS;
-            return 0;
+            err = vcpuop_register_vcpu_info(cs, dest, arg);
+            break;
         }
     }
 
-    exit->u.hcall.result = -ENOSYS;
-    return HCALL_ERR;
+    exit->u.hcall.result = err;
+    return err ? HCALL_ERR : 0;
 }
 
 static int __kvm_xen_handle_exit(X86CPU *cpu, struct kvm_xen_exit *exit)
@@ -233,8 +284,10 @@ static int __kvm_xen_handle_exit(X86CPU *cpu, struct kvm_xen_exit *exit)
 
     switch (code) {
     case __HYPERVISOR_vcpu_op:
-        return kvm_xen_hcall_vcpu_op(exit, exit->u.hcall.params[0],
-                                     exit->u.hcall.params[1]);
+        return kvm_xen_hcall_vcpu_op(exit, cpu,
+                                     exit->u.hcall.params[0],
+                                     exit->u.hcall.params[1],
+                                     exit->u.hcall.params[2]);
     case __HYPERVISOR_hvm_op:
         return kvm_xen_hcall_hvm_op(exit, exit->u.hcall.params[0],
                                     exit->u.hcall.params[1]);