KVM: x86/xen: register vcpu info
author     Joao Martins <joao.m.martins@oracle.com>
           Fri, 29 Jun 2018 14:52:52 +0000 (10:52 -0400)
committer  Joao Martins <joao.m.martins@oracle.com>
           Wed, 20 Feb 2019 17:30:52 +0000 (12:50 -0500)
The vcpu_info structure supersedes the per-vCPU area of the shared_info
page, and guest vcpus will use it instead.
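
For context, a minimal sketch of the legacy placement this supersedes:
without explicit registration, a vcpu's vcpu_info occupies a fixed slot
inside the shared_info page, and only vcpu ids below MAX_VIRT_CPUS have
such a slot. The structs below are illustrative stand-ins for the Xen
ABI headers, with MAX_VIRT_CPUS assumed to be the legacy value of 32:

#include <stddef.h>
#include <stdio.h>

#define MAX_VIRT_CPUS 32                  /* assumed legacy Xen limit */

struct vcpu_info   { char opaque[64]; };  /* stand-in, not the ABI layout */
struct shared_info { struct vcpu_info vcpu_info[MAX_VIRT_CPUS]; };

/* Mirrors the offset arithmetic this patch removes from
 * kvm_xen_setup_pvclock_page() and moves into xen_vcpu_info(). */
static size_t legacy_vcpu_info_offset(unsigned int vcpu_id)
{
        return offsetof(struct shared_info, vcpu_info) +
               vcpu_id * sizeof(struct vcpu_info);
}

int main(void)
{
        printf("vcpu 3 slot at offset %zu\n", legacy_vcpu_info_offset(3));
        return 0;
}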

Signed-off-by: Joao Martins <joao.m.martins@oracle.com>
Signed-off-by: Ankur Arora <ankur.a.arora@oracle.com>
arch/x86/include/asm/kvm_host.h
arch/x86/kvm/x86.c
arch/x86/kvm/xen.c
arch/x86/kvm/xen.h
include/uapi/linux/kvm.h

diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index befc0e37f16279bbbd2c194646f46728842c9ae5..96f65ba4b3c0d8e6c8964f84aea30dd11e5c137c 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -537,6 +537,8 @@ struct kvm_vcpu_hv {
 /* Xen per vcpu emulation context */
 struct kvm_vcpu_xen {
        struct kvm_xen_exit exit;
+       gpa_t vcpu_info_addr;
+       struct vcpu_info *vcpu_info;
 };
 
 struct kvm_vcpu_arch {
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 31a102b22042305f114140a2e7bd4062cf7220b4..3ce97860e6eeab8234b74fb1bbb5bcfe18740710 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -9124,6 +9124,7 @@ void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
        int idx;
 
        kvm_hv_vcpu_uninit(vcpu);
+       kvm_xen_vcpu_uninit(vcpu);
        kvm_pmu_destroy(vcpu);
        kfree(vcpu->arch.mce_banks);
        kvm_free_lapic(vcpu);
diff --git a/arch/x86/kvm/xen.c b/arch/x86/kvm/xen.c
index 879bcfdd7b1d280c4d73a838b9707e0c3d8117b9..36d6dd0ea4b825192f342b3201563a91b2eb8ee5 100644
--- a/arch/x86/kvm/xen.c
+++ b/arch/x86/kvm/xen.c
@@ -31,6 +31,33 @@
 
 #include "trace.h"
 
+static void set_vcpu_attr(struct kvm_vcpu *v, u16 type, gpa_t gpa, void *addr)
+{
+       struct kvm_vcpu_xen *vcpu_xen = vcpu_to_xen_vcpu(v);
+
+       switch (type) {
+       case KVM_XEN_ATTR_TYPE_VCPU_INFO:
+               vcpu_xen->vcpu_info_addr = gpa;
+               vcpu_xen->vcpu_info = addr;
+               kvm_xen_setup_pvclock_page(v);
+               break;
+       default:
+               break;
+       }
+}
+
+static gpa_t get_vcpu_attr(struct kvm_vcpu *v, u16 type)
+{
+       struct kvm_vcpu_xen *vcpu_xen = vcpu_to_xen_vcpu(v);
+
+       switch (type) {
+       case KVM_XEN_ATTR_TYPE_VCPU_INFO:
+               return vcpu_xen->vcpu_info_addr;
+       default:
+               return 0;
+       }
+}
+
 static int kvm_xen_shared_info_init(struct kvm *kvm, gfn_t gfn)
 {
        struct shared_info *shared_info;
@@ -37,26 +64,44 @@ static int kvm_xen_shared_info_init(struct kvm *kvm, gfn_t gfn)
        return 0;
 }
 
+static void *xen_vcpu_info(struct kvm_vcpu *v)
+{
+       struct kvm_vcpu_xen *vcpu_xen = vcpu_to_xen_vcpu(v);
+       struct kvm_xen *kvm = &v->kvm->arch.xen;
+       unsigned int offset = 0;
+       void *hva = NULL;
+
+       if (vcpu_xen->vcpu_info_addr)
+               return vcpu_xen->vcpu_info;
+
+       if (kvm->shinfo_addr && v->vcpu_id < MAX_VIRT_CPUS) {
+               hva = kvm->shinfo;
+               offset += offsetof(struct shared_info, vcpu_info);
+               offset += v->vcpu_id * sizeof(struct vcpu_info);
+       }
+
+       return hva + offset;
+}
+
 void kvm_xen_setup_pvclock_page(struct kvm_vcpu *v)
 {
        struct kvm_vcpu_arch *vcpu = &v->arch;
        struct pvclock_vcpu_time_info *guest_hv_clock;
+       void *hva = xen_vcpu_info(v);
        unsigned int offset;
 
-       if (v->vcpu_id >= MAX_VIRT_CPUS)
+       if (!hva)
                return;
 
        offset = offsetof(struct vcpu_info, time);
-       offset += offsetof(struct shared_info, vcpu_info);
-       offset += v->vcpu_id * sizeof(struct vcpu_info);
 
        guest_hv_clock = (struct pvclock_vcpu_time_info *)
-               (((void *)v->kvm->arch.xen.shinfo) + offset);
+               (hva + offset);
 
        BUILD_BUG_ON(offsetof(struct pvclock_vcpu_time_info, version) != 0);
 
        if (guest_hv_clock->version & 1)
-               ++guest_hv_clock->version;  /* first time write, random junk */
+               ++guest_hv_clock->version;
 
        vcpu->hv_clock.version = guest_hv_clock->version + 1;
        guest_hv_clock->version = vcpu->hv_clock.version;
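
A note on the version handshake above: pvclock treats the version field
as a seqcount, so an odd value means a host update is in flight and the
guest must retry its read. A minimal sketch of the guest-side read loop
under that protocol (stand-in struct; memory barriers omitted for
brevity):

#include <stdint.h>

struct pvclock_time {
        volatile uint32_t version;      /* odd while the host is writing */
        uint64_t system_time;
};

static uint64_t pvclock_read(struct pvclock_time *t)
{
        uint32_t v;
        uint64_t time;

        do {
                v = t->version;         /* snapshot the seqcount */
                time = t->system_time;
        } while ((v & 1) || v != t->version);  /* redo if odd or moved */

        return time;
}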
@@ -93,6 +138,25 @@ int kvm_xen_hvm_set_attr(struct kvm *kvm, struct kvm_xen_hvm_attr *data)
                r = kvm_xen_shared_info_init(kvm, gfn);
                break;
        }
+       case KVM_XEN_ATTR_TYPE_VCPU_INFO: {
+               gpa_t gpa = data->u.vcpu_attr.gpa;
+               struct kvm_vcpu *v;
+               struct page *page;
+               void *addr;
+
+               v = kvm_get_vcpu(kvm, data->u.vcpu_attr.vcpu);
+               if (!v)
+                       return -EINVAL;
+
+               page = gfn_to_page(v->kvm, gpa_to_gfn(gpa));
+               if (is_error_page(page))
+                       return -EFAULT;
+
+               addr = page_to_virt(page) + offset_in_page(gpa);
+               set_vcpu_attr(v, data->type, gpa, addr);
+               r = 0;
+               break;
+       }
        default:
                break;
        }
@@ -109,6 +173,17 @@ int kvm_xen_hvm_get_attr(struct kvm *kvm, struct kvm_xen_hvm_attr *data)
                data->u.shared_info.gfn = kvm->arch.xen.shinfo_addr;
                break;
        }
+       case KVM_XEN_ATTR_TYPE_VCPU_INFO: {
+               struct kvm_vcpu *v;
+
+               v = kvm_get_vcpu(kvm, data->u.vcpu_attr.vcpu);
+               if (!v)
+                       return -EINVAL;
+
+               data->u.vcpu_attr.gpa = get_vcpu_attr(v, data->type);
+               r = 0;
+               break;
+       }
        default:
                break;
        }
@@ -180,6 +255,14 @@ int kvm_xen_hypercall(struct kvm_vcpu *vcpu)
        return 0;
 }
 
+void kvm_xen_vcpu_uninit(struct kvm_vcpu *vcpu)
+{
+       struct kvm_vcpu_xen *vcpu_xen = vcpu_to_xen_vcpu(vcpu);
+
+       if (vcpu_xen->vcpu_info)
+               put_page(virt_to_page(vcpu_xen->vcpu_info));
+}
+
 void kvm_xen_destroy_vm(struct kvm *kvm)
 {
        struct kvm_xen *xen = &kvm->arch.xen;
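
The set_attr path above converts a guest physical address to a host
pointer in three steps: gpa_to_gfn() strips the in-page offset,
gfn_to_page() resolves and pins the backing page (dropped again by
put_page() in kvm_xen_vcpu_uninit()), and offset_in_page() adds the
offset back onto the page's kernel mapping. A stand-alone model of the
address arithmetic, assuming 4 KiB pages:

#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT 12                   /* assumed 4 KiB pages */
#define PAGE_SIZE  (1UL << PAGE_SHIFT)

static uint64_t gpa_to_gfn(uint64_t gpa)     { return gpa >> PAGE_SHIFT; }
static uint64_t offset_in_page(uint64_t gpa) { return gpa & (PAGE_SIZE - 1); }

int main(void)
{
        uint64_t gpa = 0x12345678;

        /* In the kernel the final pointer is
         * page_to_virt(gfn_to_page(gfn)) + offset. */
        printf("gfn=%#llx offset=%#llx\n",
               (unsigned long long)gpa_to_gfn(gpa),
               (unsigned long long)offset_in_page(gpa));
        return 0;
}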
diff --git a/arch/x86/kvm/xen.h b/arch/x86/kvm/xen.h
index 827c9390da34afe8f3f01477de7448dd7f3ec108..10ebd0b7a25ed533757055633e1aff21405b73ca 100644
--- a/arch/x86/kvm/xen.h
+++ b/arch/x86/kvm/xen.h
@@ -3,6 +3,19 @@
 #ifndef __ARCH_X86_KVM_XEN_H__
 #define __ARCH_X86_KVM_XEN_H__
 
+static inline struct kvm_vcpu_xen *vcpu_to_xen_vcpu(struct kvm_vcpu *vcpu)
+{
+       return &vcpu->arch.xen;
+}
+
+static inline struct kvm_vcpu *xen_vcpu_to_vcpu(struct kvm_vcpu_xen *xen_vcpu)
+{
+       struct kvm_vcpu_arch *arch;
+
+       arch = container_of(xen_vcpu, struct kvm_vcpu_arch, xen);
+       return container_of(arch, struct kvm_vcpu, arch);
+}
+
 void kvm_xen_setup_pvclock_page(struct kvm_vcpu *vcpu);
 int kvm_xen_hvm_set_attr(struct kvm *kvm, struct kvm_xen_hvm_attr *data);
 int kvm_xen_hvm_get_attr(struct kvm *kvm, struct kvm_xen_hvm_attr *data);
@@ -11,5 +24,6 @@ bool kvm_xen_hypercall_set(struct kvm *kvm);
 int kvm_xen_hypercall(struct kvm_vcpu *vcpu);
 
 void kvm_xen_destroy_vm(struct kvm *kvm);
+void kvm_xen_vcpu_uninit(struct kvm_vcpu *vcpu);
 
 #endif
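
The two helpers added above are inverses: vcpu_to_xen_vcpu() descends
into the embedded struct, and xen_vcpu_to_vcpu() climbs back out via
container_of(). A self-contained demonstration of that round trip,
using simplified stand-ins for the kvm_host.h structs:

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

struct kvm_vcpu_xen  { unsigned long vcpu_info_addr; };
struct kvm_vcpu_arch { long pad; struct kvm_vcpu_xen xen; };
struct kvm_vcpu      { int vcpu_id; struct kvm_vcpu_arch arch; };

int main(void)
{
        struct kvm_vcpu v = { .vcpu_id = 3 };
        struct kvm_vcpu_xen *xen_vcpu = &v.arch.xen;

        /* kvm_vcpu_xen -> kvm_vcpu_arch -> kvm_vcpu, as in xen_vcpu_to_vcpu() */
        struct kvm_vcpu_arch *arch =
                container_of(xen_vcpu, struct kvm_vcpu_arch, xen);
        struct kvm_vcpu *vcpu = container_of(arch, struct kvm_vcpu, arch);

        printf("recovered vcpu_id = %d\n", vcpu->vcpu_id);  /* prints 3 */
        return 0;
}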
diff --git a/include/uapi/linux/kvm.h b/include/uapi/linux/kvm.h
index de2168d235af803a430a5e03784bd53c439dcfa9..782f497a0fdd67d30ae3a3b4219d80b78a8ed467 100644
--- a/include/uapi/linux/kvm.h
+++ b/include/uapi/linux/kvm.h
@@ -1465,10 +1465,15 @@ struct kvm_xen_hvm_attr {
                struct {
                        __u64 gfn;
                } shared_info;
+               struct {
+                       __u32 vcpu;
+                       __u64 gpa;
+               } vcpu_attr;
        } u;
 };
 
 #define KVM_XEN_ATTR_TYPE_SHARED_INFO       0x0
+#define KVM_XEN_ATTR_TYPE_VCPU_INFO         0x1
 
 /* Secure Encrypted Virtualization command */
 enum sev_cmd_id {
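
To round out the UAPI addition, a hedged userspace sketch of
registering a vcpu_info address and reading it back. The
KVM_XEN_HVM_SET_ATTR/KVM_XEN_HVM_GET_ATTR ioctl names are assumed to be
the ones this series wires to kvm_xen_hvm_set_attr() and
kvm_xen_hvm_get_attr(); they are not defined in this patch, so treat
them as illustrative:

#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>          /* this series' patched header */

static int xen_set_vcpu_info(int vm_fd, uint32_t vcpu_id, uint64_t gpa)
{
        struct kvm_xen_hvm_attr attr;

        memset(&attr, 0, sizeof(attr));
        attr.type = KVM_XEN_ATTR_TYPE_VCPU_INFO;
        attr.u.vcpu_attr.vcpu = vcpu_id;
        attr.u.vcpu_attr.gpa = gpa;

        return ioctl(vm_fd, KVM_XEN_HVM_SET_ATTR, &attr);    /* assumed name */
}

static int xen_get_vcpu_info(int vm_fd, uint32_t vcpu_id, uint64_t *gpa)
{
        struct kvm_xen_hvm_attr attr;
        int r;

        memset(&attr, 0, sizeof(attr));
        attr.type = KVM_XEN_ATTR_TYPE_VCPU_INFO;
        attr.u.vcpu_attr.vcpu = vcpu_id;

        r = ioctl(vm_fd, KVM_XEN_HVM_GET_ATTR, &attr);       /* assumed name */
        if (!r)
                *gpa = attr.u.vcpu_attr.gpa;
        return r;
}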