KVM: x86/xen: register vcpu info
author Joao Martins <joao.m.martins@oracle.com>
Fri, 29 Jun 2018 14:52:52 +0000 (10:52 -0400)
committer David Woodhouse <dwmw@amazon.co.uk>
Fri, 4 Dec 2020 09:06:56 +0000 (09:06 +0000)
The vcpu_info area supersedes the per-vCPU region of the shared_info page,
and guest vCPUs will use it instead.

Signed-off-by: Joao Martins <joao.m.martins@oracle.com>
Signed-off-by: Ankur Arora <ankur.a.arora@oracle.com>
Signed-off-by: David Woodhouse <dwmw@amazon.co.uk>
arch/x86/include/asm/kvm_host.h
arch/x86/kvm/x86.c
arch/x86/kvm/xen.c
arch/x86/kvm/xen.h
include/uapi/linux/kvm.h
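
For reference, a VMM would register a vCPU's vcpu_info roughly as sketched below. This is a minimal, hypothetical example, not part of the patch: it assumes the VM-wide KVM_XEN_HVM_SET_ATTR ioctl introduced earlier in this series, and vm_fd, vcpu_id and vcpu_info_gpa are placeholder names for values the VMM already holds.

/*
 * Userspace sketch (assumption, not part of this commit): register a
 * vcpu_info page for one vCPU via the new KVM_XEN_ATTR_TYPE_VCPU_INFO
 * attribute added by this patch.
 */
#include <string.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

static int xen_set_vcpu_info(int vm_fd, __u32 vcpu_id, __u64 vcpu_info_gpa)
{
	struct kvm_xen_hvm_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.type = KVM_XEN_ATTR_TYPE_VCPU_INFO;
	attr.u.vcpu_attr.vcpu = vcpu_id;        /* target vCPU id */
	attr.u.vcpu_attr.gpa  = vcpu_info_gpa;  /* guest address of struct vcpu_info */

	/* On success, KVM maps the page and uses it for pvclock updates. */
	return ioctl(vm_fd, KVM_XEN_HVM_SET_ATTR, &attr);
}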

diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index b6eff9814c6a4455c7158213151adea7d41c1d2d..617c120d03a31b92c4ecb5807c413f059b3ea237 100644
@@ -523,6 +523,8 @@ struct kvm_vcpu_hv {
 /* Xen HVM per vcpu emulation context */
 struct kvm_vcpu_xen {
        u64 hypercall_rip;
+       struct kvm_host_map vcpu_info_map;
+       struct vcpu_info *vcpu_info;
 };
 
 struct kvm_vcpu_arch {
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index abc4c78a7aa47c3d037d4e2f7d254a1e3a78375c..27f3a59f45b1a23d4e53ed2b5d3eace291df3565 100644
@@ -10004,6 +10004,7 @@ void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
        kmem_cache_free(x86_fpu_cache, vcpu->arch.guest_fpu);
 
        kvm_hv_vcpu_uninit(vcpu);
+       kvm_xen_vcpu_uninit(vcpu);
        kvm_pmu_destroy(vcpu);
        kfree(vcpu->arch.mce_banks);
        kvm_free_lapic(vcpu);
diff --git a/arch/x86/kvm/xen.c b/arch/x86/kvm/xen.c
index 2e4e98297364c04a12df39b9f415cc2bdeec1b12..f9ae2cfae0d2e78f75f66110d0f5d3fa7dd5c621 100644
@@ -80,13 +80,33 @@ static int kvm_xen_shared_info_init(struct kvm *kvm, gfn_t gfn)
        return 0;
 }
 
+static void *xen_vcpu_info(struct kvm_vcpu *v)
+{
+       struct kvm_vcpu_xen *vcpu_xen = vcpu_to_xen_vcpu(v);
+       struct kvm_xen *kvm = &v->kvm->arch.xen;
+       unsigned int offset = 0;
+       void *hva = NULL;
+
+       if (vcpu_xen->vcpu_info)
+               return vcpu_xen->vcpu_info;
+
+       if (kvm->shinfo && v->vcpu_id < MAX_VIRT_CPUS) {
+               hva = kvm->shinfo;
+               offset += offsetof(struct shared_info, vcpu_info);
+               offset += v->vcpu_id * sizeof(struct vcpu_info);
+       }
+
+       return hva + offset;
+}
+
 void kvm_xen_setup_pvclock_page(struct kvm_vcpu *v)
 {
        struct kvm_vcpu_arch *vcpu = &v->arch;
        struct pvclock_vcpu_time_info *guest_hv_clock;
+       void *hva = xen_vcpu_info(v);
        unsigned int offset;
 
-       if (v->vcpu_id >= MAX_VIRT_CPUS)
+       if (!hva)
                return;
 
        BUILD_BUG_ON(offsetof(struct shared_info, vcpu_info) != 0);
@@ -98,10 +118,10 @@ void kvm_xen_setup_pvclock_page(struct kvm_vcpu *v)
        offset = v->vcpu_id * sizeof(struct vcpu_info);
        offset += offsetof(struct vcpu_info, time);
 
-       guest_hv_clock = v->kvm->arch.xen.shinfo + offset;
+       guest_hv_clock = hva + offset;
 
        if (guest_hv_clock->version & 1)
-               ++guest_hv_clock->version;  /* first time write, random junk */
+               ++guest_hv_clock->version;
 
        vcpu->hv_clock.version = guest_hv_clock->version + 1;
        guest_hv_clock->version = vcpu->hv_clock.version;
@@ -127,6 +147,20 @@ void kvm_xen_setup_pvclock_page(struct kvm_vcpu *v)
        guest_hv_clock->version = vcpu->hv_clock.version;
 }
 
+static int vcpu_attr_loc(struct kvm_vcpu *vcpu, u16 type,
+                        struct kvm_host_map **map, void ***hva, size_t *sz)
+{
+       switch(type) {
+       case KVM_XEN_ATTR_TYPE_VCPU_INFO:
+               *map = &vcpu->arch.xen.vcpu_info_map;
+               *hva = (void **)&vcpu->arch.xen.vcpu_info;
+               if (sz)
+                       *sz = sizeof(struct vcpu_info);
+               return 0;
+       }
+       return -EINVAL;
+}
+
 int kvm_xen_hvm_set_attr(struct kvm *kvm, struct kvm_xen_hvm_attr *data)
 {
        int r = -ENOENT;
@@ -147,6 +181,28 @@ int kvm_xen_hvm_set_attr(struct kvm *kvm, struct kvm_xen_hvm_attr *data)
                break;
        }
 
+       case KVM_XEN_ATTR_TYPE_VCPU_INFO: {
+               gpa_t gpa = data->u.vcpu_attr.gpa;
+               struct kvm_host_map *map;
+               struct kvm_vcpu *v;
+               size_t sz;
+               void **hva;
+
+               v = kvm_get_vcpu(kvm, data->u.vcpu_attr.vcpu);
+               if (!v)
+                       return -EINVAL;
+
+               r = vcpu_attr_loc(v, data->type, &map, &hva, &sz);
+               if (r)
+                       return r;
+
+               r = kvm_xen_map_guest_page(kvm, map, hva, gpa, sz);
+               if (!r)
+                       kvm_xen_setup_pvclock_page(v);
+
+               break;
+       }
+
        default:
                break;
        }
@@ -172,6 +228,27 @@ int kvm_xen_hvm_get_attr(struct kvm *kvm, struct kvm_xen_hvm_attr *data)
                break;
        }
 
+       case KVM_XEN_ATTR_TYPE_VCPU_INFO: {
+               struct kvm_host_map *map;
+               struct kvm_vcpu *v;
+               void **hva;
+
+               v = kvm_get_vcpu(kvm, data->u.vcpu_attr.vcpu);
+               if (!v)
+                       return -EINVAL;
+
+               r = vcpu_attr_loc(v, data->type, &map, &hva, NULL);
+               if (r)
+                       return r;
+
+               if (*hva) {
+                       data->u.vcpu_attr.gpa = gfn_to_gpa(map->gfn) +
+                               offset_in_page(*hva);
+                       r = 0;
+               }
+               break;
+       }
+
        default:
                break;
        }
@@ -311,6 +388,17 @@ int kvm_xen_hypercall(struct kvm_vcpu *vcpu)
        return 0;
 }
 
+void kvm_xen_vcpu_uninit(struct kvm_vcpu *vcpu)
+{
+       struct kvm_vcpu_xen *vcpu_xen = vcpu_to_xen_vcpu(vcpu);
+
+       if (vcpu_xen->vcpu_info) {
+               kvm_unmap_gfn(vcpu->kvm, &vcpu_xen->vcpu_info_map,
+                             NULL, true, false);
+               vcpu_xen->vcpu_info = NULL;
+       }
+}
+
 void kvm_xen_destroy_vm(struct kvm *kvm)
 {
        struct kvm_xen *xen = &kvm->arch.xen;
diff --git a/arch/x86/kvm/xen.h b/arch/x86/kvm/xen.h
index 4f73866b3d33e460a301947107fdad7435da1917..6d09b46d3c2e43faa592dee48286614300cc529f 100644
@@ -9,12 +9,26 @@
 #ifndef __ARCH_X86_KVM_XEN_H__
 #define __ARCH_X86_KVM_XEN_H__
 
+static inline struct kvm_vcpu_xen *vcpu_to_xen_vcpu(struct kvm_vcpu *vcpu)
+{
+       return &vcpu->arch.xen;
+}
+
+static inline struct kvm_vcpu *xen_vcpu_to_vcpu(struct kvm_vcpu_xen *xen_vcpu)
+{
+       struct kvm_vcpu_arch *arch;
+
+       arch = container_of(xen_vcpu, struct kvm_vcpu_arch, xen);
+       return container_of(arch, struct kvm_vcpu, arch);
+}
+
 void kvm_xen_setup_pvclock_page(struct kvm_vcpu *vcpu);
 int kvm_xen_hvm_set_attr(struct kvm *kvm, struct kvm_xen_hvm_attr *data);
 int kvm_xen_hvm_get_attr(struct kvm *kvm, struct kvm_xen_hvm_attr *data);
 int kvm_xen_hypercall(struct kvm_vcpu *vcpu);
 int kvm_xen_hvm_config(struct kvm_vcpu *vcpu, u64 data);
 void kvm_xen_destroy_vm(struct kvm *kvm);
+void kvm_xen_vcpu_uninit(struct kvm_vcpu *vcpu);
 
 static inline bool kvm_xen_hypercall_enabled(struct kvm *kvm)
 {
diff --git a/include/uapi/linux/kvm.h b/include/uapi/linux/kvm.h
index caa9faf3c5adbca136b8fb103a6a871571010ce4..8dda080175b62425e44afe93e254d3898b9dd344 100644
@@ -1588,12 +1588,17 @@ struct kvm_xen_hvm_attr {
                struct {
                        __u64 gfn;
                } shared_info;
+               struct {
+                       __u32 vcpu;
+                       __u64 gpa;
+               } vcpu_attr;
                __u64 pad[4];
        } u;
 };
 
 #define KVM_XEN_ATTR_TYPE_LONG_MODE            0x0
 #define KVM_XEN_ATTR_TYPE_SHARED_INFO          0x1
+#define KVM_XEN_ATTR_TYPE_VCPU_INFO            0x2
 
 /* Secure Encrypted Virtualization command */
 enum sev_cmd_id {