int kvm_xen_hvm_set_attr(struct kvm *kvm, struct kvm_xen_hvm_attr *data)
{
+	struct kvm_vcpu *v;
	int r = -ENOENT;

	switch (data->type) {
	case KVM_XEN_ATTR_TYPE_SHARED_INFO:
		r = kvm_xen_shared_info_init(kvm, data->u.shared_info.gfn);
		break;

+	case KVM_XEN_ATTR_TYPE_VCPU_INFO:
+		v = kvm_get_vcpu_by_id(kvm, data->u.vcpu_attr.vcpu_id);
+		if (!v)
+			return -EINVAL;
+
+		/* No compat necessary here. */
+		BUILD_BUG_ON(sizeof(struct vcpu_info) !=
+			     sizeof(struct compat_vcpu_info));
+		r = kvm_gfn_to_hva_cache_init(kvm, &v->arch.xen.vcpu_info_cache,
+					      data->u.vcpu_attr.gpa,
+					      sizeof(struct vcpu_info));
+		if (r)
+			return r;
+
+		v->arch.xen.vcpu_info_set = true;
+		break;
+
	default:
		break;
	}

	return r;
}
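The gfn_to_hva_cache registered above is what lets later event-channel
delivery write the guest's vcpu_info without redoing the GFN translation
on every injection. As an illustrative sketch only (the helper name
kvm_xen_mark_upcall_pending is hypothetical and not part of this patch),
a consumer could mark an upcall pending through the cache like so:

static void kvm_xen_mark_upcall_pending(struct kvm_vcpu *v)
{
	u8 pending = 1;

	if (!v->arch.xen.vcpu_info_set)
		return;

	/* evtchn_upcall_pending is the first byte of struct vcpu_info */
	kvm_write_guest_offset_cached(v->kvm, &v->arch.xen.vcpu_info_cache,
				      &pending,
				      offsetof(struct vcpu_info,
					       evtchn_upcall_pending),
				      sizeof(pending));
}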
int kvm_xen_hvm_get_attr(struct kvm *kvm, struct kvm_xen_hvm_attr *data)
{
	int r = -ENOENT;
+	struct kvm_vcpu *v;

	switch (data->type) {
	case KVM_XEN_ATTR_TYPE_LONG_MODE:
		data->u.long_mode = kvm->arch.xen.long_mode;
		r = 0;
		break;

+	case KVM_XEN_ATTR_TYPE_VCPU_INFO:
+		v = kvm_get_vcpu_by_id(kvm, data->u.vcpu_attr.vcpu_id);
+		if (!v)
+			return -EINVAL;
+
+		if (v->arch.xen.vcpu_info_set) {
+			data->u.vcpu_attr.gpa = v->arch.xen.vcpu_info_cache.gpa;
+			r = 0;
+		}
+		break;
+
	default:
		break;
	}

	return r;
}
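Note that the lookup goes through kvm_get_vcpu_by_id(), so the vcpu_id in
the ABI is the id userspace passed to KVM_CREATE_VCPU (on x86, the initial
APIC ID), not the vCPU's creation index. GET also deliberately reports the
gpa straight out of the gfn_to_hva_cache rather than keeping a separate
copy, so it can only succeed once vcpu_info_set is true and otherwise
falls through to -ENOENT. A combined userspace set/get sketch follows the
uapi changes below.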
struct kvm_xen_hvm_attr {
	__u16 type;
	__u16 pad[3];
	union {
		__u8 long_mode;
		struct {
			__u64 gfn;
		} shared_info;
+		struct {
+			__u32 vcpu_id;
+			__u64 gpa;
+		} vcpu_attr;
		__u64 pad[4];
	} u;
};
#define KVM_XEN_ATTR_TYPE_LONG_MODE 0x0
#define KVM_XEN_ATTR_TYPE_SHARED_INFO 0x1
+#define KVM_XEN_ATTR_TYPE_VCPU_INFO 0x2
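Taken together with the KVM_XEN_HVM_SET_ATTR/KVM_XEN_HVM_GET_ATTR ioctls
added earlier in this series, userspace can register a vcpu_info and read
the address back as sketched below. This is a hedged illustration, not
part of the patch: vm_fd is assumed to be an open KVM VM fd on a kernel
with this series applied, and the GPA is purely illustrative.

#include <string.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

/* Register a vcpu_info for vCPU 0, then read its GPA back. */
static int xen_vcpu_info_roundtrip(int vm_fd)
{
	struct kvm_xen_hvm_attr ha;

	memset(&ha, 0, sizeof(ha));
	ha.type = KVM_XEN_ATTR_TYPE_VCPU_INFO;
	ha.u.vcpu_attr.vcpu_id = 0;		/* id given to KVM_CREATE_VCPU */
	ha.u.vcpu_attr.gpa = 0x40000000ULL;	/* illustrative guest address */

	if (ioctl(vm_fd, KVM_XEN_HVM_SET_ATTR, &ha) < 0)
		return -1;

	/* GET fills gpa back in from the kernel's gfn_to_hva_cache. */
	memset(&ha, 0, sizeof(ha));
	ha.type = KVM_XEN_ATTR_TYPE_VCPU_INFO;
	ha.u.vcpu_attr.vcpu_id = 0;

	return ioctl(vm_fd, KVM_XEN_HVM_GET_ATTR, &ha);
}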