/* memory encryption */
void *memcrypt_handle;
int (*memcrypt_encrypt_data)(void *handle, uint8_t *ptr, uint64_t len);
+
+ /* xen guest state */
+ struct XenState xen;
};
KVMState *kvm_state;
KVM_CAP_LAST_INFO
};
+struct XenState *kvm_get_xen_state(KVMState *s)
+{
+ return &s->xen;
+}
+
int kvm_get_max_memslots(void)
{
KVMState *s = KVM_STATE(current_machine->accelerator);
cpu->kvm_fd = ret;
cpu->kvm_state = s;
cpu->vcpu_dirty = true;
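+ /* Let per-vCPU code reach the VM-wide Xen state */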
+ cpu->xen_state = &s->xen;
mmap_size = kvm_ioctl(s, KVM_GET_VCPU_MMAP_SIZE, 0);
if (mmap_size < 0) {
/* Available with KVM_CAP_HYPERV_CPUID */
#define KVM_GET_SUPPORTED_HV_CPUID _IOWR(KVMIO, 0xc1, struct kvm_cpuid2)
+#define KVM_XEN_HVM_GET_ATTR _IOWR(KVMIO, 0xc2, struct kvm_xen_hvm_attr)
+#define KVM_XEN_HVM_SET_ATTR _IOW(KVMIO, 0xc3, struct kvm_xen_hvm_attr)
+
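+/*
+ * Argument for KVM_XEN_HVM_GET_ATTR/KVM_XEN_HVM_SET_ATTR; for now only the
+ * shared info page GFN is carried.
+ */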
+struct kvm_xen_hvm_attr {
+ __u16 type;
+
+ union {
+ struct {
+ __u64 gfn;
+ } shared_info;
+ } u;
+};
+
+#define KVM_XEN_ATTR_TYPE_SHARED_INFO 0x0
+
/* Secure Encrypted Virtualization command */
enum sev_cmd_id {
/* Guest initialization commands */
--- /dev/null
+/*
+ * Definitions for Xen guest/hypervisor interaction - x86-specific part
+ *
+ * Copyright (c) 2019 Oracle and/or its affiliates. All rights reserved.
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ *
+ */
+
+#ifndef TARGET_I386_XEN_PROTO_H
+#define TARGET_I386_XEN_PROTO_H
+
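+/* Per-VM Xen guest state kept alongside the KVM accelerator state */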
+typedef struct XenState {
+ struct shared_info *shared_info;
+} XenState;
+
+#endif
+
#include "qemu/log.h"
#include "linux/kvm.h"
#include "exec/address-spaces.h"
-#include "standard-headers/xen/version.h"
#include "cpu.h"
#include "xen.h"
-
#include "trace.h"
+#include "standard-headers/xen/version.h"
+#include "standard-headers/xen/memory.h"
+
#define PAGE_OFFSET 0xffffffff80000000UL
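+/* Guest frame numbers are in units of 4 KiB pages */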
+#define PAGE_SHIFT 12
/*
* Unhandled hypercalls error:
return err ? HCALL_ERR : 0;
}
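+
+/*
+ * Tell KVM which guest frame holds the shared info page and cache its
+ * host virtual address for later use.
+ */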
+static int xen_set_shared_info(CPUState *cs, struct shared_info *shi,
+ uint64_t gfn)
+{
+ struct kvm_xen_hvm_attr xhsi;
+ XenState *xen = cs->xen_state;
+ KVMState *s = cs->kvm_state;
+ int err;
+
+ xhsi.type = KVM_XEN_ATTR_TYPE_SHARED_INFO;
+ xhsi.u.shared_info.gfn = gfn;
+ err = kvm_vm_ioctl(s, KVM_XEN_HVM_SET_ATTR, &xhsi);
+ if (err) {
+ return err;
+ }
+
+ trace_kvm_xen_set_shared_info(gfn);
+ xen->shared_info = shi;
+ return 0;
+}
+
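+/*
+ * Handle __HYPERVISOR_memory_op; only the XENMAPSPACE_shared_info case of
+ * XENMEM_add_to_physmap is recognised here.
+ */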
+static int kvm_xen_hcall_memory_op(struct kvm_xen_exit *exit,
+ int cmd, uint64_t arg, X86CPU *cpu)
+{
+ CPUState *cs = CPU(cpu);
+ int err = 0;
+
+ switch (cmd) {
+ case XENMEM_add_to_physmap: {
+ struct xen_add_to_physmap *xatp;
+ struct shared_info *shi;
+
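+ /* arg is a guest virtual pointer to a struct xen_add_to_physmap */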
+ xatp = gva_to_hva(cs, arg);
+ if (!xatp) {
+ err = -EFAULT;
+ break;
+ }
+
+ switch (xatp->space) {
+ case XENMAPSPACE_shared_info:
+ break;
+ default:
+ err = -ENOSYS;
+ break;
+ }
+ if (err) {
+ break;
+ }
+
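+ /* Convert the guest frame number to a host virtual address */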
+ shi = gpa_to_hva(xatp->gpfn << PAGE_SHIFT);
+ if (!shi) {
+ err = -EFAULT;
+ break;
+ }
+
+ err = xen_set_shared_info(cs, shi, xatp->gpfn);
+ break;
+ }
+ default:
+ err = -ENOSYS;
+ break;
+ }
+
+ exit->u.hcall.result = err;
+ return err ? HCALL_ERR : 0;
+}
+
static int __kvm_xen_handle_exit(X86CPU *cpu, struct kvm_xen_exit *exit)
{
uint16_t code = exit->u.hcall.input;
switch (code) {
+ case __HYPERVISOR_memory_op:
+ return kvm_xen_hcall_memory_op(exit, exit->u.hcall.params[0],
+ exit->u.hcall.params[1], cpu);
case __HYPERVISOR_xen_version:
return kvm_xen_hcall_xen_version(exit, cpu, exit->u.hcall.params[0],
exit->u.hcall.params[1]);