#define KVM_REQ_HV_STIMER KVM_ARCH_REQ(22)
#define KVM_REQ_LOAD_EOI_EXITMAP KVM_ARCH_REQ(23)
#define KVM_REQ_GET_VMCS12_PAGES KVM_ARCH_REQ(24)
+#define KVM_REQ_XEN_EXIT KVM_ARCH_REQ(25)
#define CR0_RESERVED_BITS \
(~(unsigned long)(X86_CR0_PE | X86_CR0_MP | X86_CR0_EM | X86_CR0_TS \
cpumask_t tlb_flush;
};
+/* Xen per-vCPU emulation context */
+struct kvm_vcpu_xen {
+ struct kvm_xen_exit exit; /* pending exit, delivered via KVM_REQ_XEN_EXIT */
+};
+
struct kvm_vcpu_arch {
/*
* rip and regs accesses must go through
unsigned long singlestep_rip;
struct kvm_vcpu_hv hyperv;
+ struct kvm_vcpu_xen xen;
cpumask_var_t wbinvd_dirty_mask;
atomic_t num_mismatched_vp_indexes;
};
+/* Xen emulation context */
+struct kvm_xen {
+ u64 xen_hypercall; /* non-zero once the Xen hypercall page is set up */
+};
+
enum kvm_irqchip_mode {
KVM_IRQCHIP_NONE,
KVM_IRQCHIP_KERNEL, /* created with KVM_CREATE_IRQCHIP */
struct hlist_head mask_notifier_list;
struct kvm_hv hyperv;
+ struct kvm_xen xen;
#ifdef CONFIG_KVM_MMU_AUDIT
int audit_point;
kvm-y += x86.o mmu.o emulate.o i8259.o irq.o lapic.o \
i8254.o ioapic.o irq_comm.o cpuid.o pmu.o mtrr.o \
- hyperv.o page_track.o debugfs.o
+ hyperv.o xen.o page_track.o debugfs.o
kvm-intel-y += vmx/vmx.o vmx/vmenter.o vmx/pmu_intel.o vmx/vmcs12.o vmx/evmcs.o vmx/nested.o
kvm-amd-y += svm.o pmu_amd.o
__entry->outgpa)
);
+/*
+ * Tracepoint for Xen hypercall.
+ */
+TRACE_EVENT(kvm_xen_hypercall,
+ TP_PROTO(unsigned long nr, unsigned long a0, unsigned long a1,
+ unsigned long a2, unsigned long a3, unsigned long a4),
+ TP_ARGS(nr, a0, a1, a2, a3, a4),
+
+ TP_STRUCT__entry(
+ __field(unsigned long, nr)
+ __field(unsigned long, a0)
+ __field(unsigned long, a1)
+ __field(unsigned long, a2)
+ __field(unsigned long, a3)
+ __field(unsigned long, a4)
+ ),
+
+ TP_fast_assign(
+ __entry->nr = nr;
+ __entry->a0 = a0;
+ __entry->a1 = a1;
+ __entry->a2 = a2;
+ __entry->a3 = a3;
+ __entry->a4 = a4;
+ ),
+
+ TP_printk("nr 0x%lx a0 0x%lx a1 0x%lx a2 0x%lx a3 0x%lx a4 0x%lx",
+ __entry->nr, __entry->a0, __entry->a1, __entry->a2,
+ __entry->a3, __entry->a4)
+);
+
/*
* Tracepoint for PIO.
*/
#include "cpuid.h"
#include "pmu.h"
#include "hyperv.h"
+#include "xen.h"
#include <linux/clocksource.h>
#include <linux/interrupt.h>
}
if (kvm_vcpu_write_guest(vcpu, page_addr, page, PAGE_SIZE))
goto out_free;
+
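+ /* Hypercall page is written; flag Xen hypercalls as enabled */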
+ kvm_xen_hypercall_set(kvm);
r = 0;
out_free:
kfree(page);
if (kvm_hv_hypercall_enabled(vcpu->kvm))
return kvm_hv_hypercall(vcpu);
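+ /* Xen hypercalls are forwarded to userspace via KVM_EXIT_XEN */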
+ if (kvm_xen_hypercall_enabled(vcpu->kvm))
+ return kvm_xen_hypercall(vcpu);
+
nr = kvm_register_read(vcpu, VCPU_REGS_RAX);
a0 = kvm_register_read(vcpu, VCPU_REGS_RBX);
a1 = kvm_register_read(vcpu, VCPU_REGS_RCX);
r = 0;
goto out;
}
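+ /*
+ * Deliver a previously recorded Xen exit (vcpu->arch.xen.exit) to
+ * userspace instead of entering the guest.
+ */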
+ if (kvm_check_request(KVM_REQ_XEN_EXIT, vcpu)) {
+ vcpu->run->exit_reason = KVM_EXIT_XEN;
+ vcpu->run->xen = vcpu->arch.xen.exit;
+ r = 0;
+ goto out;
+ }
/*
* KVM_REQ_HV_STIMER has to be processed after
--- /dev/null
+++ b/arch/x86/kvm/xen.c
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2019 Oracle and/or its affiliates. All rights reserved.
+ *
+ * KVM Xen emulation
+ */
+
+#include "x86.h"
+#include "xen.h"
+
+#include <linux/kvm_host.h>
+
+#include <trace/events/kvm.h>
+
+#include "trace.h"
+
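+/*
+ * xen_hypercall is set once the guest's Xen hypercall page has been
+ * written; the READ_ONCE/WRITE_ONCE pair keeps the flag access
+ * tear-free across vCPUs.
+ */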
+bool kvm_xen_hypercall_enabled(struct kvm *kvm)
+{
+ return READ_ONCE(kvm->arch.xen.xen_hypercall);
+}
+
+bool kvm_xen_hypercall_set(struct kvm *kvm)
+{
+ return WRITE_ONCE(kvm->arch.xen.xen_hypercall, 1);
+}
+
+static void kvm_xen_hypercall_set_result(struct kvm_vcpu *vcpu, u64 result)
+{
+ kvm_register_write(vcpu, VCPU_REGS_RAX, result);
+}
+
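+/*
+ * Resume after a KVM_EXIT_XEN hypercall exit: copy the result written
+ * by userspace into the guest's RAX and skip the hypercall instruction.
+ */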
+static int kvm_xen_hypercall_complete_userspace(struct kvm_vcpu *vcpu)
+{
+ struct kvm_run *run = vcpu->run;
+
+ kvm_xen_hypercall_set_result(vcpu, run->xen.u.hcall.result);
+ return kvm_skip_emulated_instruction(vcpu);
+}
+
+int kvm_xen_hypercall(struct kvm_vcpu *vcpu)
+{
+ bool longmode;
+ u64 input, params[5];
+
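+ /* Hypercall number is in RAX for both 32-bit and 64-bit guests */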
+ input = (u64)kvm_register_read(vcpu, VCPU_REGS_RAX);
+
+ longmode = is_64_bit_mode(vcpu);
+ if (!longmode) {
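+ /* 32-bit Xen hypercall ABI: arguments in EBX, ECX, EDX, ESI, EDI */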
+ params[0] = (u64)kvm_register_read(vcpu, VCPU_REGS_RBX);
+ params[1] = (u64)kvm_register_read(vcpu, VCPU_REGS_RCX);
+ params[2] = (u64)kvm_register_read(vcpu, VCPU_REGS_RDX);
+ params[3] = (u64)kvm_register_read(vcpu, VCPU_REGS_RSI);
+ params[4] = (u64)kvm_register_read(vcpu, VCPU_REGS_RDI);
+ }
+#ifdef CONFIG_X86_64
+ else {
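+ /* 64-bit Xen hypercall ABI: arguments in RDI, RSI, RDX, R10, R8 */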
+ params[0] = (u64)kvm_register_read(vcpu, VCPU_REGS_RDI);
+ params[1] = (u64)kvm_register_read(vcpu, VCPU_REGS_RSI);
+ params[2] = (u64)kvm_register_read(vcpu, VCPU_REGS_RDX);
+ params[3] = (u64)kvm_register_read(vcpu, VCPU_REGS_R10);
+ params[4] = (u64)kvm_register_read(vcpu, VCPU_REGS_R8);
+ }
+#endif
+ trace_kvm_xen_hypercall(input, params[0], params[1], params[2],
+ params[3], params[4]);
+
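+ /*
+ * Exit to userspace with KVM_EXIT_XEN; the return back to the guest
+ * is completed by kvm_xen_hypercall_complete_userspace() once
+ * userspace has written the hypercall result.
+ */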
+ vcpu->run->exit_reason = KVM_EXIT_XEN;
+ vcpu->run->xen.type = KVM_EXIT_XEN_HCALL;
+ vcpu->run->xen.u.hcall.input = input;
+ vcpu->run->xen.u.hcall.params[0] = params[0];
+ vcpu->run->xen.u.hcall.params[1] = params[1];
+ vcpu->run->xen.u.hcall.params[2] = params[2];
+ vcpu->run->xen.u.hcall.params[3] = params[3];
+ vcpu->run->xen.u.hcall.params[4] = params[4];
+ vcpu->arch.complete_userspace_io =
+ kvm_xen_hypercall_complete_userspace;
+
+ return 0;
+}
--- /dev/null
+++ b/arch/x86/kvm/xen.h
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2019 Oracle and/or its affiliates. All rights reserved. */
+#ifndef __ARCH_X86_KVM_XEN_H__
+#define __ARCH_X86_KVM_XEN_H__
+
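+/*
+ * Hypercall page setup calls kvm_xen_hypercall_set(); once enabled,
+ * kvm_emulate_hypercall() routes Xen hypercalls to kvm_xen_hypercall(),
+ * which exits to userspace with KVM_EXIT_XEN.
+ */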
+bool kvm_xen_hypercall_enabled(struct kvm *kvm);
+bool kvm_xen_hypercall_set(struct kvm *kvm);
+int kvm_xen_hypercall(struct kvm_vcpu *vcpu);
+
+#endif
} u;
};
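+
+/*
+ * KVM_EXIT_XEN: Xen hypercalls are forwarded to userspace in run->xen.
+ * input holds the hypercall number (guest RAX) and params[] the five
+ * argument registers; userspace services the call, stores the return
+ * value in run->xen.u.hcall.result, and resumes the vCPU with KVM_RUN.
+ */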
+struct kvm_xen_exit {
+#define KVM_EXIT_XEN_HCALL 1
+ __u32 type;
+ union {
+ struct {
+ __u64 input;
+ __u64 result;
+ __u64 params[5];
+ } hcall;
+ } u;
+};
+
#define KVM_S390_GET_SKEYS_NONE 1
#define KVM_S390_SKEYS_MAX 1048576
#define KVM_EXIT_S390_STSI 25
#define KVM_EXIT_IOAPIC_EOI 26
#define KVM_EXIT_HYPERV 27
+#define KVM_EXIT_XEN 28
/* For KVM_EXIT_INTERNAL_ERROR */
/* Emulate instruction failed. */
} eoi;
/* KVM_EXIT_HYPERV */
struct kvm_hyperv_exit hyperv;
+ /* KVM_EXIT_XEN */
+ struct kvm_xen_exit xen;
/* Fix the size of the union. */
char padding[256];
};
/* 2048 is the size of the char array used to bound/pad the size