cpumask_t tlb_flush;
};
+/*
+ * Per-vCPU record of how Xen event-channel upcalls are delivered to the
+ * guest. Configured by kvm_xen_setup_evtchn(), consumed on the interrupt
+ * injection path via kvm_xen_{has,get}_interrupt().
+ */
+struct kvm_xen_callback {
+ u32 via; /* delivery method: KVM_XEN_CALLBACK_VIA_VECTOR or _EVTCHN */
+ u32 vector; /* guest vector used for the upcall */
+ atomic_t queued; /* non-zero when a VIA_VECTOR upcall awaits injection */
+};
+
/* Xen per vcpu emulation context */
struct kvm_vcpu_xen {
struct kvm_xen_exit exit;
struct pvclock_vcpu_time_info *pv_time;
gpa_t steal_time_addr;
struct vcpu_runstate_info *steal_time;
+ struct kvm_xen_callback cb; /* upcall delivery state, set by kvm_xen_setup_evtchn() */
};
struct kvm_vcpu_arch {
struct shared_info *shinfo;
};
+/*
+ * Mechanisms for routing a Xen upcall into the guest. Only _VECTOR and
+ * _EVTCHN are handled in-kernel here (see kvm_xen_do_upcall()); _GSI and
+ * _PCI_INTX are listed for completeness of the via space.
+ */
+enum kvm_xen_callback_via {
+ KVM_XEN_CALLBACK_VIA_GSI,
+ KVM_XEN_CALLBACK_VIA_PCI_INTX,
+ KVM_XEN_CALLBACK_VIA_VECTOR,
+ KVM_XEN_CALLBACK_VIA_EVTCHN,
+};
+
enum kvm_irqchip_mode {
KVM_IRQCHIP_NONE,
KVM_IRQCHIP_KERNEL, /* created with KVM_CREATE_IRQCHIP */
#include "irq.h"
#include "i8254.h"
#include "x86.h"
+#include "xen.h"
/*
* check if there are pending timer events
return pending_userspace_extint(v);
else
return v->kvm->arch.vpic->output;
- } else
+ } else if (kvm_xen_has_interrupt(v) != -1)
+ return 1;
+ else
return 0;
}
if (kvm_cpu_has_extint(v))
return 1;
- return kvm_apic_has_interrupt(v) != -1; /* LAPIC */
+ return kvm_apic_has_interrupt(v) != -1; /* LAPIC */
}
EXPORT_SYMBOL_GPL(kvm_cpu_has_interrupt);
v->arch.pending_external_vector = -1;
return vector;
- } else
+ } else {
+ int vector = kvm_xen_get_interrupt(v);
+
+ if (vector)
+ return vector; /* Xen */
return kvm_pic_read_irq(v->kvm); /* PIC */
+ }
} else
return -1;
}
#include "lapic.h"
#include "hyperv.h"
+#include "xen.h"
#include "x86.h"
static int kvm_set_pic_irq(struct kvm_kernel_irq_routing_entry *e,
int r;
switch (e->type) {
+ case KVM_IRQ_ROUTING_XEN_EVTCHN:
+ return kvm_xen_set_evtchn(e, kvm, irq_source_id, level,
+ line_status);
case KVM_IRQ_ROUTING_HV_SINT:
return kvm_hv_set_sint(e, kvm, irq_source_id, level,
line_status);
e->hv_sint.vcpu = ue->u.hv_sint.vcpu;
e->hv_sint.sint = ue->u.hv_sint.sint;
break;
+ case KVM_IRQ_ROUTING_XEN_EVTCHN:
+ e->set = kvm_xen_set_evtchn;
+ e->evtchn.vcpu = ue->u.evtchn.vcpu;
+ e->evtchn.vector = ue->u.evtchn.vector;
+ e->evtchn.via = ue->u.evtchn.via;
+
+ return kvm_xen_setup_evtchn(kvm, e);
default:
return -EINVAL;
}
#include "x86.h"
#include "xen.h"
+#include "ioapic.h"
#include <linux/kvm_host.h>
#include <linux/sched/stat.h>
#include "trace.h"
+/* Forward declaration: resolves the guest's mapped vcpu_info (defined later). */
+static void *xen_vcpu_info(struct kvm_vcpu *v);
+
+/*
+ * Check whether this vCPU has a Xen upcall pending.
+ *
+ * Pending when either the in-kernel callback was queued by
+ * kvm_xen_do_upcall() (cb.queued), or the guest-visible
+ * vcpu_info->evtchn_upcall_pending flag is set.
+ *
+ * Returns 1 if pending, -1 otherwise — callers compare against -1,
+ * matching the kvm_apic_has_interrupt() "-1 means none" convention.
+ */
+int kvm_xen_has_interrupt(struct kvm_vcpu *vcpu)
+{
+ struct kvm_vcpu_xen *vcpu_xen = vcpu_to_xen_vcpu(vcpu);
+ struct vcpu_info *vcpu_info = xen_vcpu_info(vcpu);
+
+ /* vcpu_info may be NULL until the guest registers its shared info page. */
+ if (!!atomic_read(&vcpu_xen->cb.queued) || (vcpu_info &&
+ test_bit(0, (unsigned long *) &vcpu_info->evtchn_upcall_pending)))
+ return 1;
+
+ return -1;
+}
+
+/*
+ * Consume a pending Xen upcall.
+ *
+ * Returns the vector to inject into the guest, or 0 when nothing is
+ * pending. Clears cb.queued as a side effect, so the same queued
+ * callback is delivered at most once.
+ */
+int kvm_xen_get_interrupt(struct kvm_vcpu *vcpu)
+{
+ struct kvm_vcpu_xen *vcpu_xen = vcpu_to_xen_vcpu(vcpu);
+ u32 vector = vcpu_xen->cb.vector;
+
+ if (kvm_xen_has_interrupt(vcpu) == -1)
+ return 0;
+
+ /* Ack the queued callback; the caller injects 'vector'. */
+ atomic_set(&vcpu_xen->cb.queued, 0);
+ return vector;
+}
+
+/*
+ * Deliver a Xen upcall to @dest_vcpu using the given @via method.
+ *
+ * VIA_VECTOR: mark the callback queued and kick the vCPU out of guest
+ * mode; the vector is injected later through kvm_xen_get_interrupt()
+ * on the interrupt path (KVM_REQ_EVENT forces re-evaluation).
+ * VIA_EVTCHN: deliver immediately as a fixed-mode local APIC interrupt
+ * to the destination vCPU.
+ *
+ * Returns 0 on success, -EINVAL for a bad vector, vcpu or via.
+ */
+static int kvm_xen_do_upcall(struct kvm *kvm, u32 dest_vcpu,
+ u32 via, u32 vector, int level)
+{
+ struct kvm_vcpu_xen *vcpu_xen;
+ struct kvm_lapic_irq irq;
+ struct kvm_vcpu *vcpu;
+
+ /* Vectors below 0x10 are reserved for CPU exceptions on x86. */
+ if (vector > 0xff || vector < 0x10 || dest_vcpu >= KVM_MAX_VCPUS)
+ return -EINVAL;
+
+ vcpu = kvm_get_vcpu(kvm, dest_vcpu);
+ if (!vcpu)
+ return -EINVAL;
+
+ memset(&irq, 0, sizeof(irq));
+ if (via == KVM_XEN_CALLBACK_VIA_VECTOR) {
+ vcpu_xen = vcpu_to_xen_vcpu(vcpu);
+ atomic_set(&vcpu_xen->cb.queued, 1);
+ kvm_make_request(KVM_REQ_EVENT, vcpu);
+ kvm_vcpu_kick(vcpu);
+ } else if (via == KVM_XEN_CALLBACK_VIA_EVTCHN) {
+ irq.shorthand = APIC_DEST_SELF;
+ irq.dest_mode = APIC_DEST_PHYSICAL;
+ irq.delivery_mode = APIC_DM_FIXED;
+ irq.vector = vector;
+ irq.level = level;
+
+ /* Deliver upcall to a vector on the destination vcpu */
+ kvm_irq_delivery_to_apic(kvm, vcpu->arch.apic, &irq, NULL);
+ } else {
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/*
+ * Routing-entry 'set' callback for KVM_IRQ_ROUTING_XEN_EVTCHN entries:
+ * forwards to kvm_xen_do_upcall() with the entry's via/vcpu/vector.
+ * @irq_source_id and @line_status are unused here.
+ */
+int kvm_xen_set_evtchn(struct kvm_kernel_irq_routing_entry *e,
+ struct kvm *kvm, int irq_source_id, int level,
+ bool line_status)
+{
+ /*
+ * The routing information for the kirq specifies the vector
+ * on the destination vcpu.
+ */
+ return kvm_xen_do_upcall(kvm, e->evtchn.vcpu, e->evtchn.via,
+ e->evtchn.vector, level);
+}
+
+/*
+ * Validate a KVM_IRQ_ROUTING_XEN_EVTCHN routing entry and record the
+ * chosen callback (via + vector) in the destination vCPU's Xen state,
+ * where kvm_xen_{has,get}_interrupt() will find it.
+ *
+ * Returns 0 on success, -EINVAL on a bad vector, vcpu or via.
+ */
+int kvm_xen_setup_evtchn(struct kvm *kvm,
+ struct kvm_kernel_irq_routing_entry *e)
+{
+ struct kvm_vcpu_xen *vcpu_xen;
+ struct kvm_vcpu *vcpu = NULL;
+
+ /* Vectors below 0x10 are reserved for CPU exceptions on x86. */
+ if (e->evtchn.vector > 0xff || e->evtchn.vector < 0x10)
+ return -EINVAL;
+
+ /* Expect vcpu to be sane */
+ if (e->evtchn.vcpu >= KVM_MAX_VCPUS)
+ return -EINVAL;
+
+ vcpu = kvm_get_vcpu(kvm, e->evtchn.vcpu);
+ if (!vcpu)
+ return -EINVAL;
+
+ vcpu_xen = vcpu_to_xen_vcpu(vcpu);
+ if (e->evtchn.via == KVM_XEN_CALLBACK_VIA_VECTOR) {
+ vcpu_xen->cb.via = KVM_XEN_CALLBACK_VIA_VECTOR;
+ vcpu_xen->cb.vector = e->evtchn.vector;
+ } else if (e->evtchn.via == KVM_XEN_CALLBACK_VIA_EVTCHN) {
+ vcpu_xen->cb.via = KVM_XEN_CALLBACK_VIA_EVTCHN;
+ vcpu_xen->cb.vector = e->evtchn.vector;
+ } else {
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
static void set_vcpu_attr(struct kvm_vcpu *v, u16 type, gpa_t gpa, void *addr)
{
struct kvm_vcpu_xen *vcpu_xen = vcpu_to_xen_vcpu(v);
bool kvm_xen_hypercall_set(struct kvm *kvm);
int kvm_xen_hypercall(struct kvm_vcpu *vcpu);
+int kvm_xen_has_interrupt(struct kvm_vcpu *vcpu);
+int kvm_xen_get_interrupt(struct kvm_vcpu *vcpu);
+
+int kvm_xen_set_evtchn(struct kvm_kernel_irq_routing_entry *e,
+ struct kvm *kvm, int irq_source_id, int level,
+ bool line_status);
+int kvm_xen_setup_evtchn(struct kvm *kvm,
+ struct kvm_kernel_irq_routing_entry *e);
+
void kvm_xen_destroy_vm(struct kvm *kvm);
void kvm_xen_vcpu_uninit(struct kvm_vcpu *vcpu);
u32 sint;
};
+/*
+ * struct kvm_xen_evtchn: currently specifies the upcall vector setup to
+ * deliver the interrupt to the guest.
+ *
+ * via = XEN_PARAM_CALLBACK_VIA_TYPE_GSI|_PCI
+ * vcpu: always deliver to vcpu-0
+ * vector: is used as upcall-vector
+ * EOI: none
+ * via = XEN_PARAM_CALLBACK_VIA_TYPE_VECTOR
+ * vcpu: deliver to specified vcpu
+ * vector: used as upcall-vector
+ * EOI: none
+ * via = XEN_PARAM_CALLBACK_VIA_TYPE_EVTCHN
+ * vcpu: deliver to specified vcpu (vector should be bound to the vcpu)
+ * vector: used as upcall-vector
+ * EOI: expected
+ */
+struct kvm_xen_evtchn {
+ u32 via; /* delivery method (enum kvm_xen_callback_via values) */
+ u32 vcpu; /* destination vcpu index */
+ u32 vector; /* guest upcall vector */
+};
+
struct kvm_kernel_irq_routing_entry {
u32 gsi;
u32 type;
} msi;
struct kvm_s390_adapter_int adapter;
struct kvm_hv_sint hv_sint;
+ struct kvm_xen_evtchn evtchn;
};
struct hlist_node link;
};
__u32 sint;
};
+/*
+ * Userspace ABI: payload for KVM_IRQ_ROUTING_XEN_EVTCHN routing entries
+ * (copied verbatim into the in-kernel struct kvm_xen_evtchn).
+ */
+struct kvm_irq_routing_xen_evtchn {
+ __u32 via; /* delivery method; matches enum kvm_xen_callback_via */
+ __u32 vcpu; /* destination vcpu index */
+ __u32 vector; /* guest upcall vector */
+};
+
/* gsi routing entry types */
#define KVM_IRQ_ROUTING_IRQCHIP 1
#define KVM_IRQ_ROUTING_MSI 2
#define KVM_IRQ_ROUTING_S390_ADAPTER 3
#define KVM_IRQ_ROUTING_HV_SINT 4
+#define KVM_IRQ_ROUTING_XEN_EVTCHN 5
struct kvm_irq_routing_entry {
__u32 gsi;
struct kvm_irq_routing_msi msi;
struct kvm_irq_routing_s390_adapter adapter;
struct kvm_irq_routing_hv_sint hv_sint;
+ struct kvm_irq_routing_xen_evtchn evtchn;
__u32 pad[8];
} u;
};