From: Joao Martins
Date: Mon, 17 Sep 2018 11:04:54 +0000 (-0400)
Subject: i386/xen: handle PV timer hypercalls
X-Git-Url: https://www.infradead.org/git/?a=commitdiff_plain;h=4b97942369a80cb9afd712a6bc0eafaaf6346125;p=users%2Fdwmw2%2Fqemu.git

i386/xen: handle PV timer hypercalls

Introduce support for the one-shot and periodic modes of Xen PV timers.
Timer interrupts come through a special VIRQ event channel, with
deadlines being set through:

1) the set_timer_op hypercall (one-shot only)
2) the vcpu_op hypercall, for the {set,stop}_{singleshot,periodic}_timer
   sub-ops

Signed-off-by: Joao Martins
---

diff --git a/target/i386/xen-proto.h b/target/i386/xen-proto.h
index e8f21d5dd9..1176bdf2e9 100644
--- a/target/i386/xen-proto.h
+++ b/target/i386/xen-proto.h
@@ -44,6 +44,9 @@ typedef struct XenCPUState {
     struct XenCallbackVector cb;
 #define NR_VIRQS 24
     struct XenEvtChn *virq_to_evtchn[NR_VIRQS];
+    struct QEMUTimer *oneshot_timer;
+    struct QEMUTimer *periodic_timer;
+    unsigned long period_ns;
 } XenCPUState;
 
 #endif
diff --git a/target/i386/xen.c b/target/i386/xen.c
index 92a6a459da..c20fa5629d 100644
--- a/target/i386/xen.c
+++ b/target/i386/xen.c
@@ -75,6 +75,20 @@ static void *gva_to_hva(CPUState *cs, uint64_t gva)
     return gpa_to_hva(gva_to_gpa(cs, gva));
 }
 
+static uint64_t kvm_get_current_ns(CPUState *cs)
+{
+    struct kvm_clock_data data;
+    int ret;
+
+    ret = kvm_vm_ioctl(cs->kvm_state, KVM_GET_CLOCK, &data);
+    if (ret < 0) {
+        fprintf(stderr, "KVM_GET_CLOCK failed: %s\n", strerror(ret));
+        abort();
+    }
+
+    return data.clock;
+}
+
 static void arch_init_hypercall_page(CPUState *cs, void *addr)
 {
     CPUX86State *env = cs->env_ptr;
@@ -474,6 +488,135 @@ static int vcpuop_register_runstate_info(CPUState *cs, CPUState *target,
     return xen_set_vcpu_attr(target, KVM_XEN_ATTR_TYPE_VCPU_RUNSTATE, gpa);
 }
 
+static void xen_vcpu_timer_event(void *opaque)
+{
+    CPUState *cpu = opaque;
+    XenCPUState *xcpu = &X86_CPU(cpu)->env.xen_vcpu;
+    struct XenEvtChn *evtchn = xcpu->virq_to_evtchn[VIRQ_TIMER];
+
+    if (likely(evtchn)) {
+        evtchn_2l_set_pending(X86_CPU(cpu), evtchn);
+    }
+}
+
+static void xen_vcpu_periodic_timer_event(void *opaque)
+{
+    CPUState *cpu = opaque;
+    XenCPUState *xcpu = &X86_CPU(cpu)->env.xen_vcpu;
+    struct XenEvtChn *evtchn = xcpu->virq_to_evtchn[VIRQ_TIMER];
+    unsigned long now;
+
+    if (likely(evtchn)) {
+        evtchn_2l_set_pending(X86_CPU(cpu), evtchn);
+    }
+
+    now = kvm_get_current_ns(cpu);
+    timer_mod_ns(xcpu->periodic_timer, now + xcpu->period_ns);
+}
+
+static int xen_vcpu_timer_init(CPUState *cpu)
+{
+    XenCPUState *xcpu = &X86_CPU(cpu)->env.xen_vcpu;
+    QEMUTimer *timer;
+
+    if (xcpu->oneshot_timer) {
+        return 0;
+    }
+
+    timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, xen_vcpu_timer_event, cpu);
+    if (!timer) {
+        return -ENOMEM;
+    }
+
+    xcpu->oneshot_timer = timer;
+    return 0;
+}
+
+static int vcpuop_set_singleshot_timer(CPUState *cs, CPUState *target,
+                                       uint64_t arg)
+{
+    XenCPUState *xt = &X86_CPU(target)->env.xen_vcpu;
+    struct vcpu_set_singleshot_timer *sst;
+    long now, qemu_now, interval;
+
+    if (xen_vcpu_timer_init(target)) {
+        return -EFAULT;
+    }
+
+    sst = gva_to_hva(cs, arg);
+    if (!sst) {
+        return -EFAULT;
+    }
+
+    now = kvm_get_current_ns(cs);
+    qemu_now = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
+    interval = sst->timeout_abs_ns - now;
+
+    if ((sst->flags & VCPU_SSHOTTMR_future) &&
+        sst->timeout_abs_ns < now) {
+        return -ETIME;
+    }
+
+    timer_mod_ns(xt->oneshot_timer, qemu_now + interval);
+
+    return 0;
+}
+
+static void vcpuop_stop_singleshot_timer(CPUState *cs, CPUState *target,
+                                         uint64_t arg)
+{
+    XenCPUState *xt = &X86_CPU(target)->env.xen_vcpu;
+
+    if (likely(xt->oneshot_timer)) {
+        timer_del(xt->oneshot_timer);
+    }
+}
+
+static int vcpuop_set_periodic_timer(CPUState *cs, CPUState *target,
+                                     uint64_t arg)
+{
+    XenCPUState *xt = &X86_CPU(target)->env.xen_vcpu;
+    struct vcpu_set_periodic_timer *spt;
+    unsigned long now;
+
+    if (!xt->periodic_timer) {
+        QEMUTimer *timer;
+
+        timer = timer_new_ns(QEMU_CLOCK_VIRTUAL,
+                             xen_vcpu_periodic_timer_event, target);
+        if (!timer) {
+            return -EFAULT;
+        }
+        xt->periodic_timer = timer;
+    }
+
+    spt = gva_to_hva(cs, arg);
+    if (!spt) {
+        return -EFAULT;
+    }
+
+    if (!spt->period_ns) {
+        return -EFAULT;
+    }
+
+    timer_del(xt->periodic_timer);
+    xt->period_ns = spt->period_ns;
+
+    now = kvm_get_current_ns(cs);
+    timer_mod_ns(xt->periodic_timer, now + xt->period_ns);
+
+    return 0;
+}
+
+static void vcpuop_stop_periodic_timer(CPUState *cs, CPUState *target,
+                                       uint64_t arg)
+{
+    XenCPUState *xt = &X86_CPU(target)->env.xen_vcpu;
+
+    if (likely(xt->periodic_timer))
+        timer_del(xt->periodic_timer);
+}
+
 static int kvm_xen_hcall_vcpu_op(struct kvm_xen_exit *exit, X86CPU *cpu,
                                  int cmd, int vcpu_id, uint64_t arg)
 {
@@ -494,6 +637,24 @@ static int kvm_xen_hcall_vcpu_op(struct kvm_xen_exit *exit, X86CPU *cpu,
         err = vcpuop_register_vcpu_info(cs, dest, arg);
         break;
     }
+    case VCPUOP_set_singleshot_timer: {
+        err = vcpuop_set_singleshot_timer(cs, dest, arg);
+        break;
+    }
+    case VCPUOP_stop_singleshot_timer: {
+        vcpuop_stop_singleshot_timer(cs, dest, arg);
+        err = 0;
+        break;
+    }
+    case VCPUOP_set_periodic_timer: {
+        err = vcpuop_set_periodic_timer(cs, dest, arg);
+        break;
+    }
+    case VCPUOP_stop_periodic_timer: {
+        vcpuop_stop_periodic_timer(cs, dest, arg);
+        err = 0;
+        break;
+    }
     }
 
     exit->u.hcall.result = err;
@@ -603,11 +764,45 @@ static int kvm_xen_hcall_sched_op(struct kvm_xen_exit *exit, X86CPU *cpu,
     return err ? HCALL_ERR : 0;
 }
 
+static int kvm_xen_hcall_set_timer_op(struct kvm_xen_exit *exit, X86CPU *cpu,
+                                      uint64_t timeout)
+{
+    XenCPUState *xcpu = &cpu->env.xen_vcpu;
+    long qemu_now, now, offset = 0;
+    int err = -ENOSYS;
+
+    if (xen_vcpu_timer_init(CPU(cpu))) {
+        goto error;
+    }
+
+    qemu_now = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
+    now = kvm_get_current_ns(CPU(cpu));
+    offset = timeout - now;
+
+    err = 0;
+    if (timeout == 0) {
+        timer_del(xcpu->oneshot_timer);
+    } else if (unlikely(timeout < now) || ((uint32_t) (offset >> 50) != 0)) {
+        offset = (50 * SCALE_MS);
+        timer_mod_ns(xcpu->oneshot_timer, qemu_now + offset);
+    } else {
+        xcpu->oneshot_timer->opaque = CPU(cpu);
+        timer_mod_ns(xcpu->oneshot_timer, qemu_now + offset);
+    }
+
+error:
+    exit->u.hcall.result = err;
+    return err ? HCALL_ERR : 0;
+}
+
 static int __kvm_xen_handle_exit(X86CPU *cpu, struct kvm_xen_exit *exit)
 {
     uint16_t code = exit->u.hcall.input;
 
     switch (code) {
+    case __HYPERVISOR_set_timer_op:
+        return kvm_xen_hcall_set_timer_op(exit, cpu,
+                                          exit->u.hcall.params[0]);
     case HVMOP_set_evtchn_upcall_vector:
         return kvm_xen_hcall_evtchn_upcall_vector(exit, cpu,
                                                   exit->u.hcall.params[0]);
diff --git a/target/i386/xen_evtchn.c b/target/i386/xen_evtchn.c
index 4f3f5934d0..2187f38d7c 100644
--- a/target/i386/xen_evtchn.c
+++ b/target/i386/xen_evtchn.c
@@ -159,7 +159,7 @@ static void evtchn_2l_vcpu_set_pending(X86CPU *cpu)
     kvm_xen_vcpu_inject_upcall(cpu);
 }
 
-static void evtchn_2l_set_pending(X86CPU *cpu, XenEvtChn *evtchn)
+void evtchn_2l_set_pending(X86CPU *cpu, XenEvtChn *evtchn)
 {
     struct shared_info *shared_info = CPU(cpu)->xen_state->shared_info;
     struct vcpu_info *vcpu_info = cpu->env.xen_vcpu.info;
diff --git a/target/i386/xen_evtchn.h b/target/i386/xen_evtchn.h
index 72af31a18c..c0f45707e5 100644
--- a/target/i386/xen_evtchn.h
+++ b/target/i386/xen_evtchn.h
@@ -25,4 +25,6 @@ int kvm_xen_evtchn_status(X86CPU *cpu, void *arg);
 int kvm_xen_evtchn_send(X86CPU *cpu, void *arg);
 int kvm_xen_evtchn_vcpu_init(X86CPU *cpu, struct vcpu_info *info);
 
+void evtchn_2l_set_pending(X86CPU *cpu, XenEvtChn *evtchn);
+
 #endif
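
For reference, below is an illustrative guest-side sketch of the interface
emulated above. It assumes Linux-style hypercall wrappers
(HYPERVISOR_vcpu_op(), HYPERVISOR_set_timer_op()) and the Xen public headers;
the helper names are examples only and are not part of this patch. Timer
interrupts then arrive on the VIRQ_TIMER event channel that the guest bound
earlier with EVTCHNOP_bind_virq.

/*
 * Guest-side sketch (not part of the patch): arming and stopping the
 * Xen PV timers that the code above emulates.
 */
#include <xen/interface/vcpu.h>   /* VCPUOP_*, struct vcpu_set_*_timer */
#include <asm/xen/hypercall.h>    /* HYPERVISOR_vcpu_op(), HYPERVISOR_set_timer_op() */

/* One-shot: the deadline is absolute Xen system time in nanoseconds. */
static int xen_oneshot_example(uint64_t timeout_abs_ns)
{
    struct vcpu_set_singleshot_timer sst = {
        .timeout_abs_ns = timeout_abs_ns,
        .flags = VCPU_SSHOTTMR_future, /* -ETIME if the deadline already passed */
    };

    /* HYPERVISOR_set_timer_op(timeout_abs_ns) is the older, roughly equivalent route. */
    return HYPERVISOR_vcpu_op(VCPUOP_set_singleshot_timer, 0, &sst);
}

/* Periodic: one timer interrupt every period_ns until stopped. */
static int xen_periodic_example(uint64_t period_ns)
{
    struct vcpu_set_periodic_timer spt = {
        .period_ns = period_ns,
    };

    return HYPERVISOR_vcpu_op(VCPUOP_set_periodic_timer, 0, &spt);
}

/* Tear both timers down again; the argument is ignored for the stop sub-ops. */
static void xen_stop_timers_example(void)
{
    HYPERVISOR_vcpu_op(VCPUOP_stop_singleshot_timer, 0, NULL);
    HYPERVISOR_vcpu_op(VCPUOP_stop_periodic_timer, 0, NULL);
}

On the emulation side, the patch converts the guest's absolute deadline (KVM
clock, read via KVM_GET_CLOCK) into a QEMU_CLOCK_VIRTUAL expiry as
qemu_now + (timeout_abs_ns - kvm_now), and the timer callbacks deliver the
interrupt by raising VIRQ_TIMER through evtchn_2l_set_pending().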