www.infradead.org Git - users/dwmw2/linux.git/commitdiff
KVM: x86/xen: PV oneshot timer fixes
author: David Woodhouse <dwmw@amazon.co.uk>
Wed, 9 Mar 2022 14:04:25 +0000 (14:04 +0000)
committer: David Woodhouse <dwmw@amazon.co.uk>
Wed, 9 Mar 2022 16:26:54 +0000 (16:26 +0000)
Fix the case where a restored timer is supposed to have triggered not
just in the past, but before this kernel even booted. The resulting
integer wrap caused the timer to be set a very long time into the future,
and thus effectively never trigger. Trigger timers immediately when
delta_ns <= 0 to avoid that situation.

Also switch to using HRTIMER_MODE_ABS_HARD, following the changes in the
local APIC timer in commits 2c0d278f3293f ("KVM: LAPIC: Mark hrtimer to
expire in hard interrupt context") and 4d151bf3b89e7 ("KVM: LAPIC: Make
lapic timer unpinned"). Since we only support the Xen oneshot timer and
not the periodic timer, we also don't need to bother with migrating it
from one physical CPU to another when the vCPU moves; it'll get started
again soon enough anyway.

When the timer fires, set the recorded expiry time to zero so that when
userspace queries the state it correctly sees zero in the expires_ns to
indicate that the timer isn't active, and avoid duplicate events after
live migration / live update.

Finally, fix the 'delta' in kvm_xen_hcall_set_timer_op() to explicitly
use 'int64_t' instead of 'long' to make the sanity check shift by 50
bits work correctly in the 32-bit build. That last one was:

Reported-by: kernel test robot <lkp@intel.com>
Reported-by: Dan Carpenter <dan.carpenter@oracle.com>
Signed-off-by: David Woodhouse <dwmw@amazon.co.uk>
arch/x86/kvm/irq.c
arch/x86/kvm/xen.c
arch/x86/kvm/xen.h

index af2d26fc5458a975df02a0992efa432e9fde75c6..f371f1292ca3e0dcd3cf05132ce4fbf7153949ba 100644 (file)
@@ -156,7 +156,6 @@ void __kvm_migrate_timers(struct kvm_vcpu *vcpu)
 {
        __kvm_migrate_apic_timer(vcpu);
        __kvm_migrate_pit_timer(vcpu);
-       __kvm_migrate_xen_timer(vcpu);
        static_call_cond(kvm_x86_migrate_timers)(vcpu);
 }
 
index 8c85a71aa8ca6bb8dc418275117b8388320bae47..7e7c8a5bff52a3f5d003941261e7576726549ff1 100644 (file)
@@ -122,6 +122,8 @@ void kvm_xen_inject_timer_irqs(struct kvm_vcpu *vcpu)
                e.priority = KVM_IRQ_ROUTING_XEN_EVTCHN_PRIO_2LEVEL;
 
                kvm_xen_set_evtchn(&e, vcpu->kvm);
+
+               vcpu->arch.xen.timer_expires = 0;
                atomic_set(&vcpu->arch.xen.timer_pending, 0);
        }
 }
@@ -130,19 +132,9 @@ static enum hrtimer_restart xen_timer_callback(struct hrtimer *timer)
 {
        struct kvm_vcpu *vcpu = container_of(timer, struct kvm_vcpu,
                                             arch.xen.timer);
-       struct kvm_xen_evtchn e;
-
        if (atomic_read(&vcpu->arch.xen.timer_pending))
                return HRTIMER_NORESTART;
 
-       e.vcpu_id = vcpu->vcpu_id;
-       e.vcpu_idx = vcpu->vcpu_idx;
-       e.port = vcpu->arch.xen.timer_virq;
-       e.priority = KVM_IRQ_ROUTING_XEN_EVTCHN_PRIO_2LEVEL;
-
-       if (kvm_xen_set_evtchn_fast(&e, vcpu->kvm) != -EWOULDBLOCK)
-               return HRTIMER_NORESTART;
-
        atomic_inc(&vcpu->arch.xen.timer_pending);
        kvm_make_request(KVM_REQ_UNBLOCK, vcpu);
        kvm_vcpu_kick(vcpu);
@@ -150,29 +142,19 @@ static enum hrtimer_restart xen_timer_callback(struct hrtimer *timer)
        return HRTIMER_NORESTART;
 }
 
-void __kvm_migrate_xen_timer(struct kvm_vcpu *vcpu)
-{
-       struct hrtimer *timer;
-
-       if (!kvm_xen_timer_enabled(vcpu))
-               return;
-
-       timer = &vcpu->arch.xen.timer;
-       if (hrtimer_cancel(timer))
-               hrtimer_start_expires(timer, HRTIMER_MODE_ABS_PINNED);
-}
-
-static void kvm_xen_start_timer(struct kvm_vcpu *vcpu, u64 guest_abs, u64 delta_ns)
+static void kvm_xen_start_timer(struct kvm_vcpu *vcpu, u64 guest_abs, s64 delta_ns)
 {
-       ktime_t ktime_now;
-
        atomic_set(&vcpu->arch.xen.timer_pending, 0);
        vcpu->arch.xen.timer_expires = guest_abs;
 
-       ktime_now = ktime_get();
-       hrtimer_start(&vcpu->arch.xen.timer,
-                     ktime_add_ns(ktime_now, delta_ns),
-                     HRTIMER_MODE_ABS_PINNED);
+       if (delta_ns <= 0) {
+               xen_timer_callback(&vcpu->arch.xen.timer);
+       } else {
+               ktime_t ktime_now = ktime_get();
+               hrtimer_start(&vcpu->arch.xen.timer,
+                             ktime_add_ns(ktime_now, delta_ns),
+                             HRTIMER_MODE_ABS_HARD);
+       }
 }
 
 static void kvm_xen_stop_timer(struct kvm_vcpu *vcpu)
@@ -185,7 +167,7 @@ static void kvm_xen_stop_timer(struct kvm_vcpu *vcpu)
 static void kvm_xen_init_timer(struct kvm_vcpu *vcpu)
 {
        hrtimer_init(&vcpu->arch.xen.timer, CLOCK_MONOTONIC,
-                    HRTIMER_MODE_ABS_PINNED);
+                    HRTIMER_MODE_ABS_HARD);
        vcpu->arch.xen.timer.function = xen_timer_callback;
 }
 
@@ -1204,7 +1186,7 @@ static bool kvm_xen_hcall_set_timer_op(struct kvm_vcpu *vcpu, uint64_t timeout,
 
        if (timeout) {
                uint64_t guest_now = get_kvmclock_ns(vcpu->kvm);
-               long delta = timeout - guest_now;
+               int64_t delta = timeout - guest_now;
 
                /* Xen has a 'Linux workaround' in do_set_timer_op() which
                 * checks for negative absolute timeout values (caused by
index ad0876a7c3016a12a2945179f0c5db5b6261ffac..2bbbc1a3953e2d466e3f6bd5168de5026e4360d1 100644 (file)
@@ -75,7 +75,6 @@ static inline int kvm_xen_has_pending_timer(struct kvm_vcpu *vcpu)
        return 0;
 }
 
-void __kvm_migrate_xen_timer(struct kvm_vcpu *vcpu);
 void kvm_xen_inject_timer_irqs(struct kvm_vcpu *vcpu);
 #else
 static inline int kvm_xen_write_hypercall_page(struct kvm_vcpu *vcpu, u64 data)