KVM: Timer event should not unconditionally unhalt vcpu.
author Gleb Natapov <gleb@redhat.com>
Mon, 23 Mar 2009 13:11:44 +0000 (15:11 +0200)
committer Avi Kivity <avi@redhat.com>
Tue, 24 Mar 2009 10:05:09 +0000 (12:05 +0200)
Currently, timer events are processed before entering guest mode.  Move this
processing to the main vcpu event loop, since timer events should be handled
even while the vcpu is halted.  A timer may cause an interrupt/NMI to be
injected, and only then will the vcpu be unhalted.

Signed-off-by: Gleb Natapov <gleb@redhat.com>
Signed-off-by: Avi Kivity <avi@redhat.com>
arch/ia64/kvm/kvm-ia64.c
arch/x86/kvm/x86.c
virt/kvm/kvm_main.c

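As described in the commit message above, timer injection moves out of the
pre-guest-entry path and into the outer __vcpu_run() loop, so a pending timer
is turned into an injected interrupt first, and only that injection unhalts
the vcpu.  Below is a minimal user-space sketch of that control flow; the
names (toy_vcpu, toy_vcpu_run, timer_pending, irq_injected) are illustrative
stand-ins and not the kernel's structures or helpers, and the single polled
flag stands in for kvm_cpu_has_pending_timer()/kvm_inject_pending_timer_irqs().

#include <stdbool.h>
#include <stdio.h>

/* Illustrative stand-in for a KVM vcpu; not the kernel's struct kvm_vcpu. */
struct toy_vcpu {
	bool halted;		/* models KVM_MP_STATE_HALTED */
	bool timer_pending;	/* models a pending timer event */
	bool irq_injected;	/* models an injected timer interrupt */
};

/* Outer event loop, shaped like the patched __vcpu_run(): the timer is
 * checked on every iteration, even while the vcpu is halted, and the vcpu
 * becomes runnable only once the timer interrupt has been injected. */
static void toy_vcpu_run(struct toy_vcpu *v, int iterations)
{
	for (int i = 0; i < iterations; i++) {
		/* kvm_vcpu_block() equivalent: a halted vcpu stays blocked
		 * unless something (here, a pending timer) wakes it up. */
		if (v->halted && !v->timer_pending)
			continue;

		/* Timer processing now lives in the main loop rather than in
		 * the "enter guest" path, so it also runs while halted. */
		if (v->timer_pending) {
			v->irq_injected = true;
			v->timer_pending = false;
			v->halted = false;	/* injection unhalts the vcpu */
		}

		if (!v->halted)
			printf("iteration %d: entering guest (irq_injected=%d)\n",
			       i, v->irq_injected);
	}
}

int main(void)
{
	struct toy_vcpu v = { .halted = true, .timer_pending = false };

	toy_vcpu_run(&v, 2);	/* stays halted: no timer pending */
	v.timer_pending = true;
	toy_vcpu_run(&v, 2);	/* timer fires, irq injected, vcpu unhalted */
	return 0;
}
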
diff --git a/arch/ia64/kvm/kvm-ia64.c b/arch/ia64/kvm/kvm-ia64.c
index 906d597e19cd421c869e233c490be3108810169f..fcfb11036532b2239815ad66b625201c94246012 100644
--- a/arch/ia64/kvm/kvm-ia64.c
+++ b/arch/ia64/kvm/kvm-ia64.c
@@ -488,10 +488,10 @@ int kvm_emulate_halt(struct kvm_vcpu *vcpu)
                hrtimer_cancel(p_ht);
                vcpu->arch.ht_active = 0;
 
-               if (test_and_clear_bit(KVM_REQ_UNHALT, &vcpu->requests))
+               if (test_and_clear_bit(KVM_REQ_UNHALT, &vcpu->requests) ||
+                               kvm_cpu_has_pending_timer(vcpu))
                        if (vcpu->arch.mp_state == KVM_MP_STATE_HALTED)
-                               vcpu->arch.mp_state =
-                                       KVM_MP_STATE_RUNNABLE;
+                               vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
 
                if (vcpu->arch.mp_state != KVM_MP_STATE_RUNNABLE)
                        return -EINTR;
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 03431b22345895fac12aa2d8863fb78cc1035f52..9cdfe1b78066e93c53544f57c0f2ee484cf5b596 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -3126,9 +3126,6 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
                }
        }
 
-       clear_bit(KVM_REQ_PENDING_TIMER, &vcpu->requests);
-       kvm_inject_pending_timer_irqs(vcpu);
-
        preempt_disable();
 
        kvm_x86_ops->prepare_guest_switch(vcpu);
@@ -3228,6 +3225,7 @@ out:
        return r;
 }
 
+
 static int __vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 {
        int r;
@@ -3254,29 +3252,42 @@ static int __vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
                        kvm_vcpu_block(vcpu);
                        down_read(&vcpu->kvm->slots_lock);
                        if (test_and_clear_bit(KVM_REQ_UNHALT, &vcpu->requests))
-                               if (vcpu->arch.mp_state == KVM_MP_STATE_HALTED)
+                       {
+                               switch(vcpu->arch.mp_state) {
+                               case KVM_MP_STATE_HALTED:
                                        vcpu->arch.mp_state =
-                                                       KVM_MP_STATE_RUNNABLE;
-                       if (vcpu->arch.mp_state != KVM_MP_STATE_RUNNABLE)
-                               r = -EINTR;
+                                               KVM_MP_STATE_RUNNABLE;
+                               case KVM_MP_STATE_RUNNABLE:
+                                       break;
+                               case KVM_MP_STATE_SIPI_RECEIVED:
+                               default:
+                                       r = -EINTR;
+                                       break;
+                               }
+                       }
                }
 
-               if (r > 0) {
-                       if (dm_request_for_irq_injection(vcpu, kvm_run)) {
-                               r = -EINTR;
-                               kvm_run->exit_reason = KVM_EXIT_INTR;
-                               ++vcpu->stat.request_irq_exits;
-                       }
-                       if (signal_pending(current)) {
-                               r = -EINTR;
-                               kvm_run->exit_reason = KVM_EXIT_INTR;
-                               ++vcpu->stat.signal_exits;
-                       }
-                       if (need_resched()) {
-                               up_read(&vcpu->kvm->slots_lock);
-                               kvm_resched(vcpu);
-                               down_read(&vcpu->kvm->slots_lock);
-                       }
+               if (r <= 0)
+                       break;
+
+               clear_bit(KVM_REQ_PENDING_TIMER, &vcpu->requests);
+               if (kvm_cpu_has_pending_timer(vcpu))
+                       kvm_inject_pending_timer_irqs(vcpu);
+
+               if (dm_request_for_irq_injection(vcpu, kvm_run)) {
+                       r = -EINTR;
+                       kvm_run->exit_reason = KVM_EXIT_INTR;
+                       ++vcpu->stat.request_irq_exits;
+               }
+               if (signal_pending(current)) {
+                       r = -EINTR;
+                       kvm_run->exit_reason = KVM_EXIT_INTR;
+                       ++vcpu->stat.signal_exits;
+               }
+               if (need_resched()) {
+                       up_read(&vcpu->kvm->slots_lock);
+                       kvm_resched(vcpu);
+                       down_read(&vcpu->kvm->slots_lock);
                }
        }
 
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 92ef725357593a92a6320bdeafadc248e395e37a..273490fd3d686317b8c4ad99a277286400d4de2b 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -1613,11 +1613,12 @@ void kvm_vcpu_block(struct kvm_vcpu *vcpu)
                prepare_to_wait(&vcpu->wq, &wait, TASK_INTERRUPTIBLE);
 
                if (kvm_cpu_has_interrupt(vcpu) ||
-                   kvm_cpu_has_pending_timer(vcpu) ||
-                   kvm_arch_vcpu_runnable(vcpu)) {
+                               kvm_arch_vcpu_runnable(vcpu)) {
                        set_bit(KVM_REQ_UNHALT, &vcpu->requests);
                        break;
                }
+               if (kvm_cpu_has_pending_timer(vcpu))
+                       break;
                if (signal_pending(current))
                        break;