return result;
 }
 
+void kvm_apic_update_tmr(struct kvm_vcpu *vcpu, u32 *tmr)
+{
+       struct kvm_lapic *apic = vcpu->arch.apic;
+       int i;
+
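+       /*
+        * The TMR is eight 32-bit registers spaced 0x10 apart, one bit
+        * per interrupt vector; write the full 256-bit bitmap.
+        */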
+       for (i = 0; i < 8; i++)
+               apic_set_reg(apic, APIC_TMR + 0x10 * i, tmr[i]);
+}
+
 static void apic_update_ppr(struct kvm_lapic *apic)
 {
        u32 tpr, isrv, ppr, old_ppr;
                if (dest_map)
                        __set_bit(vcpu->vcpu_id, dest_map);
 
-               if (trig_mode) {
-                       apic_debug("level trig mode for vector %d", vector);
-                       apic_set_vector(vector, apic->regs + APIC_TMR);
-               } else
-                       apic_clear_vector(vector, apic->regs + APIC_TMR);
-
                result = !apic_test_and_set_irr(vector, apic);
                trace_kvm_apic_accept_irq(vcpu->vcpu_id, delivery_mode,
                                          trig_mode, vector, !result);
 
 u64 kvm_lapic_get_base(struct kvm_vcpu *vcpu);
 void kvm_apic_set_version(struct kvm_vcpu *vcpu);
 
+void kvm_apic_update_tmr(struct kvm_vcpu *vcpu, u32 *tmr);
 int kvm_apic_match_physical_addr(struct kvm_lapic *apic, u16 dest);
 int kvm_apic_match_logical_addr(struct kvm_lapic *apic, u8 mda);
 int kvm_apic_set_irq(struct kvm_vcpu *vcpu, struct kvm_lapic_irq *irq,
 
 static void vcpu_scan_ioapic(struct kvm_vcpu *vcpu)
 {
        u64 eoi_exit_bitmap[4];
+       u32 tmr[8];
 
        if (!kvm_apic_hw_enabled(vcpu->arch.apic))
                return;
 
        memset(eoi_exit_bitmap, 0, 32);
+       memset(tmr, 0, 32);
 
-       kvm_ioapic_scan_entry(vcpu, eoi_exit_bitmap);
+       kvm_ioapic_scan_entry(vcpu, eoi_exit_bitmap, tmr);
        kvm_x86_ops->load_eoi_exitmap(vcpu, eoi_exit_bitmap);
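+       /* Propagate the trigger-mode bits gathered from the IOAPIC scan. */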
+       kvm_apic_update_tmr(vcpu, tmr);
 }
 
 static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
 
        smp_wmb();
 }
 
-void kvm_ioapic_scan_entry(struct kvm_vcpu *vcpu, u64 *eoi_exit_bitmap)
+void kvm_ioapic_scan_entry(struct kvm_vcpu *vcpu, u64 *eoi_exit_bitmap,
+                       u32 *tmr)
 {
        struct kvm_ioapic *ioapic = vcpu->kvm->arch.vioapic;
        union kvm_ioapic_redirect_entry *e;
                         kvm_irq_has_notifier(ioapic->kvm, KVM_IRQCHIP_IOAPIC,
                                 index) || index == RTC_GSI)) {
                        if (kvm_apic_match_dest(vcpu, NULL, 0,
-                               e->fields.dest_id, e->fields.dest_mode))
-                               __set_bit(e->fields.vector, (unsigned long *)eoi_exit_bitmap);
+                               e->fields.dest_id, e->fields.dest_mode)) {
+                               __set_bit(e->fields.vector,
+                                       (unsigned long *)eoi_exit_bitmap);
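+                       /* Mirror level-triggered entries into the guest TMR. */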
+                               if (e->fields.trig_mode == IOAPIC_LEVEL_TRIG)
+                                       __set_bit(e->fields.vector,
+                                               (unsigned long *)tmr);
+                       }
                }
        }
        spin_unlock(&ioapic->lock);
 
 int kvm_get_ioapic(struct kvm *kvm, struct kvm_ioapic_state *state);
 int kvm_set_ioapic(struct kvm *kvm, struct kvm_ioapic_state *state);
 void kvm_vcpu_request_scan_ioapic(struct kvm *kvm);
-void kvm_ioapic_scan_entry(struct kvm_vcpu *vcpu, u64 *eoi_exit_bitmap);
+void kvm_ioapic_scan_entry(struct kvm_vcpu *vcpu, u64 *eoi_exit_bitmap,
+                       u32 *tmr);
 
 #endif