}
        spin_unlock(&ps->inject_lock);
        if (inject) {
-               kvm_set_irq(kvm, kvm->arch.vpit->irq_source_id, 0, 1);
-               kvm_set_irq(kvm, kvm->arch.vpit->irq_source_id, 0, 0);
+               kvm_set_irq(kvm, kvm->arch.vpit->irq_source_id, 0, 1, false);
+               kvm_set_irq(kvm, kvm->arch.vpit->irq_source_id, 0, 0, false);
 
                /*
                 * Provides NMI watchdog support via Virtual Wire mode.
 
        return r;
 }
 
-int kvm_vm_ioctl_irq_line(struct kvm *kvm, struct kvm_irq_level *irq_event)
+int kvm_vm_ioctl_irq_line(struct kvm *kvm, struct kvm_irq_level *irq_event,
+                       bool line_status)
 {
        if (!irqchip_in_kernel(kvm))
                return -ENXIO;
 
        irq_event->status = kvm_set_irq(kvm, KVM_USERSPACE_IRQ_SOURCE_ID,
-                                       irq_event->irq, irq_event->level);
+                                       irq_event->irq, irq_event->level,
+                                       line_status);
        return 0;
 }
 
 
        u32 gsi;
        u32 type;
        int (*set)(struct kvm_kernel_irq_routing_entry *e,
-                  struct kvm *kvm, int irq_source_id, int level);
+                  struct kvm *kvm, int irq_source_id, int level,
+                  bool line_status);
        union {
                struct {
                        unsigned irqchip;
 
 int kvm_vm_ioctl_set_memory_region(struct kvm *kvm,
                                   struct kvm_userspace_memory_region *mem);
-int kvm_vm_ioctl_irq_line(struct kvm *kvm, struct kvm_irq_level *irq_level);
+int kvm_vm_ioctl_irq_line(struct kvm *kvm, struct kvm_irq_level *irq_level,
+                       bool line_status);
 long kvm_arch_vm_ioctl(struct file *filp,
                       unsigned int ioctl, unsigned long arg);
 
                                   union kvm_ioapic_redirect_entry *entry,
                                   unsigned long *deliver_bitmask);
 #endif
-int kvm_set_irq(struct kvm *kvm, int irq_source_id, u32 irq, int level);
+int kvm_set_irq(struct kvm *kvm, int irq_source_id, u32 irq, int level,
+               bool line_status);
 int kvm_set_irq_inatomic(struct kvm *kvm, int irq_source_id, u32 irq, int level);
 int kvm_set_msi(struct kvm_kernel_irq_routing_entry *irq_entry, struct kvm *kvm,
-               int irq_source_id, int level);
+               int irq_source_id, int level, bool line_status);
 bool kvm_irq_has_notifier(struct kvm *kvm, unsigned irqchip, unsigned pin);
 void kvm_notify_acked_irq(struct kvm *kvm, unsigned irqchip, unsigned pin);
 void kvm_register_irq_ack_notifier(struct kvm *kvm,
 
                spin_lock(&assigned_dev->intx_mask_lock);
                if (!(assigned_dev->flags & KVM_DEV_ASSIGN_MASK_INTX))
                        kvm_set_irq(assigned_dev->kvm,
-                                   assigned_dev->irq_source_id, vector, 1);
+                                   assigned_dev->irq_source_id, vector, 1,
+                                   false);
                spin_unlock(&assigned_dev->intx_mask_lock);
        } else
                kvm_set_irq(assigned_dev->kvm, assigned_dev->irq_source_id,
-                           vector, 1);
+                           vector, 1, false);
 }
 
 static irqreturn_t kvm_assigned_dev_thread_intx(int irq, void *dev_id)
                container_of(kian, struct kvm_assigned_dev_kernel,
                             ack_notifier);
 
-       kvm_set_irq(dev->kvm, dev->irq_source_id, dev->guest_irq, 0);
+       kvm_set_irq(dev->kvm, dev->irq_source_id, dev->guest_irq, 0, false);
 
        spin_lock(&dev->intx_mask_lock);
 
 
                if (reassert)
                        kvm_set_irq(dev->kvm, dev->irq_source_id,
-                                   dev->guest_irq, 1);
+                                   dev->guest_irq, 1, false);
        }
 
        spin_unlock(&dev->intx_mask_lock);
                                                &assigned_dev->ack_notifier);
 
        kvm_set_irq(assigned_dev->kvm, assigned_dev->irq_source_id,
-                   assigned_dev->guest_irq, 0);
+                   assigned_dev->guest_irq, 0, false);
 
        if (assigned_dev->irq_source_id != -1)
                kvm_free_irq_source_id(kvm, assigned_dev->irq_source_id);
        if (match->irq_requested_type & KVM_DEV_IRQ_GUEST_INTX) {
                if (assigned_dev->flags & KVM_DEV_ASSIGN_MASK_INTX) {
                        kvm_set_irq(match->kvm, match->irq_source_id,
-                                   match->guest_irq, 0);
+                                   match->guest_irq, 0, false);
                        /*
                         * Masking at hardware-level is performed on demand,
                         * i.e. when an IRQ actually arrives at the host.
 
        struct kvm *kvm = irqfd->kvm;
 
        if (!irqfd->resampler) {
-               kvm_set_irq(kvm, KVM_USERSPACE_IRQ_SOURCE_ID, irqfd->gsi, 1);
-               kvm_set_irq(kvm, KVM_USERSPACE_IRQ_SOURCE_ID, irqfd->gsi, 0);
+               kvm_set_irq(kvm, KVM_USERSPACE_IRQ_SOURCE_ID, irqfd->gsi, 1,
+                               false);
+               kvm_set_irq(kvm, KVM_USERSPACE_IRQ_SOURCE_ID, irqfd->gsi, 0,
+                               false);
        } else
                kvm_set_irq(kvm, KVM_IRQFD_RESAMPLE_IRQ_SOURCE_ID,
-                           irqfd->gsi, 1);
+                           irqfd->gsi, 1, false);
 }
 
 /*
        resampler = container_of(kian, struct _irqfd_resampler, notifier);
 
        kvm_set_irq(resampler->kvm, KVM_IRQFD_RESAMPLE_IRQ_SOURCE_ID,
-                   resampler->notifier.gsi, 0);
+                   resampler->notifier.gsi, 0, false);
 
        rcu_read_lock();
 
                list_del(&resampler->link);
                kvm_unregister_irq_ack_notifier(kvm, &resampler->notifier);
                kvm_set_irq(kvm, KVM_IRQFD_RESAMPLE_IRQ_SOURCE_ID,
-                           resampler->notifier.gsi, 0);
+                           resampler->notifier.gsi, 0, false);
                kfree(resampler);
        }
 
                irq = rcu_dereference(irqfd->irq_entry);
                /* An event has been signaled, inject an interrupt */
                if (irq)
-                       kvm_set_msi(irq, kvm, KVM_USERSPACE_IRQ_SOURCE_ID, 1);
+                       kvm_set_msi(irq, kvm, KVM_USERSPACE_IRQ_SOURCE_ID, 1,
+                                       false);
                else
                        schedule_work(&irqfd->inject);
                rcu_read_unlock();
 
 #else
 #define ioapic_debug(fmt, arg...)
 #endif
-static int ioapic_deliver(struct kvm_ioapic *vioapic, int irq);
+static int ioapic_deliver(struct kvm_ioapic *vioapic, int irq,
+               bool line_status);
 
 static unsigned long ioapic_read_indirect(struct kvm_ioapic *ioapic,
                                          unsigned long addr,
            __rtc_irq_eoi_tracking_restore_one(vcpu);
 }
 
-static int ioapic_service(struct kvm_ioapic *ioapic, unsigned int idx)
+static int ioapic_service(struct kvm_ioapic *ioapic, unsigned int idx,
+               bool line_status)
 {
        union kvm_ioapic_redirect_entry *pent;
        int injected = -1;
        pent = &ioapic->redirtbl[idx];
 
        if (!pent->fields.mask) {
-               injected = ioapic_deliver(ioapic, idx);
+               injected = ioapic_deliver(ioapic, idx, line_status);
                if (injected && pent->fields.trig_mode == IOAPIC_LEVEL_TRIG)
                        pent->fields.remote_irr = 1;
        }
                        kvm_fire_mask_notifiers(ioapic->kvm, KVM_IRQCHIP_IOAPIC, index, mask_after);
                if (e->fields.trig_mode == IOAPIC_LEVEL_TRIG
                    && ioapic->irr & (1 << index))
-                       ioapic_service(ioapic, index);
+                       ioapic_service(ioapic, index, false);
                kvm_ioapic_make_eoibitmap_request(ioapic->kvm);
                break;
        }
 }
 
-static int ioapic_deliver(struct kvm_ioapic *ioapic, int irq)
+static int ioapic_deliver(struct kvm_ioapic *ioapic, int irq, bool line_status)
 {
        union kvm_ioapic_redirect_entry *entry = &ioapic->redirtbl[irq];
        struct kvm_lapic_irq irqe;
 }
 
 int kvm_ioapic_set_irq(struct kvm_ioapic *ioapic, int irq, int irq_source_id,
-                      int level)
+                      int level, bool line_status)
 {
        u32 old_irr;
        u32 mask = 1 << irq;
                ioapic->irr |= mask;
                if ((edge && old_irr != ioapic->irr) ||
                    (!edge && !entry.fields.remote_irr))
-                       ret = ioapic_service(ioapic, irq);
+                       ret = ioapic_service(ioapic, irq, line_status);
                else
                        ret = 0; /* report coalesced interrupt */
        }
                ASSERT(ent->fields.trig_mode == IOAPIC_LEVEL_TRIG);
                ent->fields.remote_irr = 0;
                if (!ent->fields.mask && (ioapic->irr & (1 << i)))
-                       ioapic_service(ioapic, i);
+                       ioapic_service(ioapic, i, false);
        }
 }
 
 
 int kvm_ioapic_init(struct kvm *kvm);
 void kvm_ioapic_destroy(struct kvm *kvm);
 int kvm_ioapic_set_irq(struct kvm_ioapic *ioapic, int irq, int irq_source_id,
-                      int level);
+                      int level, bool line_status);
 void kvm_ioapic_clear_all(struct kvm_ioapic *ioapic, int irq_source_id);
 void kvm_ioapic_reset(struct kvm_ioapic *ioapic);
 int kvm_irq_delivery_to_apic(struct kvm *kvm, struct kvm_lapic *src,
 
 #include "ioapic.h"
 
 static int kvm_set_pic_irq(struct kvm_kernel_irq_routing_entry *e,
-                          struct kvm *kvm, int irq_source_id, int level)
+                          struct kvm *kvm, int irq_source_id, int level,
+                          bool line_status)
 {
 #ifdef CONFIG_X86
        struct kvm_pic *pic = pic_irqchip(kvm);
 }
 
 static int kvm_set_ioapic_irq(struct kvm_kernel_irq_routing_entry *e,
-                             struct kvm *kvm, int irq_source_id, int level)
+                             struct kvm *kvm, int irq_source_id, int level,
+                             bool line_status)
 {
        struct kvm_ioapic *ioapic = kvm->arch.vioapic;
-       return kvm_ioapic_set_irq(ioapic, e->irqchip.pin, irq_source_id, level);
+       return kvm_ioapic_set_irq(ioapic, e->irqchip.pin, irq_source_id, level,
+                               line_status);
 }
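
The widened ->set signature means every GSI routing callback has to accept the
flag even when it has no use for it; within this patch the existing in-kernel
callers simply pass false and the value is only threaded through. A minimal
hypothetical callback, shown only to illustrate the new shape (name and body
are not part of the patch):

	static int kvm_set_example_irq(struct kvm_kernel_irq_routing_entry *e,
				       struct kvm *kvm, int irq_source_id, int level,
				       bool line_status)
	{
		/* line_status is accepted but intentionally unused here. */
		return 1;	/* > 0: pretend the interrupt reached one CPU */
	}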
 
 inline static bool kvm_is_dm_lowest_prio(struct kvm_lapic_irq *irq)
 }
 
 int kvm_set_msi(struct kvm_kernel_irq_routing_entry *e,
-               struct kvm *kvm, int irq_source_id, int level)
+               struct kvm *kvm, int irq_source_id, int level, bool line_status)
 {
        struct kvm_lapic_irq irq;
 
        route.msi.address_hi = msi->address_hi;
        route.msi.data = msi->data;
 
-       return kvm_set_msi(&route, kvm, KVM_USERSPACE_IRQ_SOURCE_ID, 1);
+       return kvm_set_msi(&route, kvm, KVM_USERSPACE_IRQ_SOURCE_ID, 1, false);
 }
 
 /*
  * Return value:
  *  < 0   Interrupt was ignored (masked or not delivered for other reasons)
  *  = 0   Interrupt was coalesced (previous irq is still pending)
  *  > 0   Number of CPUs interrupt was delivered to
  */
-int kvm_set_irq(struct kvm *kvm, int irq_source_id, u32 irq, int level)
+int kvm_set_irq(struct kvm *kvm, int irq_source_id, u32 irq, int level,
+               bool line_status)
 {
        struct kvm_kernel_irq_routing_entry *e, irq_set[KVM_NR_IRQCHIPS];
        int ret = -1, i = 0;
 
        while(i--) {
                int r;
-               r = irq_set[i].set(&irq_set[i], kvm, irq_source_id, level);
+               r = irq_set[i].set(&irq_set[i], kvm, irq_source_id, level,
+                               line_status);
                if (r < 0)
                        continue;
 
 
                if (copy_from_user(&irq_event, argp, sizeof irq_event))
                        goto out;
 
-               r = kvm_vm_ioctl_irq_line(kvm, &irq_event);
+               r = kvm_vm_ioctl_irq_line(kvm, &irq_event,
+                                       ioctl == KVM_IRQ_LINE_STATUS);
                if (r)
                        goto out;
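
For reference, the status that kvm_vm_ioctl_irq_line() now records is copied
back to userspace only for the KVM_IRQ_LINE_STATUS ioctl (advertised via
KVM_CAP_IRQ_INJECT_STATUS). A rough userspace sketch, assuming vm_fd is a VM
fd with an in-kernel irqchip already created; the helper name is illustrative:

	#include <stdio.h>
	#include <sys/ioctl.h>
	#include <linux/kvm.h>

	/* Pulse a GSI and report whether the assertion was coalesced. */
	static int pulse_gsi(int vm_fd, __u32 gsi)
	{
		struct kvm_irq_level irq = { .irq = gsi, .level = 1 };

		/* KVM_IRQ_LINE_STATUS writes the kvm_set_irq() result into the
		 * status field: 0 means coalesced, > 0 is the number of CPUs hit. */
		if (ioctl(vm_fd, KVM_IRQ_LINE_STATUS, &irq) < 0)
			return -1;
		if (irq.status == 0)
			fprintf(stderr, "gsi %u: interrupt coalesced\n", gsi);

		irq.irq = gsi;	/* status aliases irq in the union, restore it */
		irq.level = 0;
		return ioctl(vm_fd, KVM_IRQ_LINE, &irq);	/* no status needed */
	}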