#include "vgic.h"
 
-/*
- * Call this function to convert a u64 value to an unsigned long * bitmask
- * in a way that works on both 32-bit and 64-bit LE and BE platforms.
- *
- * Warning: Calling this function may modify *val.
- */
-static unsigned long *u64_to_bitmask(u64 *val)
-{
-#if defined(CONFIG_CPU_BIG_ENDIAN) && BITS_PER_LONG == 32
-       *val = (*val >> 32) | (*val << 32);
-#endif
-       return (unsigned long *)val;
-}
-
-void vgic_v2_process_maintenance(struct kvm_vcpu *vcpu)
+void vgic_v2_set_underflow(struct kvm_vcpu *vcpu)
 {
        struct vgic_v2_cpu_if *cpuif = &vcpu->arch.vgic_cpu.vgic_v2;
 
-       if (cpuif->vgic_misr & GICH_MISR_EOI) {
-               u64 eisr = cpuif->vgic_eisr;
-               unsigned long *eisr_bmap = u64_to_bitmask(&eisr);
-               int lr;
-
-               for_each_set_bit(lr, eisr_bmap, kvm_vgic_global_state.nr_lr) {
-                       u32 intid = cpuif->vgic_lr[lr] & GICH_LR_VIRTUALID;
-
-                       WARN_ON(cpuif->vgic_lr[lr] & GICH_LR_STATE);
-
-                       /* Only SPIs require notification */
-                       if (vgic_valid_spi(vcpu->kvm, intid))
-                               kvm_notify_acked_irq(vcpu->kvm, 0,
-                                                    intid - VGIC_NR_PRIVATE_IRQS);
-               }
-       }
-
-       /* check and disable underflow maintenance IRQ */
-       cpuif->vgic_hcr &= ~GICH_HCR_UIE;
-
-       /*
-        * In the next iterations of the vcpu loop, if we sync the
-        * vgic state after flushing it, but before entering the guest
-        * (this happens for pending signals and vmid rollovers), then
-        * make sure we don't pick up any old maintenance interrupts
-        * here.
-        */
-       cpuif->vgic_eisr = 0;
+       cpuif->vgic_hcr |= GICH_HCR_UIE;
 }
 
-void vgic_v2_set_underflow(struct kvm_vcpu *vcpu)
+static bool lr_signals_eoi_mi(u32 lr_val)
 {
-       struct vgic_v2_cpu_if *cpuif = &vcpu->arch.vgic_cpu.vgic_v2;
-
-       cpuif->vgic_hcr |= GICH_HCR_UIE;
+       return !(lr_val & GICH_LR_STATE) && (lr_val & GICH_LR_EOI) &&
+              !(lr_val & GICH_LR_HW);
 }
 
 /*
        struct vgic_v2_cpu_if *cpuif = &vcpu->arch.vgic_cpu.vgic_v2;
        int lr;
 
+       cpuif->vgic_hcr &= ~GICH_HCR_UIE;
+
        for (lr = 0; lr < vcpu->arch.vgic_cpu.used_lrs; lr++) {
                u32 val = cpuif->vgic_lr[lr];
                u32 intid = val & GICH_LR_VIRTUALID;
                struct vgic_irq *irq;
 
+               /* Notify fds when the guest EOI'ed a level-triggered SPI */
+               if (lr_signals_eoi_mi(val) && vgic_valid_spi(vcpu->kvm, intid))
+                       kvm_notify_acked_irq(vcpu->kvm, 0,
+                                            intid - VGIC_NR_PRIVATE_IRQS);
+
                irq = vgic_get_irq(vcpu->kvm, vcpu, intid);
 
                spin_lock(&irq->irq_lock);
 
 
 #include "vgic.h"
 
-void vgic_v3_process_maintenance(struct kvm_vcpu *vcpu)
+void vgic_v3_set_underflow(struct kvm_vcpu *vcpu)
 {
        struct vgic_v3_cpu_if *cpuif = &vcpu->arch.vgic_cpu.vgic_v3;
-       u32 model = vcpu->kvm->arch.vgic.vgic_model;
-
-       if (cpuif->vgic_misr & ICH_MISR_EOI) {
-               unsigned long eisr_bmap = cpuif->vgic_eisr;
-               int lr;
-
-               for_each_set_bit(lr, &eisr_bmap, kvm_vgic_global_state.nr_lr) {
-                       u32 intid;
-                       u64 val = cpuif->vgic_lr[lr];
-
-                       if (model == KVM_DEV_TYPE_ARM_VGIC_V3)
-                               intid = val & ICH_LR_VIRTUAL_ID_MASK;
-                       else
-                               intid = val & GICH_LR_VIRTUALID;
-
-                       WARN_ON(cpuif->vgic_lr[lr] & ICH_LR_STATE);
-
-                       /* Only SPIs require notification */
-                       if (vgic_valid_spi(vcpu->kvm, intid))
-                               kvm_notify_acked_irq(vcpu->kvm, 0,
-                                                    intid - VGIC_NR_PRIVATE_IRQS);
-               }
-
-               /*
-                * In the next iterations of the vcpu loop, if we sync
-                * the vgic state after flushing it, but before
-                * entering the guest (this happens for pending
-                * signals and vmid rollovers), then make sure we
-                * don't pick up any old maintenance interrupts here.
-                */
-               cpuif->vgic_eisr = 0;
-       }
 
-       cpuif->vgic_hcr &= ~ICH_HCR_UIE;
+       cpuif->vgic_hcr |= ICH_HCR_UIE;
 }
 
-void vgic_v3_set_underflow(struct kvm_vcpu *vcpu)
+static bool lr_signals_eoi_mi(u64 lr_val)
 {
-       struct vgic_v3_cpu_if *cpuif = &vcpu->arch.vgic_cpu.vgic_v3;
-
-       cpuif->vgic_hcr |= ICH_HCR_UIE;
+       return !(lr_val & ICH_LR_STATE) && (lr_val & ICH_LR_EOI) &&
+              !(lr_val & ICH_LR_HW);
 }
 
 void vgic_v3_fold_lr_state(struct kvm_vcpu *vcpu)
        u32 model = vcpu->kvm->arch.vgic.vgic_model;
        int lr;
 
+       cpuif->vgic_hcr &= ~ICH_HCR_UIE;
+
        for (lr = 0; lr < vcpu->arch.vgic_cpu.used_lrs; lr++) {
                u64 val = cpuif->vgic_lr[lr];
                u32 intid;
                        intid = val & ICH_LR_VIRTUAL_ID_MASK;
                else
                        intid = val & GICH_LR_VIRTUALID;
+
+               /* Notify fds when the guest EOI'ed a level-triggered IRQ */
+               if (lr_signals_eoi_mi(val) && vgic_valid_spi(vcpu->kvm, intid))
+                       kvm_notify_acked_irq(vcpu->kvm, 0,
+                                            intid - VGIC_NR_PRIVATE_IRQS);
+
                irq = vgic_get_irq(vcpu->kvm, vcpu, intid);
                if (!irq)       /* An LPI could have been unmapped. */
                        continue;
 
--- a/virt/kvm/arm/vgic/vgic.c
+++ b/virt/kvm/arm/vgic/vgic.c
        spin_unlock(&vgic_cpu->ap_list_lock);
 }
 
-static inline void vgic_process_maintenance_interrupt(struct kvm_vcpu *vcpu)
-{
-       if (kvm_vgic_global_state.type == VGIC_V2)
-               vgic_v2_process_maintenance(vcpu);
-       else
-               vgic_v3_process_maintenance(vcpu);
-}
-
 static inline void vgic_fold_lr_state(struct kvm_vcpu *vcpu)
 {
        if (kvm_vgic_global_state.type == VGIC_V2)
        if (unlikely(!vgic_initialized(vcpu->kvm)))
                return;
 
-       vgic_process_maintenance_interrupt(vcpu);
        vgic_fold_lr_state(vcpu);
        vgic_prune_ap_list(vcpu);
 
 
--- a/virt/kvm/arm/vgic/vgic.h
+++ b/virt/kvm/arm/vgic/vgic.h
 int vgic_check_ioaddr(struct kvm *kvm, phys_addr_t *ioaddr,
                      phys_addr_t addr, phys_addr_t alignment);
 
-void vgic_v2_process_maintenance(struct kvm_vcpu *vcpu);
 void vgic_v2_fold_lr_state(struct kvm_vcpu *vcpu);
 void vgic_v2_populate_lr(struct kvm_vcpu *vcpu, struct vgic_irq *irq, int lr);
 void vgic_v2_clear_lr(struct kvm_vcpu *vcpu, int lr);
        kref_get(&irq->refcount);
 }
 
-void vgic_v3_process_maintenance(struct kvm_vcpu *vcpu);
 void vgic_v3_fold_lr_state(struct kvm_vcpu *vcpu);
 void vgic_v3_populate_lr(struct kvm_vcpu *vcpu, struct vgic_irq *irq, int lr);
 void vgic_v3_clear_lr(struct kvm_vcpu *vcpu, int lr);
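
For readers following the flow rather than the individual hunks: after this patch, all maintenance-interrupt work happens in the fold (sync) path. The sketch below is an illustrative condensation of the resulting GICv2 behaviour and is not part of the patch; the function name example_v2_sync_sketch() is invented for this summary, it assumes the usual KVM/vgic definitions (struct kvm_vcpu, the GICH_* bits, vgic_valid_spi(), kvm_notify_acked_irq()), and it omits the per-interrupt state folding that vgic_v2_fold_lr_state() also performs.

/* Illustrative sketch only -- not part of the patch above. */
static bool lr_signals_eoi_mi(u32 lr_val)
{
	/* An empty LR with EOI requested, for a purely virtual interrupt,
	 * is exactly what the EOI maintenance interrupt reports. */
	return !(lr_val & GICH_LR_STATE) && (lr_val & GICH_LR_EOI) &&
	       !(lr_val & GICH_LR_HW);
}

static void example_v2_sync_sketch(struct kvm_vcpu *vcpu)	/* hypothetical name */
{
	struct vgic_v2_cpu_if *cpuif = &vcpu->arch.vgic_cpu.vgic_v2;
	int lr;

	/* The underflow request is one-shot: clear it on every sync instead
	 * of waiting for a maintenance-interrupt handler to do it. */
	cpuif->vgic_hcr &= ~GICH_HCR_UIE;

	for (lr = 0; lr < vcpu->arch.vgic_cpu.used_lrs; lr++) {
		u32 val = cpuif->vgic_lr[lr];
		u32 intid = val & GICH_LR_VIRTUALID;

		/* Level-triggered SPIs that the guest EOI'ed still get their
		 * irqfd/resample notification, now from the sync path. */
		if (lr_signals_eoi_mi(val) && vgic_valid_spi(vcpu->kvm, intid))
			kvm_notify_acked_irq(vcpu->kvm, 0,
					     intid - VGIC_NR_PRIVATE_IRQS);

		/* ... fold the remaining LR state back into the vgic_irq ... */
	}
}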