if (ret <= 0 || need_new_vmid_gen(vcpu->kvm)) {
                        local_irq_enable();
+                       kvm_timer_sync_hwstate(vcpu);
                        kvm_vgic_sync_hwstate(vcpu);
                        preempt_enable();
-                       kvm_timer_sync_hwstate(vcpu);
                        continue;
                }
 
                kvm_guest_exit();
                trace_kvm_exit(kvm_vcpu_trap_get_class(vcpu), *vcpu_pc(vcpu));
 
+               /*
+                * We must sync the timer state before the vgic state so that
+                * the vgic can properly sample the updated state of the
+                * interrupt line.
+                */
+               kvm_timer_sync_hwstate(vcpu);
+
                kvm_vgic_sync_hwstate(vcpu);
 
                preempt_enable();
 
-               kvm_timer_sync_hwstate(vcpu);
-
                ret = handle_exit(vcpu, run, ret);
        }
 
 
        bool                            armed;
 
        /* Timer IRQ */
-       const struct kvm_irq_level      *irq;
+       struct kvm_irq_level            irq;
 
        /* VGIC mapping */
        struct irq_phys_map             *map;
 
        u32                     virt_irq;
        u32                     phys_irq;
        u32                     irq;
-       bool                    active;
 };
 
 struct irq_phys_map_entry {
 struct irq_phys_map *kvm_vgic_map_phys_irq(struct kvm_vcpu *vcpu,
                                           int virt_irq, int irq);
 int kvm_vgic_unmap_phys_irq(struct kvm_vcpu *vcpu, struct irq_phys_map *map);
-bool kvm_vgic_get_phys_irq_active(struct irq_phys_map *map);
-void kvm_vgic_set_phys_irq_active(struct irq_phys_map *map, bool active);
 
 #define irqchip_in_kernel(k)   (!!((k)->arch.vgic.in_kernel))
 #define vgic_initialized(k)    (!!((k)->arch.vgic.nr_cpus))
 
        }
 }
 
-static void kvm_timer_inject_irq(struct kvm_vcpu *vcpu)
-{
-       int ret;
-       struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu;
-
-       kvm_vgic_set_phys_irq_active(timer->map, true);
-       ret = kvm_vgic_inject_mapped_irq(vcpu->kvm, vcpu->vcpu_id,
-                                        timer->map,
-                                        timer->irq->level);
-       WARN_ON(ret);
-}
-
 static irqreturn_t kvm_arch_timer_handler(int irq, void *dev_id)
 {
        struct kvm_vcpu *vcpu = *(struct kvm_vcpu **)dev_id;
        struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu;
 
        return !(timer->cntv_ctl & ARCH_TIMER_CTRL_IT_MASK) &&
-               (timer->cntv_ctl & ARCH_TIMER_CTRL_ENABLE) &&
-               !kvm_vgic_get_phys_irq_active(timer->map);
+               (timer->cntv_ctl & ARCH_TIMER_CTRL_ENABLE);
 }
 
 bool kvm_timer_should_fire(struct kvm_vcpu *vcpu)
        return cval <= now;
 }
 
+static void kvm_timer_update_irq(struct kvm_vcpu *vcpu, bool new_level)
+{
+       int ret;
+       struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu;
+
+       BUG_ON(!vgic_initialized(vcpu->kvm));
+
+       timer->irq.level = new_level;
+       ret = kvm_vgic_inject_mapped_irq(vcpu->kvm, vcpu->vcpu_id,
+                                        timer->map,
+                                        timer->irq.level);
+       WARN_ON(ret);
+}
+
+/*
+ * Check if there was a change in the timer state (should we raise or lower
+ * the line level to the GIC).
+ */
+static void kvm_timer_update_state(struct kvm_vcpu *vcpu)
+{
+       struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu;
+
+       /*
+        * If userspace modified the timer registers via SET_ONE_REG before
+        * the vgic was initialized, we mustn't set the timer->irq.level value
+        * because the guest would never see the interrupt.  Instead wait
+        * until we call this function from kvm_timer_flush_hwstate.
+        */
+       if (!vgic_initialized(vcpu->kvm))
+               return;
+
+       if (kvm_timer_should_fire(vcpu) != timer->irq.level)
+               kvm_timer_update_irq(vcpu, !timer->irq.level);
+}
+
 /*
  * Schedule the background timer before calling kvm_vcpu_block, so that this
  * thread is removed from its waitqueue and made runnable when there's a timer
        bool phys_active;
        int ret;
 
-       if (kvm_timer_should_fire(vcpu))
-               kvm_timer_inject_irq(vcpu);
+       kvm_timer_update_state(vcpu);
 
        /*
-        * We keep track of whether the edge-triggered interrupt has been
-        * signalled to the vgic/guest, and if so, we mask the interrupt and
-        * the physical distributor to prevent the timer from raising a
-        * physical interrupt whenever we run a guest, preventing forward
-        * VCPU progress.
+        * If we enter the guest with the virtual input level to the VGIC
+        * asserted, then we have already told the VGIC what we need to, and
+        * we don't need to exit from the guest until the guest deactivates
+        * the already injected interrupt, so we should set the hardware
+        * active state to prevent unnecessary exits from the guest.
+        *
+        * Conversely, if the virtual input level is deasserted, then always
+        * clear the hardware active state to ensure that hardware interrupts
+        * from the timer trigger a guest exit.
         */
-       if (kvm_vgic_get_phys_irq_active(timer->map))
+       if (timer->irq.level)
                phys_active = true;
        else
                phys_active = false;
 
        BUG_ON(timer_is_armed(timer));
 
-       if (kvm_timer_should_fire(vcpu))
-               kvm_timer_inject_irq(vcpu);
+       /*
+        * The guest could have modified the timer registers or the timer
+        * could have expired; update the timer state.
+        */
+       kvm_timer_update_state(vcpu);
 }
 
 int kvm_timer_vcpu_reset(struct kvm_vcpu *vcpu,
         * kvm_vcpu_set_target(). To handle this, we determine
         * vcpu timer irq number when the vcpu is reset.
         */
-       timer->irq = irq;
+       timer->irq.irq = irq->irq;
 
        /*
         * The bits in CNTV_CTL are architecturally reset to UNKNOWN for ARMv8
         * the ARMv7 architecture.
         */
        timer->cntv_ctl = 0;
+       kvm_timer_update_state(vcpu);
 
        /*
         * Tell the VGIC that the virtual interrupt is tied to a
        default:
                return -1;
        }
+
+       kvm_timer_update_state(vcpu);
        return 0;
 }
 
 
        return false;
 }
 
-/*
- * If a mapped interrupt's state has been modified by the guest such that it
- * is no longer active or pending, without it have gone through the sync path,
- * then the map->active field must be cleared so the interrupt can be taken
- * again.
- */
-static void vgic_handle_clear_mapped_irq(struct kvm_vcpu *vcpu)
-{
-       struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
-       struct list_head *root;
-       struct irq_phys_map_entry *entry;
-       struct irq_phys_map *map;
-
-       rcu_read_lock();
-
-       /* Check for PPIs */
-       root = &vgic_cpu->irq_phys_map_list;
-       list_for_each_entry_rcu(entry, root, entry) {
-               map = &entry->map;
-
-               if (!vgic_dist_irq_is_pending(vcpu, map->virt_irq) &&
-                   !vgic_irq_is_active(vcpu, map->virt_irq))
-                       map->active = false;
-       }
-
-       rcu_read_unlock();
-}
-
 bool vgic_handle_clear_pending_reg(struct kvm *kvm,
                                   struct kvm_exit_mmio *mmio,
                                   phys_addr_t offset, int vcpu_id)
                                          vcpu_id, offset);
                vgic_reg_access(mmio, reg, offset, mode);
 
-               vgic_handle_clear_mapped_irq(kvm_get_vcpu(kvm, vcpu_id));
                vgic_update_state(kvm);
                return true;
        }
                        ACCESS_READ_VALUE | ACCESS_WRITE_CLEARBIT);
 
        if (mmio->is_write) {
-               vgic_handle_clear_mapped_irq(kvm_get_vcpu(kvm, vcpu_id));
                vgic_update_state(kvm);
                return true;
        }
 /*
  * Save the physical active state, and reset it to inactive.
  *
- * Return 1 if HW interrupt went from active to inactive, and 0 otherwise.
+ * Return true if there's a pending level-triggered interrupt line to queue.
  */
-static int vgic_sync_hwirq(struct kvm_vcpu *vcpu, struct vgic_lr vlr)
+static bool vgic_sync_hwirq(struct kvm_vcpu *vcpu, int lr, struct vgic_lr vlr)
 {
+       struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
        struct irq_phys_map *map;
+       bool phys_active;
+       bool level_pending;
        int ret;
 
        if (!(vlr.state & LR_HW))
-               return 0;
+               return false;
 
        map = vgic_irq_map_search(vcpu, vlr.irq);
        BUG_ON(!map);
 
        ret = irq_get_irqchip_state(map->irq,
                                    IRQCHIP_STATE_ACTIVE,
-                                   &map->active);
+                                   &phys_active);
 
        WARN_ON(ret);
 
-       if (map->active)
+       if (phys_active)
                return 0;
 
-       return 1;
+       /* Mapped edge-triggered interrupts not yet supported. */
+       WARN_ON(vgic_irq_is_edge(vcpu, vlr.irq));
+       spin_lock(&dist->lock);
+       level_pending = process_level_irq(vcpu, lr, vlr);
+       spin_unlock(&dist->lock);
+       return level_pending;
 }
 
 /* Sync back the VGIC state after a guest run */
                        continue;
 
                vlr = vgic_get_lr(vcpu, lr);
-               if (vgic_sync_hwirq(vcpu, vlr)) {
-                       /*
-                        * So this is a HW interrupt that the guest
-                        * EOI-ed. Clean the LR state and allow the
-                        * interrupt to be sampled again.
-                        */
-                       vlr.state = 0;
-                       vlr.hwirq = 0;
-                       vgic_set_lr(vcpu, lr, vlr);
-                       vgic_irq_clear_queued(vcpu, vlr.irq);
-                       set_bit(lr, elrsr_ptr);
-               }
+               if (vgic_sync_hwirq(vcpu, lr, vlr))
+                       level_pending = true;
 
                if (!test_bit(lr, elrsr_ptr))
                        continue;
        kfree(entry);
 }
 
-/**
- * kvm_vgic_get_phys_irq_active - Return the active state of a mapped IRQ
- *
- * Return the logical active state of a mapped interrupt. This doesn't
- * necessarily reflects the current HW state.
- */
-bool kvm_vgic_get_phys_irq_active(struct irq_phys_map *map)
-{
-       BUG_ON(!map);
-       return map->active;
-}
-
-/**
- * kvm_vgic_set_phys_irq_active - Set the active state of a mapped IRQ
- *
- * Set the logical active state of a mapped interrupt. This doesn't
- * immediately affects the HW state.
- */
-void kvm_vgic_set_phys_irq_active(struct irq_phys_map *map, bool active)
-{
-       BUG_ON(!map);
-       map->active = active;
-}
-
 /**
  * kvm_vgic_unmap_phys_irq - Remove a virtual to physical IRQ mapping
  * @vcpu: The VCPU pointer
                }
 
                /*
-                * Enable all SGIs and configure all private IRQs as
-                * edge-triggered.
+                * Enable and configure all SGIs to be edge-triggered and
+                * configure all PPIs as level-triggered.
                 */
                for (i = 0; i < VGIC_NR_PRIVATE_IRQS; i++) {
-                       if (i < VGIC_NR_SGIS)
+                       if (i < VGIC_NR_SGIS) {
+                               /* SGIs */
                                vgic_bitmap_set_irq_val(&dist->irq_enabled,
                                                        vcpu->vcpu_id, i, 1);
-                       if (i < VGIC_NR_PRIVATE_IRQS)
                                vgic_bitmap_set_irq_val(&dist->irq_cfg,
                                                        vcpu->vcpu_id, i,
                                                        VGIC_CFG_EDGE);
+                       } else if (i < VGIC_NR_PRIVATE_IRQS) {
+                               /* PPIs */
+                               vgic_bitmap_set_irq_val(&dist->irq_cfg,
+                                                       vcpu->vcpu_id, i,
+                                                       VGIC_CFG_LEVEL);
+                       }
                }
 
                vgic_enable(vcpu);