/*
                 * If we have a signal pending, or need to notify a userspace
-                * irqchip about timer level changes, then we exit (and update
-                * the timer level state in kvm_timer_update_run below).
+                * irqchip about timer or PMU level changes, then we exit (and
+                * update the timer and PMU level state in
+                * kvm_timer_update_run and kvm_pmu_update_run below).
                 */
                if (signal_pending(current) ||
-                   kvm_timer_should_notify_user(vcpu)) {
+                   kvm_timer_should_notify_user(vcpu) ||
+                   kvm_pmu_should_notify_user(vcpu)) {
                        ret = -EINTR;
                        run->exit_reason = KVM_EXIT_INTR;
                }
        }
 
        /* Tell userspace about in-kernel device output levels */
-       kvm_timer_update_run(vcpu);
+       if (unlikely(!irqchip_in_kernel(vcpu->kvm))) {
+               kvm_timer_update_run(vcpu);
+               kvm_pmu_update_run(vcpu);
+       }
 
        if (vcpu->sigset_active)
                sigprocmask(SIG_SETMASK, &sigsaved, NULL);
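
A minimal user-space sketch for context (not part of this patch): a VMM that
emulates the irqchip itself can pick up the new level from the
device_irq_level sync-regs field after KVM_RUN returns.  handle_run_exit()
and set_pmu_irq_line() are illustrative names, not existing kernel or VMM
APIs.

        #include <stdbool.h>
        #include <linux/kvm.h>          /* struct kvm_run, KVM_ARM_DEV_PMU */

        /* Hypothetical hook into the VMM's emulated interrupt controller */
        void set_pmu_irq_line(bool level);

        void handle_run_exit(struct kvm_run *run)
        {
                /* Level of the in-kernel PMU overflow line, as synced by KVM */
                bool pmu_level = run->s.regs.device_irq_level & KVM_ARM_DEV_PMU;

                set_pmu_irq_line(pmu_level);
        }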
 
 void kvm_pmu_overflow_set(struct kvm_vcpu *vcpu, u64 val);
 void kvm_pmu_flush_hwstate(struct kvm_vcpu *vcpu);
 void kvm_pmu_sync_hwstate(struct kvm_vcpu *vcpu);
+bool kvm_pmu_should_notify_user(struct kvm_vcpu *vcpu);
+void kvm_pmu_update_run(struct kvm_vcpu *vcpu);
 void kvm_pmu_software_increment(struct kvm_vcpu *vcpu, u64 val);
 void kvm_pmu_handle_pmcr(struct kvm_vcpu *vcpu, u64 val);
 void kvm_pmu_set_counter_event_type(struct kvm_vcpu *vcpu, u64 data,
 static inline void kvm_pmu_overflow_set(struct kvm_vcpu *vcpu, u64 val) {}
 static inline void kvm_pmu_flush_hwstate(struct kvm_vcpu *vcpu) {}
 static inline void kvm_pmu_sync_hwstate(struct kvm_vcpu *vcpu) {}
+static inline bool kvm_pmu_should_notify_user(struct kvm_vcpu *vcpu)
+{
+       return false;
+}
+static inline void kvm_pmu_update_run(struct kvm_vcpu *vcpu) {}
 static inline void kvm_pmu_software_increment(struct kvm_vcpu *vcpu, u64 val) {}
 static inline void kvm_pmu_handle_pmcr(struct kvm_vcpu *vcpu, u64 val) {}
 static inline void kvm_pmu_set_counter_event_type(struct kvm_vcpu *vcpu,
 
        struct arch_timer_context *ptimer = vcpu_ptimer(vcpu);
        struct kvm_sync_regs *regs = &vcpu->run->s.regs;
 
-       if (likely(irqchip_in_kernel(vcpu->kvm)))
-               return;
-
        /* Populate the device bitmap with the timer states */
        regs->device_irq_level &= ~(KVM_ARM_DEV_EL1_VTIMER |
                                    KVM_ARM_DEV_EL1_PTIMER);
 
                return;
 
        overflow = !!kvm_pmu_overflow_status(vcpu);
-       if (pmu->irq_level != overflow) {
-               pmu->irq_level = overflow;
-               kvm_vgic_inject_irq(vcpu->kvm, vcpu->vcpu_id,
-                                   pmu->irq_num, overflow);
+       if (pmu->irq_level == overflow)
+               return;
+
+       pmu->irq_level = overflow;
+
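+       /*
+        * With an in-kernel irqchip (vgic) the new level is injected into the
+        * guest directly; with a userspace irqchip it is reported to user
+        * space via kvm_pmu_update_run() instead.
+        */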
+       if (likely(irqchip_in_kernel(vcpu->kvm))) {
+               int ret;
+               ret = kvm_vgic_inject_irq(vcpu->kvm, vcpu->vcpu_id,
+                                         pmu->irq_num, overflow);
+               WARN_ON(ret);
        }
 }
 
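+/**
+ * kvm_pmu_should_notify_user - check whether user space must be notified
+ * @vcpu: The vcpu pointer
+ *
+ * Returns true when a userspace irqchip is in use and the PMU overflow line
+ * level differs from the level last reported in the kvm_run structure.
+ */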
+bool kvm_pmu_should_notify_user(struct kvm_vcpu *vcpu)
+{
+       struct kvm_pmu *pmu = &vcpu->arch.pmu;
+       struct kvm_sync_regs *sregs = &vcpu->run->s.regs;
+       bool run_level = sregs->device_irq_level & KVM_ARM_DEV_PMU;
+
+       if (likely(irqchip_in_kernel(vcpu->kvm)))
+               return false;
+
+       return pmu->irq_level != run_level;
+}
+
+/*
+ * Reflect the PMU overflow interrupt output level into the kvm_run structure
+ */
+void kvm_pmu_update_run(struct kvm_vcpu *vcpu)
+{
+       struct kvm_sync_regs *regs = &vcpu->run->s.regs;
+
+       /* Populate the device bitmap with the PMU overflow state */
+       regs->device_irq_level &= ~KVM_ARM_DEV_PMU;
+       if (vcpu->arch.pmu.irq_level)
+               regs->device_irq_level |= KVM_ARM_DEV_PMU;
+}
+
 /**
  * kvm_pmu_flush_hwstate - flush pmu state to cpu
  * @vcpu: The vcpu pointer