        u64 propbase = GICR_PROPBASER_ADDRESS(kvm->arch.vgic.propbaser);
        u8 prop;
        int ret;
+       unsigned long flags;
 
        ret = kvm_read_guest(kvm, propbase + irq->intid - GIC_LPI_OFFSET,
                             &prop, 1);
        if (ret)
                return ret;
 
-       spin_lock(&irq->irq_lock);
+       spin_lock_irqsave(&irq->irq_lock, flags);
 
        if (!filter_vcpu || filter_vcpu == irq->target_vcpu) {
                irq->priority = LPI_PROP_PRIORITY(prop);
                irq->enabled = LPI_PROP_ENABLE_BIT(prop);
 
-               vgic_queue_irq_unlock(kvm, irq);
+               vgic_queue_irq_unlock(kvm, irq, flags);
        } else {
-               spin_unlock(&irq->irq_lock);
+               spin_unlock_irqrestore(&irq->irq_lock, flags);
        }
 
        return 0;
        int ret = 0;
        u32 *intids;
        int nr_irqs, i;
+       unsigned long flags;
 
        nr_irqs = vgic_copy_lpi_list(vcpu, &intids);
        if (nr_irqs < 0)
                }
 
                irq = vgic_get_irq(vcpu->kvm, NULL, intids[i]);
-               spin_lock(&irq->irq_lock);
+               spin_lock_irqsave(&irq->irq_lock, flags);
                irq->pending_latch = pendmask & (1U << bit_nr);
-               vgic_queue_irq_unlock(vcpu->kvm, irq);
+               vgic_queue_irq_unlock(vcpu->kvm, irq, flags);
                vgic_put_irq(vcpu->kvm, irq);
        }
 
 {
        struct kvm_vcpu *vcpu;
        struct its_ite *ite;
+       unsigned long flags;
 
        if (!its->enabled)
                return -EBUSY;
        if (!vcpu->arch.vgic_cpu.lpis_enabled)
                return -EBUSY;
 
-       spin_lock(&ite->irq->irq_lock);
+       spin_lock_irqsave(&ite->irq->irq_lock, flags);
        ite->irq->pending_latch = true;
-       vgic_queue_irq_unlock(kvm, ite->irq);
+       vgic_queue_irq_unlock(kvm, ite->irq, flags);
 
        return 0;
 }
 
        int mode = (val >> 24) & 0x03;
        int c;
        struct kvm_vcpu *vcpu;
+       unsigned long flags;
 
        switch (mode) {
        case 0x0:               /* as specified by targets */
 
                irq = vgic_get_irq(source_vcpu->kvm, vcpu, intid);
 
-               spin_lock(&irq->irq_lock);
+               spin_lock_irqsave(&irq->irq_lock, flags);
                irq->pending_latch = true;
                irq->source |= 1U << source_vcpu->vcpu_id;
 
-               vgic_queue_irq_unlock(source_vcpu->kvm, irq);
+               vgic_queue_irq_unlock(source_vcpu->kvm, irq, flags);
                vgic_put_irq(source_vcpu->kvm, irq);
        }
 }
        u32 intid = VGIC_ADDR_TO_INTID(addr, 8);
        u8 cpu_mask = GENMASK(atomic_read(&vcpu->kvm->online_vcpus) - 1, 0);
        int i;
+       unsigned long flags;
 
        /* GICD_ITARGETSR[0-7] are read-only */
        if (intid < VGIC_NR_PRIVATE_IRQS)
                struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, NULL, intid + i);
                int target;
 
-               spin_lock(&irq->irq_lock);
+               spin_lock_irqsave(&irq->irq_lock, flags);
 
                irq->targets = (val >> (i * 8)) & cpu_mask;
                target = irq->targets ? __ffs(irq->targets) : 0;
                irq->target_vcpu = kvm_get_vcpu(vcpu->kvm, target);
 
-               spin_unlock(&irq->irq_lock);
+               spin_unlock_irqrestore(&irq->irq_lock, flags);
                vgic_put_irq(vcpu->kvm, irq);
        }
 }
 {
        u32 intid = addr & 0x0f;
        int i;
+       unsigned long flags;
 
        for (i = 0; i < len; i++) {
                struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
 
-               spin_lock(&irq->irq_lock);
+               spin_lock_irqsave(&irq->irq_lock, flags);
 
                irq->source &= ~((val >> (i * 8)) & 0xff);
                if (!irq->source)
                        irq->pending_latch = false;
 
-               spin_unlock(&irq->irq_lock);
+               spin_unlock_irqrestore(&irq->irq_lock, flags);
                vgic_put_irq(vcpu->kvm, irq);
        }
 }
 {
        u32 intid = addr & 0x0f;
        int i;
+       unsigned long flags;
 
        for (i = 0; i < len; i++) {
                struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
 
-               spin_lock(&irq->irq_lock);
+               spin_lock_irqsave(&irq->irq_lock, flags);
 
                irq->source |= (val >> (i * 8)) & 0xff;
 
                if (irq->source) {
                        irq->pending_latch = true;
-                       vgic_queue_irq_unlock(vcpu->kvm, irq);
+                       vgic_queue_irq_unlock(vcpu->kvm, irq, flags);
                } else {
-                       spin_unlock(&irq->irq_lock);
+                       spin_unlock_irqrestore(&irq->irq_lock, flags);
                }
                vgic_put_irq(vcpu->kvm, irq);
        }
 
 {
        int intid = VGIC_ADDR_TO_INTID(addr, 64);
        struct vgic_irq *irq;
+       unsigned long flags;
 
        /* The upper word is WI for us since we don't implement Aff3. */
        if (addr & 4)
        if (!irq)
                return;
 
-       spin_lock(&irq->irq_lock);
+       spin_lock_irqsave(&irq->irq_lock, flags);
 
        /* We only care about and preserve Aff0, Aff1 and Aff2. */
        irq->mpidr = val & GENMASK(23, 0);
        irq->target_vcpu = kvm_mpidr_to_vcpu(vcpu->kvm, irq->mpidr);
 
-       spin_unlock(&irq->irq_lock);
+       spin_unlock_irqrestore(&irq->irq_lock, flags);
        vgic_put_irq(vcpu->kvm, irq);
 }
 
 {
        u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
        int i;
+       unsigned long flags;
 
        for (i = 0; i < len * 8; i++) {
                struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
 
-               spin_lock(&irq->irq_lock);
+               spin_lock_irqsave(&irq->irq_lock, flags);
                if (test_bit(i, &val)) {
                        /*
                         * pending_latch is set irrespective of irq type
                         * (level or edge) to avoid dependency that VM should
                         * restore irq config before pending info.
                         */
                        irq->pending_latch = true;
-                       vgic_queue_irq_unlock(vcpu->kvm, irq);
+                       vgic_queue_irq_unlock(vcpu->kvm, irq, flags);
                } else {
                        irq->pending_latch = false;
-                       spin_unlock(&irq->irq_lock);
+                       spin_unlock_irqrestore(&irq->irq_lock, flags);
                }
 
                vgic_put_irq(vcpu->kvm, irq);
        int sgi, c;
        int vcpu_id = vcpu->vcpu_id;
        bool broadcast;
+       unsigned long flags;
 
        sgi = (reg & ICC_SGI1R_SGI_ID_MASK) >> ICC_SGI1R_SGI_ID_SHIFT;
        broadcast = reg & BIT_ULL(ICC_SGI1R_IRQ_ROUTING_MODE_BIT);
 
                irq = vgic_get_irq(vcpu->kvm, c_vcpu, sgi);
 
-               spin_lock(&irq->irq_lock);
+               spin_lock_irqsave(&irq->irq_lock, flags);
                irq->pending_latch = true;
 
-               vgic_queue_irq_unlock(vcpu->kvm, irq);
+               vgic_queue_irq_unlock(vcpu->kvm, irq, flags);
                vgic_put_irq(vcpu->kvm, irq);
        }
 }
 
 {
        u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
        int i;
+       unsigned long flags;
 
        for_each_set_bit(i, &val, len * 8) {
                struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
 
-               spin_lock(&irq->irq_lock);
+               spin_lock_irqsave(&irq->irq_lock, flags);
                irq->enabled = true;
-               vgic_queue_irq_unlock(vcpu->kvm, irq);
+               vgic_queue_irq_unlock(vcpu->kvm, irq, flags);
 
                vgic_put_irq(vcpu->kvm, irq);
        }
 {
        u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
        int i;
+       unsigned long flags;
 
        for_each_set_bit(i, &val, len * 8) {
                struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
 
-               spin_lock(&irq->irq_lock);
+               spin_lock_irqsave(&irq->irq_lock, flags);
 
                irq->enabled = false;
 
-               spin_unlock(&irq->irq_lock);
+               spin_unlock_irqrestore(&irq->irq_lock, flags);
                vgic_put_irq(vcpu->kvm, irq);
        }
 }
 {
        u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
        int i;
+       unsigned long flags;
 
        for_each_set_bit(i, &val, len * 8) {
                struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
 
-               spin_lock(&irq->irq_lock);
+               spin_lock_irqsave(&irq->irq_lock, flags);
                irq->pending_latch = true;
 
-               vgic_queue_irq_unlock(vcpu->kvm, irq);
+               vgic_queue_irq_unlock(vcpu->kvm, irq, flags);
                vgic_put_irq(vcpu->kvm, irq);
        }
 }
 {
        u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
        int i;
+       unsigned long flags;
 
        for_each_set_bit(i, &val, len * 8) {
                struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
 
-               spin_lock(&irq->irq_lock);
+               spin_lock_irqsave(&irq->irq_lock, flags);
 
                irq->pending_latch = false;
 
-               spin_unlock(&irq->irq_lock);
+               spin_unlock_irqrestore(&irq->irq_lock, flags);
                vgic_put_irq(vcpu->kvm, irq);
        }
 }
                                    bool new_active_state)
 {
        struct kvm_vcpu *requester_vcpu;
-       spin_lock(&irq->irq_lock);
+       unsigned long flags;
+       spin_lock_irqsave(&irq->irq_lock, flags);
 
        /*
         * The vcpu parameter here can mean multiple things depending on how
 
        irq->active = new_active_state;
        if (new_active_state)
-               vgic_queue_irq_unlock(vcpu->kvm, irq);
+               vgic_queue_irq_unlock(vcpu->kvm, irq, flags);
        else
-               spin_unlock(&irq->irq_lock);
+               spin_unlock_irqrestore(&irq->irq_lock, flags);
 }
 
 /*
 {
        u32 intid = VGIC_ADDR_TO_INTID(addr, 8);
        int i;
+       unsigned long flags;
 
        for (i = 0; i < len; i++) {
                struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
 
-               spin_lock(&irq->irq_lock);
+               spin_lock_irqsave(&irq->irq_lock, flags);
                /* Narrow the priority range to what we actually support */
                irq->priority = (val >> (i * 8)) & GENMASK(7, 8 - VGIC_PRI_BITS);
-               spin_unlock(&irq->irq_lock);
+               spin_unlock_irqrestore(&irq->irq_lock, flags);
 
                vgic_put_irq(vcpu->kvm, irq);
        }
 {
        u32 intid = VGIC_ADDR_TO_INTID(addr, 2);
        int i;
+       unsigned long flags;
 
        for (i = 0; i < len * 4; i++) {
                struct vgic_irq *irq;
                        continue;
 
                irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
-               spin_lock(&irq->irq_lock);
+               spin_lock_irqsave(&irq->irq_lock, flags);
 
                if (test_bit(i * 2 + 1, &val))
                        irq->config = VGIC_CONFIG_EDGE;
                else
                        irq->config = VGIC_CONFIG_LEVEL;
 
-               spin_unlock(&irq->irq_lock);
+               spin_unlock_irqrestore(&irq->irq_lock, flags);
                vgic_put_irq(vcpu->kvm, irq);
        }
 }
 {
        int i;
        int nr_irqs = vcpu->kvm->arch.vgic.nr_spis + VGIC_NR_PRIVATE_IRQS;
+       unsigned long flags;
 
        for (i = 0; i < 32; i++) {
                struct vgic_irq *irq;
                 * restore irq config before line level.
                 */
                new_level = !!(val & (1U << i));
-               spin_lock(&irq->irq_lock);
+               spin_lock_irqsave(&irq->irq_lock, flags);
                irq->line_level = new_level;
                if (new_level)
-                       vgic_queue_irq_unlock(vcpu->kvm, irq);
+                       vgic_queue_irq_unlock(vcpu->kvm, irq, flags);
                else
-                       spin_unlock(&irq->irq_lock);
+                       spin_unlock_irqrestore(&irq->irq_lock, flags);
 
                vgic_put_irq(vcpu->kvm, irq);
        }
 
        struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
        struct vgic_v2_cpu_if *cpuif = &vgic_cpu->vgic_v2;
        int lr;
+       unsigned long flags;
 
        cpuif->vgic_hcr &= ~GICH_HCR_UIE;
 
 
                irq = vgic_get_irq(vcpu->kvm, vcpu, intid);
 
-               spin_lock(&irq->irq_lock);
+               spin_lock_irqsave(&irq->irq_lock, flags);
 
                /* Always preserve the active bit */
                irq->active = !!(val & GICH_LR_ACTIVE_BIT);
                                irq->pending_latch = false;
                }
 
-               spin_unlock(&irq->irq_lock);
+               spin_unlock_irqrestore(&irq->irq_lock, flags);
                vgic_put_irq(vcpu->kvm, irq);
        }
 
 
        struct vgic_v3_cpu_if *cpuif = &vgic_cpu->vgic_v3;
        u32 model = vcpu->kvm->arch.vgic.vgic_model;
        int lr;
+       unsigned long flags;
 
        cpuif->vgic_hcr &= ~ICH_HCR_UIE;
 
                if (!irq)       /* An LPI could have been unmapped. */
                        continue;
 
-               spin_lock(&irq->irq_lock);
+               spin_lock_irqsave(&irq->irq_lock, flags);
 
                /* Always preserve the active bit */
                irq->active = !!(val & ICH_LR_ACTIVE_BIT);
                                irq->pending_latch = false;
                }
 
-               spin_unlock(&irq->irq_lock);
+               spin_unlock_irqrestore(&irq->irq_lock, flags);
                vgic_put_irq(vcpu->kvm, irq);
        }
 
        bool status;
        u8 val;
        int ret;
+       unsigned long flags;
 
 retry:
        vcpu = irq->target_vcpu;
 
        status = val & (1 << bit_nr);
 
-       spin_lock(&irq->irq_lock);
+       spin_lock_irqsave(&irq->irq_lock, flags);
        if (irq->target_vcpu != vcpu) {
-               spin_unlock(&irq->irq_lock);
+               spin_unlock_irqrestore(&irq->irq_lock, flags);
                goto retry;
        }
        irq->pending_latch = status;
-       vgic_queue_irq_unlock(vcpu->kvm, irq);
+       vgic_queue_irq_unlock(vcpu->kvm, irq, flags);
 
        if (status) {
                /* clear consumed data */
 
  *   vcpuX->vcpu_id < vcpuY->vcpu_id:
  *     spin_lock(vcpuX->arch.vgic_cpu.ap_list_lock);
  *     spin_lock(vcpuY->arch.vgic_cpu.ap_list_lock);
+ *
+ * Since the VGIC must support injecting virtual interrupts from ISRs, we have
+ * to use the spin_lock_irqsave/spin_unlock_irqrestore versions of outer
+ * spinlocks for any lock that may be taken while injecting an interrupt.
  */
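
To make the new rule concrete, here is a minimal caller-side sketch (illustrative only, built from the helpers this patch touches): the first spinlock taken on an injection path is acquired with spin_lock_irqsave(), and the saved flags are handed to vgic_queue_irq_unlock(), which drops every lock and restores the interrupt state before returning.

        unsigned long flags;

        spin_lock_irqsave(&irq->irq_lock, flags);   /* first lock on this path */
        irq->pending_latch = true;                  /* update state under the lock */
        vgic_queue_irq_unlock(kvm, irq, flags);     /* drops irq_lock (and any
                                                     * ap_list_lock it takes) and
                                                     * restores the saved flags */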
 
 /*
  * Needs to be entered with the IRQ lock already held, but will return
  * with all locks dropped.
  */
-bool vgic_queue_irq_unlock(struct kvm *kvm, struct vgic_irq *irq)
+bool vgic_queue_irq_unlock(struct kvm *kvm, struct vgic_irq *irq,
+                          unsigned long flags)
 {
        struct kvm_vcpu *vcpu;
 
                 * not need to be inserted into an ap_list and there is also
                 * no more work for us to do.
                 */
-               spin_unlock(&irq->irq_lock);
+               spin_unlock_irqrestore(&irq->irq_lock, flags);
 
                /*
                 * We have to kick the VCPU here, because we could be
         * We must unlock the irq lock to take the ap_list_lock where
         * we are going to insert this new pending interrupt.
         */
-       spin_unlock(&irq->irq_lock);
+       spin_unlock_irqrestore(&irq->irq_lock, flags);
 
        /* someone can do stuff here, which we re-check below */
 
-       spin_lock(&vcpu->arch.vgic_cpu.ap_list_lock);
+       spin_lock_irqsave(&vcpu->arch.vgic_cpu.ap_list_lock, flags);
        spin_lock(&irq->irq_lock);
 
        /*
 
        if (unlikely(irq->vcpu || vcpu != vgic_target_oracle(irq))) {
                spin_unlock(&irq->irq_lock);
-               spin_unlock(&vcpu->arch.vgic_cpu.ap_list_lock);
+               spin_unlock_irqrestore(&vcpu->arch.vgic_cpu.ap_list_lock, flags);
 
-               spin_lock(&irq->irq_lock);
+               spin_lock_irqsave(&irq->irq_lock, flags);
                goto retry;
        }
 
        irq->vcpu = vcpu;
 
        spin_unlock(&irq->irq_lock);
-       spin_unlock(&vcpu->arch.vgic_cpu.ap_list_lock);
+       spin_unlock_irqrestore(&vcpu->arch.vgic_cpu.ap_list_lock, flags);
 
        kvm_make_request(KVM_REQ_IRQ_PENDING, vcpu);
        kvm_vcpu_kick(vcpu);
 {
        struct kvm_vcpu *vcpu;
        struct vgic_irq *irq;
+       unsigned long flags;
        int ret;
 
        trace_vgic_update_irq_pending(cpuid, intid, level);
        if (!irq)
                return -EINVAL;
 
-       spin_lock(&irq->irq_lock);
+       spin_lock_irqsave(&irq->irq_lock, flags);
 
        if (!vgic_validate_injection(irq, level, owner)) {
                /* Nothing to see here, move along... */
-               spin_unlock(&irq->irq_lock);
+               spin_unlock_irqrestore(&irq->irq_lock, flags);
                vgic_put_irq(kvm, irq);
                return 0;
        }
        else
                irq->pending_latch = true;
 
-       vgic_queue_irq_unlock(kvm, irq);
+       vgic_queue_irq_unlock(kvm, irq, flags);
        vgic_put_irq(kvm, irq);
 
        return 0;
 int kvm_vgic_map_phys_irq(struct kvm_vcpu *vcpu, u32 virt_irq, u32 phys_irq)
 {
        struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, virt_irq);
+       unsigned long flags;
 
        BUG_ON(!irq);
 
-       spin_lock(&irq->irq_lock);
+       spin_lock_irqsave(&irq->irq_lock, flags);
 
        irq->hw = true;
        irq->hwintid = phys_irq;
 
-       spin_unlock(&irq->irq_lock);
+       spin_unlock_irqrestore(&irq->irq_lock, flags);
        vgic_put_irq(vcpu->kvm, irq);
 
        return 0;
 int kvm_vgic_unmap_phys_irq(struct kvm_vcpu *vcpu, unsigned int virt_irq)
 {
        struct vgic_irq *irq;
+       unsigned long flags;
 
        if (!vgic_initialized(vcpu->kvm))
                return -EAGAIN;
        irq = vgic_get_irq(vcpu->kvm, vcpu, virt_irq);
        BUG_ON(!irq);
 
-       spin_lock(&irq->irq_lock);
+       spin_lock_irqsave(&irq->irq_lock, flags);
 
        irq->hw = false;
        irq->hwintid = 0;
 
-       spin_unlock(&irq->irq_lock);
+       spin_unlock_irqrestore(&irq->irq_lock, flags);
        vgic_put_irq(vcpu->kvm, irq);
 
        return 0;
 {
        struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
        struct vgic_irq *irq, *tmp;
+       unsigned long flags;
 
 retry:
-       spin_lock(&vgic_cpu->ap_list_lock);
+       spin_lock_irqsave(&vgic_cpu->ap_list_lock, flags);
 
        list_for_each_entry_safe(irq, tmp, &vgic_cpu->ap_list_head, ap_list) {
                struct kvm_vcpu *target_vcpu, *vcpuA, *vcpuB;
                /* This interrupt looks like it has to be migrated. */
 
                spin_unlock(&irq->irq_lock);
-               spin_unlock(&vgic_cpu->ap_list_lock);
+               spin_unlock_irqrestore(&vgic_cpu->ap_list_lock, flags);
 
                /*
                 * Ensure locking order by always locking the smallest
                        vcpuB = vcpu;
                }
 
-               spin_lock(&vcpuA->arch.vgic_cpu.ap_list_lock);
+               spin_lock_irqsave(&vcpuA->arch.vgic_cpu.ap_list_lock, flags);
                spin_lock_nested(&vcpuB->arch.vgic_cpu.ap_list_lock,
                                 SINGLE_DEPTH_NESTING);
                spin_lock(&irq->irq_lock);
 
                spin_unlock(&irq->irq_lock);
                spin_unlock(&vcpuB->arch.vgic_cpu.ap_list_lock);
-               spin_unlock(&vcpuA->arch.vgic_cpu.ap_list_lock);
+               spin_unlock_irqrestore(&vcpuA->arch.vgic_cpu.ap_list_lock, flags);
                goto retry;
        }
 
-       spin_unlock(&vgic_cpu->ap_list_lock);
+       spin_unlock_irqrestore(&vgic_cpu->ap_list_lock, flags);
 }
 
 static inline void vgic_fold_lr_state(struct kvm_vcpu *vcpu)
        if (list_empty(&vcpu->arch.vgic_cpu.ap_list_head))
                return;
 
+       DEBUG_SPINLOCK_BUG_ON(!irqs_disabled());
+
        spin_lock(&vcpu->arch.vgic_cpu.ap_list_lock);
        vgic_flush_lr_state(vcpu);
        spin_unlock(&vcpu->arch.vgic_cpu.ap_list_lock);
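
By contrast, the flush path above deliberately keeps the plain spin_lock(): as the new DEBUG_SPINLOCK_BUG_ON() asserts, interrupts are expected to be disabled already when the run loop flushes vgic state. A rough sketch of that assumed caller ordering (the exact call site in virt/kvm/arm/arm.c may differ):

        local_irq_disable();                    /* run loop masks interrupts ... */
        kvm_vgic_flush_hwstate(vcpu);           /* ... so the plain spin_lock()
                                                 * above is safe and the new
                                                 * assertion holds */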
        struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
        struct vgic_irq *irq;
        bool pending = false;
+       unsigned long flags;
 
        if (!vcpu->kvm->arch.vgic.enabled)
                return false;
 
-       spin_lock(&vgic_cpu->ap_list_lock);
+       spin_lock_irqsave(&vgic_cpu->ap_list_lock, flags);
 
        list_for_each_entry(irq, &vgic_cpu->ap_list_head, ap_list) {
                spin_lock(&irq->irq_lock);
                        break;
        }
 
-       spin_unlock(&vgic_cpu->ap_list_lock);
+       spin_unlock_irqrestore(&vgic_cpu->ap_list_lock, flags);
 
        return pending;
 }
 {
        struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, virt_irq);
        bool map_is_active;
+       unsigned long flags;
 
        if (!vgic_initialized(vcpu->kvm))
                return false;
 
-       spin_lock(&irq->irq_lock);
+       spin_lock_irqsave(&irq->irq_lock, flags);
        map_is_active = irq->hw && irq->active;
-       spin_unlock(&irq->irq_lock);
+       spin_unlock_irqrestore(&irq->irq_lock, flags);
        vgic_put_irq(vcpu->kvm, irq);
 
        return map_is_active;
 
 struct vgic_irq *vgic_get_irq(struct kvm *kvm, struct kvm_vcpu *vcpu,
                              u32 intid);
 void vgic_put_irq(struct kvm *kvm, struct vgic_irq *irq);
-bool vgic_queue_irq_unlock(struct kvm *kvm, struct vgic_irq *irq);
+bool vgic_queue_irq_unlock(struct kvm *kvm, struct vgic_irq *irq,
+                          unsigned long flags);
 void vgic_kick_vcpus(struct kvm *kvm);
 
 int vgic_check_ioaddr(struct kvm *kvm, phys_addr_t *ioaddr,
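
With the prototype change above in place, every lock on the injection path is irq-safe, which is what allows a host interrupt handler to inject directly into the guest. A hypothetical sketch follows; the handler and the fwd_ctx structure are made up for illustration, while kvm_vgic_inject_irq() is the existing entry point that ends up in vgic_update_irq_pending().

        static irqreturn_t fwd_host_irq_handler(int host_irq, void *data)
        {
                struct fwd_ctx *fwd = data;     /* hypothetical container */

                /*
                 * Safe from hard-IRQ context after this series: the
                 * vgic_update_irq_pending() path only takes irq-safe locks.
                 */
                kvm_vgic_inject_irq(fwd->kvm, 0 /* cpuid, ignored for SPIs */,
                                    fwd->guest_intid, true /* level asserted */,
                                    fwd->owner);
                return IRQ_HANDLED;
        }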