{
struct vgic_dist *dist = &kvm->arch.vgic;
struct vgic_irq *irq = vgic_get_irq(kvm, intid), *oldirq;
- unsigned long flags;
int ret;
/* In this case there is no put, since we keep the reference. */
if (irq)
	return irq;
irq = kzalloc(sizeof(struct vgic_irq), GFP_KERNEL_ACCOUNT);
if (!irq)
	return ERR_PTR(-ENOMEM);
- ret = xa_reserve_irq(&dist->lpi_xa, intid, GFP_KERNEL_ACCOUNT);
+ ret = xa_reserve(&dist->lpi_xa, intid, GFP_KERNEL_ACCOUNT);
if (ret) {
kfree(irq);
	return ERR_PTR(ret);
}
irq->target_vcpu = vcpu;
irq->group = 1;
- xa_lock_irqsave(&dist->lpi_xa, flags);
+ xa_lock(&dist->lpi_xa);
/*
 * There could be a race with another vgic_add_lpi(), so we need to
 * check that we don't add a second list entry with the same LPI.
 */
}
out_unlock:
- xa_unlock_irqrestore(&dist->lpi_xa, flags);
+ xa_unlock(&dist->lpi_xa);
if (ret)
return ERR_PTR(ret);
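
/*
 * A minimal sketch, not part of the patch, of the reserve-then-store
 * shape used above; all example_* names are hypothetical.  The slot is
 * reserved with a sleeping allocation up front, so the store under the
 * plain xa_lock() can pass a 0 gfp and never allocates.
 */
static int example_reserve_and_store(struct xarray *xa, unsigned long index,
				     void *entry)
{
	int ret;

	ret = xa_reserve(xa, index, GFP_KERNEL_ACCOUNT);
	if (ret)
		return ret;

	xa_lock(xa);
	ret = xa_err(__xa_store(xa, index, entry, 0));
	xa_unlock(xa);

	if (ret)
		xa_release(xa, index);	/* drop the unused reservation */

	return ret;
}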
* kvm->arch.config_lock (mutex)
* its->cmd_lock (mutex)
* its->its_lock (mutex)
- * vgic_dist->lpi_xa.xa_lock must be taken with IRQs disabled
+ * vgic_dist->lpi_xa.xa_lock
* vgic_cpu->ap_list_lock must be taken with IRQs disabled
* vgic_irq->irq_lock must be taken with IRQs disabled
*
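
/*
 * A minimal sketch, not part of the patch, of the ordering documented
 * above: the LPI xarray lock no longer needs IRQs masked, while
 * vgic_irq->irq_lock (taken inside it) still does.  The helper name is
 * hypothetical.
 */
static void example_nested_locking(struct vgic_dist *dist,
				   struct vgic_irq *irq)
{
	unsigned long flags;

	xa_lock(&dist->lpi_xa);				/* outer, IRQs may stay enabled */
	raw_spin_lock_irqsave(&irq->irq_lock, flags);	/* inner, IRQs disabled */
	/* ... operate on the LPI while both locks are held ... */
	raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
	xa_unlock(&dist->lpi_xa);
}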
void vgic_put_irq(struct kvm *kvm, struct vgic_irq *irq)
{
struct vgic_dist *dist = &kvm->arch.vgic;
- unsigned long flags;
if (!__vgic_put_irq(kvm, irq))
return;
- xa_lock_irqsave(&dist->lpi_xa, flags);
+ xa_lock(&dist->lpi_xa);
vgic_release_lpi_locked(dist, irq);
- xa_unlock_irqrestore(&dist->lpi_xa, flags);
+ xa_unlock(&dist->lpi_xa);
}
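
/*
 * A minimal sketch, not part of the patch, of the put path's shape,
 * assuming (as the call above suggests) that __vgic_put_irq() returns
 * true only once the last reference is gone.  All example_* names are
 * hypothetical.
 */
struct example_ref {
	refcount_t refcount;
	unsigned long id;
};

static void example_put(struct xarray *xa, struct example_ref *ref)
{
	/* Earlier puts just drop a reference; only the last one releases. */
	if (!refcount_dec_and_test(&ref->refcount))
		return;

	/* Plain xa_lock() is enough now that no IRQ context takes it. */
	xa_lock(xa);
	__xa_erase(xa, ref->id);
	xa_unlock(xa);
	kfree(ref);
}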
static void vgic_release_deleted_lpis(struct kvm *kvm)
{
struct vgic_dist *dist = &kvm->arch.vgic;
- unsigned long flags, intid;
+ unsigned long intid;
struct vgic_irq *irq;
- xa_lock_irqsave(&dist->lpi_xa, flags);
+ xa_lock(&dist->lpi_xa);
xa_for_each(&dist->lpi_xa, intid, irq) {
if (irq->pending_release)
vgic_release_lpi_locked(dist, irq);
}
- xa_unlock_irqrestore(&dist->lpi_xa, flags);
+ xa_unlock(&dist->lpi_xa);
}
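
/*
 * A minimal sketch, not part of the patch: xa_for_each() tolerates
 * entries being erased while it runs, so flagged entries can be dropped
 * in place under the already-held lock.  example_* names are
 * hypothetical.
 */
struct example_ent {
	bool pending_release;
};

static void example_reap_flagged(struct xarray *xa)
{
	struct example_ent *ent;
	unsigned long index;

	xa_lock(xa);
	xa_for_each(xa, index, ent) {
		if (!ent->pending_release)
			continue;
		__xa_erase(xa, index);	/* xa_lock already held */
		kfree(ent);
	}
	xa_unlock(xa);
}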
void vgic_flush_pending_lpis(struct kvm_vcpu *vcpu)