From fc4dafe87b93ec94204896c4bc8cad7e71bdd151 Mon Sep 17 00:00:00 2001
From: Oliver Upton
Date: Fri, 23 May 2025 12:47:19 -0700
Subject: [PATCH] KVM: arm64: Protect vLPI translation with vgic_irq::irq_lock

Though undocumented, KVM generally protects the translation of a vLPI
with the its_lock. While this makes perfectly good sense, as the ITS
itself contains the guest translation, an upcoming change will require
twiddling the vLPI mapping in an atomic context.

Switch to using the vIRQ's irq_lock to protect the translation. Use of
the its_lock in vgic_v4_unset_forwarding() is preserved for now as it
still needs to walk the ITS.

Tested-by: Sweet Tea Dorminy
Signed-off-by: Oliver Upton
Link: https://lore.kernel.org/r/20250523194722.4066715-3-oliver.upton@linux.dev
Signed-off-by: Marc Zyngier
---
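Note for reviewers (below the ---, so dropped by git-am): a minimal sketch of
the scope-based locking pattern this patch moves to, using guard() and
scoped_guard() from <linux/cleanup.h>/<linux/spinlock.h>. The struct and
helpers below (vgic_irq_sketch, mark_forwarded(), unmark_forwarded()) are
made up for illustration, carry only the fields the example needs, and are
not the real vGIC code.

/* Illustrative only: trimmed stand-in for struct vgic_irq. */
#include <linux/types.h>
#include <linux/spinlock.h>
#include <linux/cleanup.h>

struct vgic_irq_sketch {
	raw_spinlock_t	irq_lock;	/* now also protects the vLPI translation */
	bool		hw;		/* vLPI forwarded to hardware? */
	unsigned int	host_irq;	/* host Linux IRQ backing the vLPI */
};

/* guard(): the lock is dropped automatically on every return path. */
static int mark_forwarded(struct vgic_irq_sketch *irq, unsigned int host_irq)
{
	guard(raw_spinlock_irqsave)(&irq->irq_lock);

	if (irq->hw)
		return 0;	/* already mapped; unlock still happens */

	irq->hw = true;
	irq->host_irq = host_irq;
	return 0;
}

/* scoped_guard(): the lock is held only for the enclosed block. */
static void unmark_forwarded(struct vgic_irq_sketch *irq)
{
	scoped_guard(raw_spinlock_irqsave, &irq->irq_lock) {
		irq->hw = false;
	}
	/* irq_lock already released here */
}

The guards release irq_lock on every return path automatically, which is what
lets update_affinity() take early returns and its_free_ite() keep its critical
section to a single block without explicit unlock calls.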

 arch/arm64/kvm/vgic/vgic-its.c | 48 +++++++++++++++++-----------------
 arch/arm64/kvm/vgic/vgic-v4.c  | 41 ++++++++++++++++-------------
 2 files changed, 47 insertions(+), 42 deletions(-)

diff --git a/arch/arm64/kvm/vgic/vgic-its.c b/arch/arm64/kvm/vgic/vgic-its.c
index 569f9da9049fe..beca12dae7795 100644
--- a/arch/arm64/kvm/vgic/vgic-its.c
+++ b/arch/arm64/kvm/vgic/vgic-its.c
@@ -306,39 +306,34 @@ static int update_lpi_config(struct kvm *kvm, struct vgic_irq *irq,
 		}
 	}
 
-	raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
-
 	if (irq->hw)
-		return its_prop_update_vlpi(irq->host_irq, prop, needs_inv);
+		ret = its_prop_update_vlpi(irq->host_irq, prop, needs_inv);
 
-	return 0;
+	raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
+	return ret;
 }
 
 static int update_affinity(struct vgic_irq *irq, struct kvm_vcpu *vcpu)
 {
-	int ret = 0;
-	unsigned long flags;
+	struct its_vlpi_map map;
+	int ret;
 
-	raw_spin_lock_irqsave(&irq->irq_lock, flags);
+	guard(raw_spinlock_irqsave)(&irq->irq_lock);
 	irq->target_vcpu = vcpu;
-	raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
 
-	if (irq->hw) {
-		struct its_vlpi_map map;
-
-		ret = its_get_vlpi(irq->host_irq, &map);
-		if (ret)
-			return ret;
+	if (!irq->hw)
+		return 0;
 
-		if (map.vpe)
-			atomic_dec(&map.vpe->vlpi_count);
-		map.vpe = &vcpu->arch.vgic_cpu.vgic_v3.its_vpe;
-		atomic_inc(&map.vpe->vlpi_count);
+	ret = its_get_vlpi(irq->host_irq, &map);
+	if (ret)
+		return ret;
 
-		ret = its_map_vlpi(irq->host_irq, &map);
-	}
+	if (map.vpe)
+		atomic_dec(&map.vpe->vlpi_count);
 
-	return ret;
+	map.vpe = &vcpu->arch.vgic_cpu.vgic_v3.its_vpe;
+	atomic_inc(&map.vpe->vlpi_count);
+	return its_map_vlpi(irq->host_irq, &map);
 }
 
 static struct kvm_vcpu *collection_to_vcpu(struct kvm *kvm,
@@ -756,12 +751,17 @@ int vgic_its_inject_msi(struct kvm *kvm, struct kvm_msi *msi)
 /* Requires the its_lock to be held. */
 static void its_free_ite(struct kvm *kvm, struct its_ite *ite)
 {
+	struct vgic_irq *irq = ite->irq;
 	list_del(&ite->ite_list);
 
 	/* This put matches the get in vgic_add_lpi. */
-	if (ite->irq) {
-		if (ite->irq->hw)
-			WARN_ON(its_unmap_vlpi(ite->irq->host_irq));
+	if (irq) {
+		scoped_guard(raw_spinlock_irqsave, &irq->irq_lock) {
+			if (irq->hw)
+				WARN_ON(its_unmap_vlpi(ite->irq->host_irq));
+
+			irq->hw = false;
+		}
 
 		vgic_put_irq(kvm, ite->irq);
 	}
diff --git a/arch/arm64/kvm/vgic/vgic-v4.c b/arch/arm64/kvm/vgic/vgic-v4.c
index 8b25e76509981..01a5de8e9e94e 100644
--- a/arch/arm64/kvm/vgic/vgic-v4.c
+++ b/arch/arm64/kvm/vgic/vgic-v4.c
@@ -457,9 +457,11 @@ int kvm_vgic_v4_set_forwarding(struct kvm *kvm, int virq,
 				 irq_entry->msi.data, &irq))
 		return 0;
 
+	raw_spin_lock_irqsave(&irq->irq_lock, flags);
+
 	/* Silently exit if the vLPI is already mapped */
 	if (irq->hw)
-		return 0;
+		goto out_unlock_irq;
 
 	/*
 	 * Emit the mapping request. If it fails, the ITS probably
@@ -479,30 +481,30 @@ int kvm_vgic_v4_set_forwarding(struct kvm *kvm, int virq,
 
 	ret = its_map_vlpi(virq, &map);
 	if (ret)
-		return ret;
+		goto out_unlock_irq;
 
 	irq->hw = true;
 	irq->host_irq = virq;
 	atomic_inc(&map.vpe->vlpi_count);
 
 	/* Transfer pending state */
-	raw_spin_lock_irqsave(&irq->irq_lock, flags);
-	if (irq->pending_latch) {
-		ret = irq_set_irqchip_state(irq->host_irq,
-					    IRQCHIP_STATE_PENDING,
-					    irq->pending_latch);
-		WARN_RATELIMIT(ret, "IRQ %d", irq->host_irq);
+	if (!irq->pending_latch)
+		goto out_unlock_irq;
 
-		/*
-		 * Clear pending_latch and communicate this state
-		 * change via vgic_queue_irq_unlock.
-		 */
-		irq->pending_latch = false;
-		vgic_queue_irq_unlock(kvm, irq, flags);
-	} else {
-		raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
-	}
+	ret = irq_set_irqchip_state(irq->host_irq, IRQCHIP_STATE_PENDING,
+				    irq->pending_latch);
+	WARN_RATELIMIT(ret, "IRQ %d", irq->host_irq);
+
+	/*
+	 * Clear pending_latch and communicate this state
+	 * change via vgic_queue_irq_unlock.
+	 */
+	irq->pending_latch = false;
+	vgic_queue_irq_unlock(kvm, irq, flags);
+	return ret;
 
+out_unlock_irq:
+	raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
 	return ret;
 }
 
@@ -511,7 +513,8 @@ int kvm_vgic_v4_unset_forwarding(struct kvm *kvm, int virq,
 {
 	struct vgic_its *its;
 	struct vgic_irq *irq;
-	int ret;
+	unsigned long flags;
+	int ret = 0;
 
 	if (!vgic_supports_direct_msis(kvm))
 		return 0;
@@ -531,6 +534,7 @@ int kvm_vgic_v4_unset_forwarding(struct kvm *kvm, int virq,
 	if (ret)
 		goto out;
 
+	raw_spin_lock_irqsave(&irq->irq_lock, flags);
 	WARN_ON(irq->hw && irq->host_irq != virq);
 	if (irq->hw) {
 		atomic_dec(&irq->target_vcpu->arch.vgic_cpu.vgic_v3.its_vpe.vlpi_count);
@@ -538,6 +542,7 @@
 		ret = its_unmap_vlpi(virq);
 	}
 
+	raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
 out:
 	mutex_unlock(&its->its_lock);
 	return ret;
-- 
2.50.1