Using a linked-list for LPIs is less than ideal as it of course requires
iterative searches to find a particular entry. An xarray is a better
data structure for this use case, as it provides faster searches and can
still handle a potentially sparse range of INTID allocations.
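
For illustration only (hypothetical helpers, not code from this patch),
compare the O(n) list walk used today with the lookup an xarray makes
possible; locking and reference counting are omitted for brevity:

  /* Today: walk every allocated LPI until the INTID matches. */
  static struct vgic_irq *lpi_find_list(struct vgic_dist *dist, u32 intid)
  {
          struct vgic_irq *irq;

          list_for_each_entry(irq, &dist->lpi_list_head, lpi_list) {
                  if (irq->intid == intid)
                          return irq;
          }

          return NULL;
  }

  /* With the xarray: a tree lookup that also tolerates sparse INTIDs. */
  static struct vgic_irq *lpi_find_xa(struct vgic_dist *dist, u32 intid)
  {
          return xa_load(&dist->lpi_xa, intid);
  }
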
Start by storing LPIs in an xarray, punting usage of the xarray to a
subsequent change. The observant among you will notice that we added yet
another lock to the chain of locking order rules; document the ordering
of the xa_lock. Don't worry, we'll get rid of the lpi_list_lock one
day...

Reviewed-by: Marc Zyngier <maz@kernel.org>
Link: https://lore.kernel.org/r/20240221054253.3848076-2-oliver.upton@linux.dev
Signed-off-by: Oliver Upton <oliver.upton@linux.dev>

        INIT_LIST_HEAD(&dist->lpi_list_head);
        INIT_LIST_HEAD(&dist->lpi_translation_cache);
        raw_spin_lock_init(&dist->lpi_list_lock);
+       xa_init_flags(&dist->lpi_xa, XA_FLAGS_LOCK_IRQ);
 }
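
XA_FLAGS_LOCK_IRQ marks the xarray's internal xa_lock as one that is
taken with interrupts disabled, which is required here because entries
are erased while IRQs are off (see the locking-order comment further
down). A minimal standalone sketch of the flag in use, with
illustrative names that are not part of this patch:

  #include <linux/xarray.h>

  static DEFINE_XARRAY_FLAGS(example_xa, XA_FLAGS_LOCK_IRQ);

  /* Publish an entry while explicitly holding xa_lock with IRQs off. */
  static int example_store(unsigned long index, void *entry)
  {
          unsigned long flags;
          int ret;

          xa_lock_irqsave(&example_xa, flags);
          ret = xa_err(__xa_store(&example_xa, index, entry, GFP_ATOMIC));
          xa_unlock_irqrestore(&example_xa, flags);

          return ret;
  }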
 
 /* CREATION */
 
        if (vgic_supports_direct_msis(kvm))
                vgic_v4_teardown(kvm);
+
+       xa_destroy(&dist->lpi_xa);
 }
 
 static void __kvm_vgic_vcpu_destroy(struct kvm_vcpu *vcpu)
 
        if (!irq)
                return ERR_PTR(-ENOMEM);
 
+       ret = xa_reserve_irq(&dist->lpi_xa, intid, GFP_KERNEL_ACCOUNT);
+       if (ret) {
+               kfree(irq);
+               return ERR_PTR(ret);
+       }
+
        INIT_LIST_HEAD(&irq->lpi_list);
        INIT_LIST_HEAD(&irq->ap_list);
        raw_spin_lock_init(&irq->irq_lock);
                goto out_unlock;
        }
 
+       ret = xa_err(xa_store(&dist->lpi_xa, intid, irq, 0));
+       if (ret) {
+               xa_release(&dist->lpi_xa, intid);
+               kfree(irq);
+               goto out_unlock;
+       }
+
        list_add_tail(&irq->lpi_list, &dist->lpi_list_head);
        dist->lpi_list_count++;
 
 out_unlock:
        raw_spin_unlock_irqrestore(&dist->lpi_list_lock, flags);
 
+       if (ret)
+               return ERR_PTR(ret);
+
        /*
         * We "cache" the configuration table entries in our struct vgic_irq's.
         * However we only have those structs for mapped IRQs, so we read in
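
The two-step pattern above deserves a note: xa_reserve_irq() allocates
the slot up front with a sleepable GFP_KERNEL_ACCOUNT allocation,
before any spinlock is held, so the later xa_store() under the
lpi_list_lock passes a gfp of 0 and should not fail with -ENOMEM,
since the slot is already preallocated; on failure the reservation is
dropped again with xa_release(). A condensed sketch of the pattern
(hypothetical helper, not from the patch):

  static int publish_entry(struct vgic_dist *dist, u32 intid,
                           struct vgic_irq *irq)
  {
          unsigned long flags;
          int ret;

          /* May sleep: do the allocation before taking any locks. */
          ret = xa_reserve_irq(&dist->lpi_xa, intid, GFP_KERNEL_ACCOUNT);
          if (ret)
                  return ret;

          raw_spin_lock_irqsave(&dist->lpi_list_lock, flags);

          /* gfp of 0: the slot already exists, no allocation occurs. */
          ret = xa_err(xa_store(&dist->lpi_xa, intid, irq, 0));
          if (ret)
                  xa_release(&dist->lpi_xa, intid);

          raw_spin_unlock_irqrestore(&dist->lpi_list_lock, flags);

          return ret;
  }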
 
  *         its->its_lock (mutex)
  *           vgic_cpu->ap_list_lock            must be taken with IRQs disabled
  *             kvm->lpi_list_lock              must be taken with IRQs disabled
- *               vgic_irq->irq_lock            must be taken with IRQs disabled
+ *               vgic_dist->lpi_xa.xa_lock     must be taken with IRQs disabled
+ *                 vgic_irq->irq_lock          must be taken with IRQs disabled
  *
  * As the ap_list_lock might be taken from the timer interrupt handler,
  * we have to disable IRQs before taking this lock and everything lower
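
Read top to bottom, each lock in that comment nests inside the one
above it. Purely as illustration (this snippet is not in the patch),
taking the three innermost locks in the documented order would look
like:

  unsigned long flags;

  /* Outermost of the three; disables IRQs for everything below it. */
  raw_spin_lock_irqsave(&dist->lpi_list_lock, flags);

  /* xa_lock nests inside; IRQs are already disabled at this point. */
  xa_lock(&dist->lpi_xa);

  /* Innermost: the per-interrupt lock. */
  raw_spin_lock(&irq->irq_lock);

  /* ... critical section ... */

  raw_spin_unlock(&irq->irq_lock);
  xa_unlock(&dist->lpi_xa);
  raw_spin_unlock_irqrestore(&dist->lpi_list_lock, flags);
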
                return;
 
        list_del(&irq->lpi_list);
+       xa_erase(&dist->lpi_xa, irq->intid);
        dist->lpi_list_count--;
 
        kfree(irq);
 
 #include <linux/spinlock.h>
 #include <linux/static_key.h>
 #include <linux/types.h>
+#include <linux/xarray.h>
 #include <kvm/iodev.h>
 #include <linux/list.h>
 #include <linux/jump_label.h>
 
        /* Protects the lpi_list and the count value below. */
        raw_spinlock_t          lpi_list_lock;
+       struct xarray           lpi_xa;
        struct list_head        lpi_list_head;
        int                     lpi_list_count;