seq_printf(s, "vgic_model:\t%s\n", v3 ? "GICv3" : "GICv2");
        seq_printf(s, "nr_spis:\t%d\n", dist->nr_spis);
        if (v3)
-               seq_printf(s, "nr_lpis:\t%d\n", dist->lpi_list_count);
+               seq_printf(s, "nr_lpis:\t%d\n", atomic_read(&dist->lpi_count));
        seq_printf(s, "enabled:\t%d\n", dist->enabled);
        seq_printf(s, "\n");
 
 
                goto out_unlock;
        }
 
-       dist->lpi_list_count++;
+       atomic_inc(&dist->lpi_count);
 
 out_unlock:
        raw_spin_unlock_irqrestore(&dist->lpi_list_lock, flags);
         * command). If coming from another path (such as enabling LPIs),
         * we must be careful not to overrun the array.
         */
-       irq_count = READ_ONCE(dist->lpi_list_count);
+       irq_count = atomic_read(&dist->lpi_count);
        intids = kmalloc_array(irq_count, sizeof(intids[0]), GFP_KERNEL_ACCOUNT);
        if (!intids)
                return -ENOMEM;
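
The snapshot read above is deliberately treated as a hint: nothing stops the live count from changing after it is sampled, which is why the surrounding comment warns against overrunning the array. A minimal userspace C11 sketch of the same snapshot-then-bound pattern (lpi_count, collect_ids and next_id are illustrative names, not kernel symbols):

    #include <stdatomic.h>
    #include <stdlib.h>

    static atomic_int lpi_count;            /* stands in for dist->lpi_count */

    /*
     * Sample the counter once, size the buffer from that sample, and
     * never write past it even if the live count grows afterwards.
     */
    static int collect_ids(int **out, int (*next_id)(int prev))
    {
        int snapshot = atomic_load(&lpi_count);
        int *ids, id = -1, i = 0;

        ids = malloc(snapshot * sizeof(*ids));
        if (!ids)
            return -1;

        while (i < snapshot && (id = next_id(id)) >= 0)
            ids[i++] = id;                  /* bounded by the snapshot */

        *out = ids;
        return i;                           /* may fall short of the snapshot */
    }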
 
                return;
 
        xa_erase(&dist->lpi_xa, irq->intid);
-       dist->lpi_list_count--;
+       atomic_dec(&dist->lpi_count);
 
        kfree(irq);
 }
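
Taken with the add path above, this shows the shape of the conversion: the xarray itself remains guarded by lpi_list_lock, while the counter becomes a lock-free statistic whose correctness no longer depends on that lock (even though the increment still happens to run inside the locked region). A userspace C11 sketch of the same split, with a pthread mutex standing in for the raw spinlock (struct and function names are illustrative):

    #include <pthread.h>
    #include <stdatomic.h>

    struct dist {
        pthread_mutex_t lock;    /* protects the container only */
        atomic_int      count;   /* safe to touch without the lock */
        /* container of entries elided */
    };

    static void add_entry(struct dist *d)
    {
        pthread_mutex_lock(&d->lock);
        /* insert into the container under the lock */
        pthread_mutex_unlock(&d->lock);

        atomic_fetch_add(&d->count, 1);   /* no lock required */
    }

    static void del_entry(struct dist *d)
    {
        pthread_mutex_lock(&d->lock);
        /* erase from the container under the lock */
        pthread_mutex_unlock(&d->lock);

        atomic_fetch_sub(&d->count, 1);
    }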
 
         */
        u64                     propbaser;
 
-       /* Protects the lpi_list and the count value below. */
+       /* Protects the lpi_list. */
        raw_spinlock_t          lpi_list_lock;
        struct xarray           lpi_xa;
-       int                     lpi_list_count;
+       atomic_t                lpi_count;
 
        /* LPI translation cache */
        struct list_head        lpi_translation_cache;
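
One detail worth noting about the new field: kernel atomic_read() is a plain relaxed load, and a non-value-returning atomic_inc()/atomic_dec() provides no ordering either. That is the right fit here, since lpi_count is only ever consumed as a statistic (debugfs) or an allocation hint, never as a synchronization flag. The closest C11 spelling of those semantics, for illustration only:

    #include <stdatomic.h>

    static atomic_int lpi_count;

    static inline void lpi_count_inc(void)
    {
        /* kernel atomic_inc(): atomic RMW with no ordering guarantees */
        atomic_fetch_add_explicit(&lpi_count, 1, memory_order_relaxed);
    }

    static inline int lpi_count_read(void)
    {
        /* kernel atomic_read(): a relaxed load, like READ_ONCE() */
        return atomic_load_explicit(&lpi_count, memory_order_relaxed);
    }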