 #include <asm/xics.h>
 #include <asm/debug.h>
 #include <asm/time.h>
+#include <asm/spinlock.h>
 
 #include <linux/debugfs.h>
 #include <linux/seq_file.h>
  * LOCKING
  * =======
  *
- * Each ICS has a mutex protecting the information about the IRQ
+ * Each ICS has a spin lock protecting the information about the IRQ
  * sources and avoiding simultaneous deliveries of the same interrupt.
  *
  * ICP operations are done via a single compare & swap transaction
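
Every hunk below applies the same take/release sequence, so it is worth
spelling out once. The change converts ics->lock from a struct mutex to an
arch_spinlock_t, presumably so these paths can later run in real mode, where
a sleeping mutex is unusable. A minimal sketch of the pattern (sketch only;
the field change to arch_spinlock_t sits outside the hunks shown). Note that
arch_spin_lock() does not disable interrupts by itself, hence the explicit
local_irq_save():

	unsigned long flags;

	local_irq_save(flags);		/* IRQs off; remember prior state */
	arch_spin_lock(&ics->lock);	/* now an arch_spinlock_t */

	/* ... read or update ics->irq_state[] ... */

	arch_spin_unlock(&ics->lock);
	local_irq_restore(flags);	/* IRQs back on only if they were on */
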
 {
        int i;
+       unsigned long flags;
 
-       mutex_lock(&ics->lock);
+       local_irq_save(flags);
+       arch_spin_lock(&ics->lock);
 
        for (i = 0; i < KVMPPC_XICS_IRQ_PER_ICS; i++) {
                struct ics_irq_state *state = &ics->irq_state[i];
                XICS_DBG("resend %#x prio %#x\n", state->number,
                              state->priority);
 
-               mutex_unlock(&ics->lock);
+               arch_spin_unlock(&ics->lock);
+               local_irq_restore(flags);
                icp_deliver_irq(xics, icp, state->number);
-               mutex_lock(&ics->lock);
+               local_irq_save(flags);
+               arch_spin_lock(&ics->lock);
        }
 
-       mutex_unlock(&ics->lock);
+       arch_spin_unlock(&ics->lock);
+       local_irq_restore(flags);
 }
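
One subtlety in the loop above: icp_deliver_irq() takes the ICS lock itself,
so it must be called with the lock dropped; with an arch_spinlock_t a
recursive acquire would deadlock by spinning forever. A stripped-down
skeleton of the idiom, using the names already in scope in this function:

	for (i = 0; i < KVMPPC_XICS_IRQ_PER_ICS; i++) {
		/* ... decide whether irq_state[i] needs a resend ... */

		arch_spin_unlock(&ics->lock);	/* icp_deliver_irq() relocks */
		local_irq_restore(flags);
		icp_deliver_irq(xics, icp, state->number);
		local_irq_save(flags);
		arch_spin_lock(&ics->lock);	/* state may have changed */
	}
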
 
 static bool write_xive(struct kvmppc_xics *xics, struct kvmppc_ics *ics,
                       struct ics_irq_state *state,
                       u32 server, u32 priority, u32 saved_priority)
 {
        bool deliver;
+       unsigned long flags;
 
-       mutex_lock(&ics->lock);
+       local_irq_save(flags);
+       arch_spin_lock(&ics->lock);
 
        state->server = server;
        state->priority = priority;
                deliver = true;
        }
 
-       mutex_unlock(&ics->lock);
+       arch_spin_unlock(&ics->lock);
+       local_irq_restore(flags);
 
        return deliver;
 }
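
write_xive() only computes the deliver decision under the lock; the delivery
itself is left to the caller, once the lock is released, for the same reason
as above. A caller-side sketch, modelled on kvmppc_xics_set_xive() in this
file (treat the exact shape as illustrative):

	/* Deliver only after write_xive() has dropped the ICS lock,
	 * so icp_deliver_irq() is free to take it. */
	if (write_xive(xics, ics, state, server, priority, priority))
		icp_deliver_irq(xics, icp, irq);
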
        struct kvmppc_ics *ics;
        struct ics_irq_state *state;
        u16 src;
+       unsigned long flags;
 
        if (!xics)
                return -ENODEV;
                return -EINVAL;
        state = &ics->irq_state[src];
 
-       mutex_lock(&ics->lock);
+       local_irq_save(flags);
+       arch_spin_lock(&ics->lock);
        *server = state->server;
        *priority = state->priority;
-       mutex_unlock(&ics->lock);
+       arch_spin_unlock(&ics->lock);
+       local_irq_restore(flags);
 
        return 0;
 }
        struct kvmppc_ics *ics;
        u32 reject;
        u16 src;
+       unsigned long flags;
 
        /*
         * This is used both for initial delivery of an interrupt and
        state = &ics->irq_state[src];
 
        /* Get a lock on the ICS */
-       mutex_lock(&ics->lock);
+       local_irq_save(flags);
+       arch_spin_lock(&ics->lock);
 
        /* Get our server */
        if (!icp || state->server != icp->server_num) {
         *
         * Note that if successful, the new delivery might have itself
         * rejected an interrupt that was "delivered" before we took the
-        * icp mutex.
+        * ics spin lock.
         *
         * In this case we do the whole sequence all over again for the
         * new guy. We cannot assume that the rejected interrupt is less
                 * Delivery was successful, did we reject somebody else?
                 */
                if (reject && reject != XICS_IPI) {
-                       mutex_unlock(&ics->lock);
+                       arch_spin_unlock(&ics->lock);
+                       local_irq_restore(flags);
                        new_irq = reject;
                        goto again;
                }
                 */
                smp_mb();
                if (!icp->state.need_resend) {
-                       mutex_unlock(&ics->lock);
+                       arch_spin_unlock(&ics->lock);
+                       local_irq_restore(flags);
                        goto again;
                }
        }
  out:
-       mutex_unlock(&ics->lock);
+       arch_spin_unlock(&ics->lock);
+       local_irq_restore(flags);
 }
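
For context, the delivery attempt that produces reject above is the single
compare & swap transaction the LOCKING comment refers to: the ICP state fits
in one word, so an interrupt is pushed by recomputing that word and retrying
on contention. A simplified sketch (the real code lives in
icp_try_to_deliver()/icp_try_update(); field names follow union
kvmppc_icp_state, but the details here are illustrative, not exact):

	union kvmppc_icp_state old_state, new_state;

	do {
		old_state = new_state = READ_ONCE(icp->state);

		reject = 0;
		if (priority < new_state.pending_pri) {
			/* We win the slot; the previous occupant, if any,
			 * becomes the rejected interrupt to redeliver. */
			reject = new_state.xisr;
			new_state.xisr = state->number;
			new_state.pending_pri = priority;
		}
	} while (cmpxchg(&icp->state.raw, old_state.raw,
			 new_state.raw) != old_state.raw);
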
 
 static void icp_down_cppr(struct kvmppc_xics *xics, struct kvmppc_icp *icp,
        struct kvm *kvm = xics->kvm;
        struct kvm_vcpu *vcpu;
        int icsid, i;
+       unsigned long flags;
        unsigned long t_rm_kick_vcpu, t_rm_check_resend;
        unsigned long t_rm_reject, t_rm_notify_eoi;
 
                seq_printf(m, "=========\nICS state for ICS 0x%x\n=========\n",
                           icsid);
 
-               mutex_lock(&ics->lock);
+               local_irq_save(flags);
+               arch_spin_lock(&ics->lock);
 
                for (i = 0; i < KVMPPC_XICS_IRQ_PER_ICS; i++) {
                        struct ics_irq_state *irq = &ics->irq_state[i];
                                   irq->resend, irq->masked_pending);
 
                }
-               mutex_unlock(&ics->lock);
+               arch_spin_unlock(&ics->lock);
+               local_irq_restore(flags);
        }
        return 0;
 }
        if (!ics)
                goto out;
 
-       mutex_init(&ics->lock);
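+       /*
+        * No lock init needed to replace mutex_init(): the ICS comes
+        * from kzalloc() and a zeroed arch_spinlock_t is the unlocked
+        * state on powerpc.
+        */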
        ics->icsid = icsid;
 
        for (i = 0; i < KVMPPC_XICS_IRQ_PER_ICS; i++) {
        u64 __user *ubufp = (u64 __user *) addr;
        u16 idx;
        u64 val, prio;
+       unsigned long flags;
 
        ics = kvmppc_xics_find_ics(xics, irq, &idx);
        if (!ics)
                return -ENOENT;
 
        irqp = &ics->irq_state[idx];
-       mutex_lock(&ics->lock);
+       local_irq_save(flags);
+       arch_spin_lock(&ics->lock);
        ret = -ENOENT;
        if (irqp->exists) {
                val = irqp->server;
                        val |= KVM_XICS_PENDING;
                ret = 0;
        }
-       mutex_unlock(&ics->lock);
+       arch_spin_unlock(&ics->lock);
+       local_irq_restore(flags);
 
        if (!ret && put_user(val, ubufp))
                ret = -EFAULT;
        u64 val;
        u8 prio;
        u32 server;
+       unsigned long flags;
 
        if (irq < KVMPPC_XICS_FIRST_IRQ || irq >= KVMPPC_XICS_NR_IRQS)
                return -ENOENT;
            kvmppc_xics_find_server(xics->kvm, server) == NULL)
                return -EINVAL;
 
-       mutex_lock(&ics->lock);
+       local_irq_save(flags);
+       arch_spin_lock(&ics->lock);
        irqp->server = server;
        irqp->saved_priority = prio;
        if (val & KVM_XICS_MASKED)
        if ((val & KVM_XICS_PENDING) && (val & KVM_XICS_LEVEL_SENSITIVE))
                irqp->asserted = 1;
        irqp->exists = 1;
-       mutex_unlock(&ics->lock);
+       arch_spin_unlock(&ics->lock);
+       local_irq_restore(flags);
 
        if (val & KVM_XICS_PENDING)
                icp_deliver_irq(xics, NULL, irqp->number);
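
The final hunk repeats the discipline established above: the pending
interrupt is delivered only after the ICS lock has been released, because
icp_deliver_irq() will take that lock itself.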