#define DEBUG_PASSUP
 
+static void icp_rm_deliver_irq(struct kvmppc_xics *xics, struct kvmppc_icp *icp,
+                           u32 new_irq);
+
 static inline void rm_writeb(unsigned long paddr, u8 val)
 {
        __asm__ __volatile__("sync; stbcix %0,0,%1"
                : : "r" (val), "r" (paddr) : "memory");
 }
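
rm_writeb() is how this real-mode code reaches XICS MMIO space: a sync
followed by a cache-inhibited byte store (stbcix), the form MMIO stores
take with the MMU off. As a hedged illustration of how it gets used, a
caller in the style of the icp_rm_set_vcpu_irq() context below raises an
IPI by storing the IPI priority into the target thread's MFRR. XICS_MFRR,
IPI_PRIORITY and the xics_phys base are assumptions taken from the
surrounding XICS code, not from this hunk:

    /* Hypothetical caller sketch: raise an IPI on a hardware thread by
     * writing the IPI priority into its MFRR through the real-mode
     * MMIO helper defined above. */
    static void rm_raise_ipi_example(unsigned long xics_phys)
    {
            rm_writeb(xics_phys + XICS_MFRR, IPI_PRIORITY);
    }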
 
+/* -- ICS routines -- */
+static void ics_rm_check_resend(struct kvmppc_xics *xics,
+                               struct kvmppc_ics *ics, struct kvmppc_icp *icp)
+{
+       int i;
+
+       arch_spin_lock(&ics->lock);
+
+       for (i = 0; i < KVMPPC_XICS_IRQ_PER_ICS; i++) {
+               struct ics_irq_state *state = &ics->irq_state[i];
+
+               if (!state->resend)
+                       continue;
+
+               arch_spin_unlock(&ics->lock);
+               icp_rm_deliver_irq(xics, icp, state->number);
+               arch_spin_lock(&ics->lock);
+       }
+
+       arch_spin_unlock(&ics->lock);
+}
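
A locking detail worth noting in ics_rm_check_resend(): icp_rm_deliver_irq()
takes ics->lock itself (see the hunk below), so the scan has to drop the
lock around each delivery and re-take it afterwards, or it would
self-deadlock on the first pending source. A minimal userspace model of
that drop-and-retake shape, with pthread spinlocks standing in for
arch_spin_lock(); this is a sketch of the pattern, not kernel code:

    #include <pthread.h>
    #include <stdio.h>

    #define NSRC 4

    static pthread_spinlock_t ics_lock;
    static int resend[NSRC] = { 0, 1, 0, 1 };

    /* Delivery may take ics_lock itself, so callers must not hold it. */
    static void deliver(int src)
    {
            pthread_spin_lock(&ics_lock);
            printf("delivering source %d\n", src);
            pthread_spin_unlock(&ics_lock);
    }

    int main(void)
    {
            pthread_spin_init(&ics_lock, PTHREAD_PROCESS_PRIVATE);
            pthread_spin_lock(&ics_lock);
            for (int i = 0; i < NSRC; i++) {
                    if (!resend[i])
                            continue;
                    pthread_spin_unlock(&ics_lock); /* avoid self-deadlock */
                    deliver(i);
                    pthread_spin_lock(&ics_lock);
            }
            pthread_spin_unlock(&ics_lock);
            return 0;
    }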
+
+/* -- ICP routines -- */
+
 static void icp_rm_set_vcpu_irq(struct kvm_vcpu *vcpu,
                                struct kvm_vcpu *this_vcpu)
 {
[...]

 static inline int check_too_hard(struct kvmppc_xics *xics,
                                  struct kvmppc_icp *icp)
 {
        return (xics->real_mode_dbg || icp->rm_action) ? H_TOO_HARD : H_SUCCESS;
 }
 
+static void icp_rm_check_resend(struct kvmppc_xics *xics,
+                            struct kvmppc_icp *icp)
+{
+       u32 icsid;
+
+       /* Order this load with the test for need_resend in the caller */
+       smp_rmb();
+       for_each_set_bit(icsid, icp->resend_map, xics->max_icsid + 1) {
+               struct kvmppc_ics *ics = xics->ics[icsid];
+
+               if (!test_and_clear_bit(icsid, icp->resend_map))
+                       continue;
+               if (!ics)
+                       continue;
+               ics_rm_check_resend(xics, ics, icp);
+       }
+}
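
icp_rm_check_resend() claims each ICS with test_and_clear_bit() before
resending, so two CPUs walking resend_map concurrently cannot both resend
the same ICS. A standalone sketch of that claim-then-handle walk in C11
atomics, with atomic_fetch_and() standing in for test_and_clear_bit() and
a plain loop for for_each_set_bit():

    #include <stdatomic.h>
    #include <stdio.h>

    static atomic_ulong resend_map = (1UL << 0) | (1UL << 2);

    int main(void)
    {
            for (int icsid = 0; icsid < 64; icsid++) {
                    unsigned long bit = 1UL << icsid;

                    /* Atomically clear the bit; only the walker that
                     * saw it set handles this ICS. */
                    if (!(atomic_fetch_and(&resend_map, ~bit) & bit))
                            continue;
                    printf("resending ICS %d\n", icsid);
            }
            return 0;
    }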
+
+static bool icp_rm_try_to_deliver(struct kvmppc_icp *icp, u32 irq, u8 priority,
+                              u32 *reject)
+{
+       union kvmppc_icp_state old_state, new_state;
+       bool success;
+
+       do {
+               old_state = new_state = READ_ONCE(icp->state);
+
+               *reject = 0;
+
+               /* See if we can deliver */
+               success = new_state.cppr > priority &&
+                       new_state.mfrr > priority &&
+                       new_state.pending_pri > priority;
+
+               /*
+                * If we can, check for a rejection and perform the
+                * delivery
+                */
+               if (success) {
+                       *reject = new_state.xisr;
+                       new_state.xisr = irq;
+                       new_state.pending_pri = priority;
+               } else {
+                       /*
+                        * If we failed to deliver we set need_resend
+                        * so a subsequent CPPR state change causes us
+                        * to try a new delivery.
+                        */
+                       new_state.need_resend = true;
+               }
+
+       } while (!icp_rm_try_update(icp, old_state, new_state));
+
+       return success;
+}
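
Everything above leans on icp_rm_try_update(), which is not part of this
excerpt. The premise is that the whole ICP state (CPPR, MFRR, pending
priority, XISR, need_resend) packs into a single 64-bit word, so the
do/while loop can re-read the state, recompute, and commit with one
compare-and-swap, retrying on contention. A minimal userspace model under
that assumption; the real union kvmppc_icp_state layout lives in
book3s_xics.h and may differ:

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdint.h>

    /* Assumed stand-in for union kvmppc_icp_state: all fields share one
     * 64-bit word so the swap below is a single atomic operation. */
    union icp_state_model {
            uint64_t raw;
            struct {
                    uint8_t  cppr;
                    uint8_t  mfrr;
                    uint8_t  pending_pri;
                    uint8_t  need_resend;
                    uint32_t xisr;
            } f;
    };

    static _Atomic uint64_t icp_state_raw;

    /* Commit 'new' only if the state is still 'old'; on failure the
     * caller re-reads and re-evaluates, as in the do/while loop of
     * icp_rm_try_to_deliver() above. */
    static bool try_update_model(union icp_state_model old,
                                 union icp_state_model new)
    {
            return atomic_compare_exchange_strong(&icp_state_raw,
                                                  &old.raw, new.raw);
    }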
+
+static void icp_rm_deliver_irq(struct kvmppc_xics *xics, struct kvmppc_icp *icp,
+                           u32 new_irq)
+{
+       struct ics_irq_state *state;
+       struct kvmppc_ics *ics;
+       u32 reject;
+       u16 src;
+
+       /*
+        * This is used both for initial delivery of an interrupt and
+        * for subsequent rejection.
+        *
+        * Rejection can be racy vs. resends. We have evaluated the
+        * rejection in an atomic ICP transaction which is now complete,
+        * so potentially the ICP can already accept the interrupt again.
+        *
+        * So we need to retry the delivery. Essentially the reject path
+        * boils down to a failed delivery. Always.
+        *
+        * Now the interrupt could also have moved to a different target,
+        * thus we may need to re-do the ICP lookup as well.
+        */
+
+ again:
+       /* Get the ICS state and lock it */
+       ics = kvmppc_xics_find_ics(xics, new_irq, &src);
+       if (!ics) {
+               /* No valid ICS for this interrupt: nothing to deliver */
+               return;
+       }
+       state = &ics->irq_state[src];
+
+       /* Get a lock on the ICS */
+       arch_spin_lock(&ics->lock);
+
+       /* Get our server */
+       if (!icp || state->server != icp->server_num) {
+               icp = kvmppc_xics_find_server(xics->kvm, state->server);
+               if (!icp) {
+                       /* No valid ICP for this server: drop the delivery */
+                       goto out;
+               }
+       }
+
+       /* Clear the resend bit of that interrupt */
+       state->resend = 0;
+
+       /*
+        * If masked, bail out
+        *
+        * Note: PAPR doesn't mention anything about masked pending
+        * when doing a resend, only when doing a delivery.
+        *
+        * However that would have the effect of losing a masked
+        * interrupt that was rejected and isn't consistent with
+        * the whole masked_pending business which is about not
+        * losing interrupts that occur while masked.
+        *
+        * I don't differentiate normal deliveries and resends, so this
+        * implementation will differ from PAPR and not lose such
+        * interrupts.
+        */
+       if (state->priority == MASKED) {
+               state->masked_pending = 1;
+               goto out;
+       }
+
+       /*
+        * Try the delivery, this will set the need_resend flag
+        * in the ICP as part of the atomic transaction if the
+        * delivery is not possible.
+        *
+        * Note that if successful, the new delivery might have itself
+        * rejected an interrupt that was "delivered" before we took the
+        * ics spin lock.
+        *
+        * In this case we do the whole sequence all over again for the
+        * new guy. We cannot assume that the rejected interrupt is less
+        * favored than the new one, and thus doesn't need to be delivered,
+        * because by the time we exit icp_rm_try_to_deliver() the target
+        * processor may well have already consumed & completed it, and thus
+        * the rejected interrupt might actually be already acceptable.
+        */
+       if (icp_rm_try_to_deliver(icp, new_irq, state->priority, &reject)) {
+               /*
+                * Delivery was successful. Did we reject somebody else?
+                */
+               if (reject && reject != XICS_IPI) {
+                       arch_spin_unlock(&ics->lock);
+                       new_irq = reject;
+                       goto again;
+               }
+       } else {
+               /*
+                * We failed to deliver the interrupt, so we set the
+                * resend map bit and mark the ICS state as needing a resend.
+                */
+               set_bit(ics->icsid, icp->resend_map);
+               state->resend = 1;
+
+               /*
+                * If the need_resend flag got cleared in the ICP some time
+                * between icp_rm_try_to_deliver() atomic update and now, then
+                * we know it might have missed the resend_map bit. So we
+                * retry.
+                */
+               smp_mb();
+               if (!icp->state.need_resend) {
+                       arch_spin_unlock(&ics->lock);
+                       goto again;
+               }
+       }
+ out:
+       arch_spin_unlock(&ics->lock);
+}
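
Two of the comment blocks in icp_rm_deliver_irq() are worth unpacking
with small models. First, the masked_pending rule: a delivery or resend
that finds the source masked is latched rather than dropped, and
unmasking replays it. A userspace sketch of that behavior; the names
here are illustrative, not the kernel's:

    #include <stdbool.h>
    #include <stdio.h>

    #define MASKED 0xff

    struct src_model {
            unsigned char priority;
            bool masked_pending;
    };

    static void deliver(struct src_model *s, unsigned int irq)
    {
            if (s->priority == MASKED) {
                    s->masked_pending = true;  /* latch, don't lose it */
                    return;
            }
            printf("delivered irq %u at priority %u\n", irq, s->priority);
    }

    static void set_priority(struct src_model *s, unsigned char prio,
                             unsigned int irq)
    {
            s->priority = prio;
            /* Unmasking replays a latched interrupt. */
            if (prio != MASKED && s->masked_pending) {
                    s->masked_pending = false;
                    deliver(s, irq);
            }
    }

    int main(void)
    {
            struct src_model s = { .priority = MASKED };

            deliver(&s, 42);         /* masked: latched, not delivered */
            set_priority(&s, 5, 42); /* unmasked: replayed */
            return 0;
    }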
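Second, the need_resend handshake on the failed-delivery path: the
deliverer sets its resend_map bit and then re-checks need_resend, while
the consumer clears need_resend (inside its atomic update) and then
scans resend_map. With a full barrier between the two steps on each
side, at least one side is guaranteed to observe the other, so no
interrupt is stranded. A sketch of that pairing, with C11 fences
standing in for smp_mb()/smp_rmb():

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdio.h>

    static atomic_bool need_resend;
    static atomic_ulong resend_map;

    /* Failed-delivery side: publish the bit, then check the flag.
     * Returns true when the caller must retry the delivery itself,
     * mirroring the "goto again" above. */
    static bool record_failed_delivery(int icsid)
    {
            atomic_fetch_or(&resend_map, 1UL << icsid);
            atomic_thread_fence(memory_order_seq_cst); /* smp_mb() */
            return !atomic_load(&need_resend);
    }

    /* Consumer side: clear the flag, then scan the map (compare the
     * smp_rmb() at the top of icp_rm_check_resend()). */
    static void scan_resends(void)
    {
            atomic_store(&need_resend, false);
            atomic_thread_fence(memory_order_seq_cst);
            printf("resend map: %#lx\n", atomic_exchange(&resend_map, 0UL));
    }

    int main(void)
    {
            /* need_resend starts clear, so the failed delivery must
             * handle the resend itself. */
            if (record_failed_delivery(3))
                    scan_resends();
            return 0;
    }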
+
 static void icp_rm_down_cppr(struct kvmppc_xics *xics, struct kvmppc_icp *icp,
                             u8 new_cppr)
 {
[...]
         * separately here as well.
         */
        if (resend) {
-               icp->rm_action |= XICS_RM_CHECK_RESEND;
-               icp->rm_resend_icp = icp;
+               icp_rm_check_resend(xics, icp);
        }
 }
 
[...]
                }
        } while (!icp_rm_try_update(icp, old_state, new_state));
 
-       /* Pass rejects to virtual mode */
+       /* Handle reject in real mode */
        if (reject && reject != XICS_IPI) {
-               this_icp->rm_action |= XICS_RM_REJECT;
-               this_icp->rm_reject = reject;
+               icp_rm_deliver_irq(xics, icp, reject);
        }
 
-       /* Pass resends to virtual mode */
+       /* Handle resends in real mode */
        if (resend) {
-               this_icp->rm_action |= XICS_RM_CHECK_RESEND;
-               this_icp->rm_resend_icp = icp;
+               icp_rm_check_resend(xics, icp);
        }
 
        return check_too_hard(xics, this_icp);
 
[...]
        } while (!icp_rm_try_update(icp, old_state, new_state));
 
-       /* Pass rejects to virtual mode */
+       /*
+        * Check for rejects. They are handled by doing a new delivery
+        * attempt (see comments in icp_rm_deliver_irq).
+        */
        if (reject && reject != XICS_IPI) {
-               icp->rm_action |= XICS_RM_REJECT;
-               icp->rm_reject = reject;
+               icp_rm_deliver_irq(xics, icp, reject);
        }
  bail:
        return check_too_hard(xics, icp);
[...]
                goto bail;
        state = &ics->irq_state[src];
 
-       /* Still asserted, resend it, we make it look like a reject */
+       /* Still asserted, resend it */
        if (state->asserted) {
-               icp->rm_action |= XICS_RM_REJECT;
-               icp->rm_reject = irq;
+               icp_rm_deliver_irq(xics, icp, irq);
        }
 
        if (!hlist_empty(&vcpu->kvm->irq_ack_notifier_list)) {