 extern int kvmppc_xics_connect_vcpu(struct kvm_device *dev,
                        struct kvm_vcpu *vcpu, u32 cpu);
 extern void kvmppc_xics_ipi_action(void);
+extern void kvmppc_xics_set_mapped(struct kvm *kvm, unsigned long guest_irq,
+                                  unsigned long host_irq);
+extern void kvmppc_xics_clr_mapped(struct kvm *kvm, unsigned long guest_irq,
+                                  unsigned long host_irq);
 extern long kvmppc_deliver_irq_passthru(struct kvm_vcpu *vcpu, u32 xirr,
                                 struct kvmppc_irq_map *irq_map,
                                 struct kvmppc_passthru_irqmap *pimap);
 
 int64_t opal_pci_config_write_word(uint64_t phb_id, uint64_t bus_dev_func,
                                   uint64_t offset, uint32_t data);
 int64_t opal_set_xive(uint32_t isn, uint16_t server, uint8_t priority);
+int64_t opal_rm_set_xive(uint32_t isn, uint16_t server, uint8_t priority);
 int64_t opal_get_xive(uint32_t isn, __be16 *server, uint8_t *priority);
 int64_t opal_register_exception_handler(uint64_t opal_exception,
                                        uint64_t handler_address,
 
        if (i == pimap->n_mapped)
                pimap->n_mapped++;
 
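+       /* Record the mapping in the emulated XICS as well */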
+       kvmppc_xics_set_mapped(kvm, guest_gsi, desc->irq_data.hwirq);
+
        mutex_unlock(&kvm->lock);
 
        return 0;
                return -ENODEV;
        }
 
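+       /* Drop the mapping from the emulated XICS before invalidating it */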
+       kvmppc_xics_clr_mapped(kvm, guest_gsi, pimap->mapped[i].r_hwirq);
+
        /* invalidate the entry */
        pimap->mapped[i].r_hwirq = 0;
 
 
 #include <asm/pgtable.h>
 #include <asm/ppc-opcode.h>
 #include <asm/pnv-pci.h>
+#include <asm/opal.h>
 
 #include "book3s_xics.h"
 
 
 static void icp_rm_deliver_irq(struct kvmppc_xics *xics, struct kvmppc_icp *icp,
                            u32 new_irq);
+static int xics_opal_rm_set_server(unsigned int hw_irq, int server_cpu);
 
 /* -- ICS routines -- */
 static void ics_rm_check_resend(struct kvmppc_xics *xics,
                icp->rm_action |= XICS_RM_NOTIFY_EOI;
                icp->rm_eoied_irq = irq;
        }
+
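+       /*
+        * A passed-through interrupt has just been EOIed.  If it was
+        * delivered on a different core (intr_cpu, recorded at delivery
+        * time), retarget the host IRQ at this core's primary thread so
+        * that future interrupts arrive where the vCPU is running.
+        */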
+       if (state->host_irq && state->intr_cpu != -1) {
+               int pcpu = cpu_first_thread_sibling(raw_smp_processor_id());
+               if (state->intr_cpu != pcpu)
+                       xics_opal_rm_set_server(state->host_irq, pcpu);
+               state->intr_cpu = -1;
+       }
  bail:
        return check_too_hard(xics, icp);
 }
        _stwcix(xics_phys + XICS_XIRR, xirr);
 }
 
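+/*
+ * Retarget a host interrupt at a new server CPU using a real-mode
+ * OPAL call.  The XICS server number carries the hardware CPU id
+ * shifted left by 2; the low two bits select a link and stay zero.
+ */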
+static int xics_opal_rm_set_server(unsigned int hw_irq, int server_cpu)
+{
+       unsigned int mangle_cpu = get_hard_smp_processor_id(server_cpu) << 2;
+
+       return opal_rm_set_xive(hw_irq, mangle_cpu, DEFAULT_PRIORITY);
+}
+
 /*
  * Increment a per-CPU 32-bit unsigned integer variable.
  * Safe to call in real-mode. Handles vmalloc'ed addresses
 
                return 0;
        }
 
+       /* Record which CPU this arrived on for passed-through interrupts */
+       if (state->host_irq)
+               state->intr_cpu = raw_smp_processor_id();
+
        /* Attempt delivery */
        icp_deliver_irq(xics, NULL, irq);
 
 {
        return pin;
 }
+
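+/*
+ * Record that guest source @irq is backed by host interrupt @host_irq.
+ * A non-zero host_irq lets the (real-mode) ICS code recognize a
+ * passed-through source; intr_cpu tracks where it last arrived.
+ */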
+void kvmppc_xics_set_mapped(struct kvm *kvm, unsigned long irq,
+                           unsigned long host_irq)
+{
+       struct kvmppc_xics *xics = kvm->arch.xics;
+       struct kvmppc_ics *ics;
+       u16 idx;
+
+       ics = kvmppc_xics_find_ics(xics, irq, &idx);
+       if (!ics)
+               return;
+
+       ics->irq_state[idx].host_irq = host_irq;
+       ics->irq_state[idx].intr_cpu = -1;
+}
+EXPORT_SYMBOL_GPL(kvmppc_xics_set_mapped);
+
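+/*
+ * Undo kvmppc_xics_set_mapped: a zero host_irq marks the source as
+ * no longer passed through.
+ */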
+void kvmppc_xics_clr_mapped(struct kvm *kvm, unsigned long irq,
+                           unsigned long host_irq)
+{
+       struct kvmppc_xics *xics = kvm->arch.xics;
+       struct kvmppc_ics *ics;
+       u16 idx;
+
+       ics = kvmppc_xics_find_ics(xics, irq, &idx);
+       if (!ics)
+               return;
+
+       ics->irq_state[idx].host_irq = 0;
+}
+EXPORT_SYMBOL_GPL(kvmppc_xics_clr_mapped);
 
        u8  lsi;                /* level-sensitive interrupt */
        u8  asserted; /* Only for LSI */
        u8  exists;
+       int intr_cpu;           /* CPU that last received this IRQ, -1 if none */
+       u32 host_irq;           /* host IRQ for a passthrough IRQ, 0 if none */
 };
 
 /* Atomic ICP state, updated with a single compare & swap */
 
 OPAL_CALL(opal_pci_config_write_half_word,     OPAL_PCI_CONFIG_WRITE_HALF_WORD);
 OPAL_CALL(opal_pci_config_write_word,          OPAL_PCI_CONFIG_WRITE_WORD);
 OPAL_CALL(opal_set_xive,                       OPAL_SET_XIVE);
+OPAL_CALL_REAL(opal_rm_set_xive,               OPAL_SET_XIVE);
 OPAL_CALL(opal_get_xive,                       OPAL_GET_XIVE);
 OPAL_CALL(opal_register_exception_handler,     OPAL_REGISTER_OPAL_EXCEPTION_HANDLER);
 OPAL_CALL(opal_pci_eeh_freeze_status,          OPAL_PCI_EEH_FREEZE_STATUS);