        IPI_WAKEUP,
        NR_IPI
 };
 
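+/*
+ * Bookkeeping for IPIs driven as per-cpu IRQs: the first Linux IRQ
+ * number of the range handed over by the irqchip (0 until
+ * set_smp_ipi_range() has run), how many of the NR_IPI messages
+ * actually got an IRQ, and the cached irq_desc for each of them so
+ * that sending an IPI avoids an irq_to_desc() lookup.
+ */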
+static int ipi_irq_base __read_mostly;
+static int nr_ipi __read_mostly = NR_IPI;
+static struct irq_desc *ipi_desc[NR_IPI] __read_mostly;
+
+static void ipi_setup(int cpu);
+static void ipi_teardown(int cpu);
+
 #ifdef CONFIG_HOTPLUG_CPU
 static int op_cpu_kill(unsigned int cpu);
 #else
         */
        notify_cpu_starting(cpu);
 
+       ipi_setup(cpu);
+
        store_cpu_topology(cpu);
        numa_add_cpu(cpu);
 
         * and we must not schedule until we're ready to give up the cpu.
         */
        set_cpu_online(cpu, false);
+       ipi_teardown(cpu);
 
        /*
         * OK - migrate IRQs away from this CPU
 /*
  * Main handler for inter-processor interrupts
  */
-void handle_IPI(int ipinr, struct pt_regs *regs)
+static void do_handle_IPI(int ipinr)
 {
        unsigned int cpu = smp_processor_id();
-       struct pt_regs *old_regs = set_irq_regs(regs);
 
        if ((unsigned)ipinr < NR_IPI) {
                trace_ipi_entry_rcuidle(ipi_types[ipinr]);
                break;
 
        case IPI_CALL_FUNC:
-               irq_enter();
                generic_smp_call_function_interrupt();
-               irq_exit();
                break;
 
        case IPI_CPU_STOP:
-               irq_enter();
                local_cpu_stop();
-               irq_exit();
                break;
 
        case IPI_CPU_CRASH_STOP:
                if (IS_ENABLED(CONFIG_KEXEC_CORE)) {
-                       irq_enter();
-                       ipi_cpu_crash_stop(cpu, regs);
+                       ipi_cpu_crash_stop(cpu, get_irq_regs());
 
                        unreachable();
                }
 
 #ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
        case IPI_TIMER:
-               irq_enter();
                tick_receive_broadcast();
-               irq_exit();
                break;
 #endif
 
 #ifdef CONFIG_IRQ_WORK
        case IPI_IRQ_WORK:
-               irq_enter();
                irq_work_run();
-               irq_exit();
                break;
 #endif
 
 
        if ((unsigned)ipinr < NR_IPI)
                trace_ipi_exit_rcuidle(ipi_types[ipinr]);
+}
+
+/* Legacy version, should go away once all irqchips have been converted */
+void handle_IPI(int ipinr, struct pt_regs *regs)
+{
+       struct pt_regs *old_regs = set_irq_regs(regs);
+
+       irq_enter();
+       do_handle_IPI(ipinr);
+       irq_exit();
+
        set_irq_regs(old_regs);
 }
 
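+/*
+ * irqaction handler for each per-cpu IPI IRQ: recover the IPI index
+ * from the Linux IRQ number and dispatch it. The core IRQ code has
+ * already entered irq context and saved the interrupted registers,
+ * which is why do_handle_IPI() no longer does irq_enter()/irq_exit().
+ */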
+static irqreturn_t ipi_handler(int irq, void *data)
+{
+       do_handle_IPI(irq - ipi_irq_base);
+       return IRQ_HANDLED;
+}
+
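+/*
+ * New smp_cross_call() backend: raise the IPI through the generic
+ * __ipi_send_mask() helper using the cached descriptor.
+ */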
+static void ipi_send(const struct cpumask *target, unsigned int ipi)
+{
+       __ipi_send_mask(ipi_desc[ipi], target);
+}
+
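+/*
+ * Enable this CPU's IPI IRQs. Called from the secondary CPU bring-up
+ * path and, for the boot CPU, from set_smp_ipi_range(). A zero
+ * ipi_irq_base means the irqchip has not been converted yet and the
+ * legacy handle_IPI() path is still in use.
+ */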
+static void ipi_setup(int cpu)
+{
+       int i;
+
+       if (!ipi_irq_base)
+               return;
+
+       for (i = 0; i < nr_ipi; i++)
+               enable_percpu_irq(ipi_irq_base + i, 0);
+}
+
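+/* Disable this CPU's IPI IRQs again when it is taken offline */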
+static void ipi_teardown(int cpu)
+{
+       int i;
+
+       if (!ipi_irq_base)
+               return;
+
+       for (i = 0; i < nr_ipi; i++)
+               disable_percpu_irq(ipi_irq_base + i);
+}
+
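+/*
+ * Called by the irqchip driver to hand over a range of n per-cpu IRQs
+ * to be used as IPIs. Each IRQ is requested with a percpu handler and
+ * marked IRQ_HIDDEN so it stays out of the regular /proc/interrupts
+ * listing.
+ */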
+void __init set_smp_ipi_range(int ipi_base, int n)
+{
+       int i;
+
+       WARN_ON(n < NR_IPI);
+       nr_ipi = min(n, NR_IPI);
+
+       for (i = 0; i < nr_ipi; i++) {
+               int err;
+
+               err = request_percpu_irq(ipi_base + i, ipi_handler,
+                                        "IPI", &irq_stat);
+               WARN_ON(err);
+
+               ipi_desc[i] = irq_to_desc(ipi_base + i);
+               irq_set_status_flags(ipi_base + i, IRQ_HIDDEN);
+       }
+
+       ipi_irq_base = ipi_base;
+       __smp_cross_call = ipi_send;
+
+       /* Set up the boot CPU immediately */
+       ipi_setup(smp_processor_id());
+}
+
 void smp_send_reschedule(int cpu)
 {
        smp_cross_call(cpumask_of(cpu), IPI_RESCHEDULE);