set_irq_regs(old_regs);
 }
 
-int hv_setup_vmbus_irq(int irq, void (*handler)(void))
+/*
+ * hv_setup_vmbus_handler() - register the VMbus interrupt callback.
+ * @handler: function the arch interrupt entry invokes when the
+ *           hard-coded Hyper-V vector fires.
+ *
+ * Replaces hv_setup_vmbus_irq(): the old 'irq' argument was always
+ * ignored on x86/x64 (a fixed vector is used), and the function could
+ * never fail, so the return value is dropped as well.
+ */
+void hv_setup_vmbus_handler(void (*handler)(void))
 {
-       /*
-        * The 'irq' argument is ignored on x86/x64 because a hard-coded
-        * interrupt vector is used for Hyper-V interrupts.
-        */
        vmbus_handler = handler;
-       return 0;
 }
+EXPORT_SYMBOL_GPL(hv_setup_vmbus_handler);
 
-void hv_remove_vmbus_irq(void)
+/*
+ * hv_remove_vmbus_handler() - unregister the VMbus interrupt callback.
+ *
+ * Only clears the function pointer; the interrupt gate itself cannot
+ * be torn down once installed.
+ */
+void hv_remove_vmbus_handler(void)
 {
        /* We have no way to deallocate the interrupt gate */
        vmbus_handler = NULL;
 }
-EXPORT_SYMBOL_GPL(hv_setup_vmbus_irq);
-EXPORT_SYMBOL_GPL(hv_remove_vmbus_irq);
+EXPORT_SYMBOL_GPL(hv_remove_vmbus_handler);
 
 /*
  * Routines to do per-architecture handling of stimer0
 
 #include <linux/version.h>
 #include <linux/random.h>
 #include <linux/clockchips.h>
+#include <linux/interrupt.h>
 #include <clocksource/hyperv_timer.h>
 #include <asm/mshyperv.h>
 #include "hyperv_vmbus.h"
        hv_set_register(HV_REGISTER_SIEFP, siefp.as_uint64);
 
        /* Setup the shared SINT. */
+       if (vmbus_irq != -1)
+               enable_percpu_irq(vmbus_irq, 0);
        shared_sint.as_uint64 = hv_get_register(HV_REGISTER_SINT0 +
                                        VMBUS_MESSAGE_SINT);
 
-       shared_sint.vector = hv_get_vector();
+       shared_sint.vector = vmbus_interrupt;
        shared_sint.masked = false;
 
        /*
        sctrl.as_uint64 = hv_get_register(HV_REGISTER_SCONTROL);
        sctrl.enable = 0;
        hv_set_register(HV_REGISTER_SCONTROL, sctrl.as_uint64);
+
+       if (vmbus_irq != -1)
+               disable_percpu_irq(vmbus_irq);
 }
 
 
 
 
 static void *hv_panic_page;
 
+/*
+ * Per-cpu dev_id cookie passed to request_percpu_irq() /
+ * free_percpu_irq(); the handler itself ignores it (see
+ * vmbus_percpu_isr()).
+ */
+static long __percpu *vmbus_evt;
+
 /* Values parsed from ACPI DSDT */
-static int vmbus_irq;
+/* No longer static: an extern declaration is added elsewhere in this patch. */
+int vmbus_irq;
 int vmbus_interrupt;
 
 /*
                        tasklet_schedule(&hv_cpu->msg_dpc);
        }
 
-       add_interrupt_randomness(hv_get_vector(), 0);
+       add_interrupt_randomness(vmbus_interrupt, 0);
+}
+
+/*
+ * Per-cpu IRQ handler used on architectures where the VMbus interrupt
+ * is delivered as a normal Linux per-cpu IRQ (vmbus_irq != -1) rather
+ * than a hard-coded hypervisor vector; it simply chains to vmbus_isr().
+ * 'irq' and 'dev_id' are unused.
+ */
+static irqreturn_t vmbus_percpu_isr(int irq, void *dev_id)
+{
+       vmbus_isr();
+       return IRQ_HANDLED;
+}
 }
 
 /*
        if (ret)
                return ret;
 
-       ret = hv_setup_vmbus_irq(vmbus_irq, vmbus_isr);
-       if (ret)
-               goto err_setup;
+       /*
+        * VMbus interrupts are best modeled as per-cpu interrupts. If
+        * on an architecture with support for per-cpu IRQs (e.g. ARM64),
+        * allocate a per-cpu IRQ using standard Linux kernel functionality.
+        * If not on such an architecture (e.g., x86/x64), then rely on
+        * code in the arch-specific portion of the code tree to connect
+        * the VMbus interrupt handler.
+        */
+
+       if (vmbus_irq == -1) {
+               /* Hard-coded hypervisor vector: hook via the arch callback. */
+               hv_setup_vmbus_handler(vmbus_isr);
+       } else {
+               /*
+                * Per-cpu dev_id cookie; request_percpu_irq() requires a
+                * non-NULL __percpu pointer. Check the allocation so a
+                * failure reports -ENOMEM instead of a misleading -EINVAL
+                * from request_percpu_irq().
+                */
+               vmbus_evt = alloc_percpu(long);
+               if (!vmbus_evt) {
+                       ret = -ENOMEM;
+                       goto err_setup;
+               }
+               ret = request_percpu_irq(vmbus_irq, vmbus_percpu_isr,
+                               "Hyper-V VMbus", vmbus_evt);
+               if (ret) {
+                       pr_err("Can't request Hyper-V VMbus IRQ %d, Err %d\n",
+                                       vmbus_irq, ret);
+                       free_percpu(vmbus_evt);
+                       vmbus_evt = NULL;
+                       goto err_setup;
+               }
+       }
 
        ret = hv_synic_alloc();
        if (ret)
 err_cpuhp:
        hv_synic_free();
 err_alloc:
-       hv_remove_vmbus_irq();
+       if (vmbus_irq == -1) {
+               hv_remove_vmbus_handler();
+       } else {
+               free_percpu_irq(vmbus_irq, vmbus_evt);
+               free_percpu(vmbus_evt);
+       }
 err_setup:
        bus_unregister(&hv_bus);
        unregister_sysctl_table(hv_ctl_table_hdr);
                ret = -ETIMEDOUT;
                goto cleanup;
        }
+
+       /*
+        * If we're on an architecture with a hardcoded hypervisor
+        * vector (i.e. x86/x64), override the VMbus interrupt found
+        * in the ACPI tables. Ensure vmbus_irq is not set since the
+        * normal Linux IRQ mechanism is not used in this case.
+        */
+#ifdef HYPERVISOR_CALLBACK_VECTOR
+       vmbus_interrupt = HYPERVISOR_CALLBACK_VECTOR;
+       vmbus_irq = -1;
+#endif
+
        hv_debug_init();
 
        ret = vmbus_bus_init();
        vmbus_connection.conn_state = DISCONNECTED;
        hv_stimer_global_cleanup();
        vmbus_disconnect();
-       hv_remove_vmbus_irq();
+       if (vmbus_irq == -1) {
+               hv_remove_vmbus_handler();
+       } else {
+               free_percpu_irq(vmbus_irq, vmbus_evt);
+               free_percpu(vmbus_evt);
+       }
        for_each_online_cpu(cpu) {
                struct hv_per_cpu_context *hv_cpu
                        = per_cpu_ptr(hv_context.cpu_context, cpu);
 
        }
 }
 
-int hv_setup_vmbus_irq(int irq, void (*handler)(void));
-void hv_remove_vmbus_irq(void);
-void hv_enable_vmbus_irq(void);
-void hv_disable_vmbus_irq(void);
+/* Arch hooks to register/unregister the VMbus interrupt callback. */
+void hv_setup_vmbus_handler(void (*handler)(void));
+void hv_remove_vmbus_handler(void);
 
 void hv_setup_kexec_handler(void (*handler)(void));
 void hv_remove_kexec_handler(void);
 void hv_remove_crash_handler(void);
 
 extern int vmbus_interrupt;
+extern int vmbus_irq;
 
 #if IS_ENABLED(CONFIG_HYPERV)
 /*