  * @flags:     flags (see IRQF_* above)
  * @name:      name of the device
  * @dev_id:    cookie to identify the device
+ * @percpu_dev_id:     per-cpu cookie to identify the device
  * @next:      pointer to the next irqaction for shared interrupts
  * @irq:       interrupt number
  * @dir:       pointer to the proc/irq/NN/name entry
  * @thread_mask:       bitmask for keeping track of @thread activity
  */
 struct irqaction {
-       irq_handler_t handler;
-       unsigned long flags;
-       void *dev_id;
-       struct irqaction *next;
-       int irq;
-       irq_handler_t thread_fn;
-       struct task_struct *thread;
-       unsigned long thread_flags;
-       unsigned long thread_mask;
-       const char *name;
-       struct proc_dir_entry *dir;
+       irq_handler_t           handler;
+       unsigned long           flags;
+       void                    *dev_id;
+       void __percpu           *percpu_dev_id;
+       struct irqaction        *next;
+       int                     irq;
+       irq_handler_t           thread_fn;
+       struct task_struct      *thread;
+       unsigned long           thread_flags;
+       unsigned long           thread_mask;
+       const char              *name;
+       struct proc_dir_entry   *dir;
 } ____cacheline_internodealigned_in_smp;
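
The new percpu_dev_id field is a __percpu pointer rather than a plain cookie, so each CPU resolves it to its own instance. A driver owning such an interrupt would typically declare the cookie with DEFINE_PER_CPU; a minimal sketch (the struct and variable names are hypothetical, not part of this patch):

    #include <linux/percpu.h>

    /* Hypothetical per-CPU device state, one instance per processor. */
    struct my_percpu_dev {
            unsigned long   count;
    };

    static DEFINE_PER_CPU(struct my_percpu_dev, my_dev);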
 
 extern irqreturn_t no_action(int cpl, void *dev_id);
extern int __must_check
request_any_context_irq(unsigned int irq, irq_handler_t handler,
                        unsigned long flags, const char *name, void *dev_id);
 
+extern int __must_check
+request_percpu_irq(unsigned int irq, irq_handler_t handler,
+                  const char *devname, void __percpu *percpu_dev_id);
+
 extern void exit_irq_thread(void);
 #else
 
        return request_irq(irq, handler, flags, name, dev_id);
 }
 
+static inline int __must_check
+request_percpu_irq(unsigned int irq, irq_handler_t handler,
+                  const char *devname, void __percpu *percpu_dev_id)
+{
+       return request_irq(irq, handler, 0, devname, percpu_dev_id);
+}
+
 static inline void exit_irq_thread(void) { }
 #endif
 
 extern void free_irq(unsigned int, void *);
+extern void free_percpu_irq(unsigned int, void __percpu *);
 
 struct device;
 
 
 extern void disable_irq_nosync(unsigned int irq);
 extern void disable_irq(unsigned int irq);
+extern void disable_percpu_irq(unsigned int irq);
 extern void enable_irq(unsigned int irq);
+extern void enable_percpu_irq(unsigned int irq);
 
 /* The following three functions are for the core kernel use only. */
 #ifdef CONFIG_GENERIC_HARDIRQS
 
  * IRQ_NO_BALANCING            - Interrupt cannot be balanced (affinity set)
  * IRQ_MOVE_PCNTXT             - Interrupt can be migrated from process context
 * IRQ_NESTED_THREAD           - Interrupt nests into another thread
+ * IRQ_PER_CPU_DEVID           - Dev_id is a per-cpu variable
  */
 enum {
        IRQ_TYPE_NONE           = 0x00000000,
        IRQ_MOVE_PCNTXT         = (1 << 14),
        IRQ_NESTED_THREAD       = (1 << 15),
        IRQ_NOTHREAD            = (1 << 16),
+       IRQ_PER_CPU_DEVID       = (1 << 17),
 };
 
 #define IRQF_MODIFY_MASK       \
        (IRQ_TYPE_SENSE_MASK | IRQ_NOPROBE | IRQ_NOREQUEST | \
         IRQ_NOAUTOEN | IRQ_MOVE_PCNTXT | IRQ_LEVEL | IRQ_NO_BALANCING | \
-        IRQ_PER_CPU | IRQ_NESTED_THREAD)
+        IRQ_PER_CPU | IRQ_NESTED_THREAD | IRQ_NOTHREAD | IRQ_PER_CPU_DEVID)
 
 #define IRQ_NO_BALANCING_MASK  (IRQ_PER_CPU | IRQ_NO_BALANCING)
 
 struct irqaction;
 extern int setup_irq(unsigned int irq, struct irqaction *new);
 extern void remove_irq(unsigned int irq, struct irqaction *act);
+extern int setup_percpu_irq(unsigned int irq, struct irqaction *new);
+extern void remove_percpu_irq(unsigned int irq, struct irqaction *act);
 
 extern void irq_cpu_online(void);
 extern void irq_cpu_offline(void);
 extern void handle_edge_eoi_irq(unsigned int irq, struct irq_desc *desc);
 extern void handle_simple_irq(unsigned int irq, struct irq_desc *desc);
 extern void handle_percpu_irq(unsigned int irq, struct irq_desc *desc);
+extern void handle_percpu_devid_irq(unsigned int irq, struct irq_desc *desc);
 extern void handle_bad_irq(unsigned int irq, struct irq_desc *desc);
 extern void handle_nested_irq(unsigned int irq);
 
        irq_set_chip_and_handler_name(irq, chip, handle, NULL);
 }
 
+extern int irq_set_percpu_devid(unsigned int irq);
+
 extern void
 __irq_set_handler(unsigned int irq, irq_flow_handler_t handle, int is_chained,
                  const char *name);
                irq_clear_status_flags(irq, IRQ_NESTED_THREAD);
 }
 
+static inline void irq_set_percpu_devid_flags(unsigned int irq)
+{
+       irq_set_status_flags(irq,
+                            IRQ_NOAUTOEN | IRQ_PER_CPU | IRQ_NOTHREAD |
+                            IRQ_NOPROBE | IRQ_PER_CPU_DEVID);
+}
+
 /* Handle dynamic irq creation and destruction */
 extern unsigned int create_irq_nr(unsigned int irq_want, int node);
 extern int create_irq(void);
 
        unsigned long           last_unhandled; /* Aging timer for unhandled count */
        unsigned int            irqs_unhandled;
        raw_spinlock_t          lock;
+       struct cpumask          *percpu_enabled;
 #ifdef CONFIG_SMP
        const struct cpumask    *affinity_hint;
        struct irq_affinity_notify *affinity_notify;
 
 int irq_set_chip(unsigned int irq, struct irq_chip *chip)
 {
        unsigned long flags;
-       struct irq_desc *desc = irq_get_desc_lock(irq, &flags);
+       struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);
 
        if (!desc)
                return -EINVAL;
 int irq_set_irq_type(unsigned int irq, unsigned int type)
 {
        unsigned long flags;
-       struct irq_desc *desc = irq_get_desc_buslock(irq, &flags);
+       struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);
        int ret = 0;
 
        if (!desc)
 int irq_set_handler_data(unsigned int irq, void *data)
 {
        unsigned long flags;
-       struct irq_desc *desc = irq_get_desc_lock(irq, &flags);
+       struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);
 
        if (!desc)
                return -EINVAL;
 int irq_set_msi_desc(unsigned int irq, struct msi_desc *entry)
 {
        unsigned long flags;
-       struct irq_desc *desc = irq_get_desc_lock(irq, &flags);
+       struct irq_desc *desc = irq_get_desc_lock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);
 
        if (!desc)
                return -EINVAL;
 int irq_set_chip_data(unsigned int irq, void *data)
 {
        unsigned long flags;
-       struct irq_desc *desc = irq_get_desc_lock(irq, &flags);
+       struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);
 
        if (!desc)
                return -EINVAL;
        }
 }
 
+void irq_percpu_enable(struct irq_desc *desc, unsigned int cpu)
+{
+       if (desc->irq_data.chip->irq_enable)
+               desc->irq_data.chip->irq_enable(&desc->irq_data);
+       else
+               desc->irq_data.chip->irq_unmask(&desc->irq_data);
+       cpumask_set_cpu(cpu, desc->percpu_enabled);
+}
+
+void irq_percpu_disable(struct irq_desc *desc, unsigned int cpu)
+{
+       if (desc->irq_data.chip->irq_disable)
+               desc->irq_data.chip->irq_disable(&desc->irq_data);
+       else
+               desc->irq_data.chip->irq_mask(&desc->irq_data);
+       cpumask_clear_cpu(cpu, desc->percpu_enabled);
+}
+
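irq_percpu_enable() and irq_percpu_disable() mirror the fallback logic of the global irq_enable()/irq_disable() paths: a chip that supplies no irq_enable/irq_disable callbacks is driven through irq_unmask/irq_mask instead, and the per-CPU state is recorded in desc->percpu_enabled. A minimal chip relying on that fallback might look like this sketch (the chip and its callbacks are hypothetical):

    #include <linux/irq.h>

    static void my_chip_mask(struct irq_data *d)
    {
            /* Mask the interrupt in this CPU's banked register. */
    }

    static void my_chip_unmask(struct irq_data *d)
    {
            /* Unmask the interrupt in this CPU's banked register. */
    }

    /* No irq_enable/irq_disable: the percpu helpers use mask/unmask. */
    static struct irq_chip my_percpu_chip = {
            .name           = "my-percpu",
            .irq_mask       = my_chip_mask,
            .irq_unmask     = my_chip_unmask,
    };
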
 static inline void mask_ack_irq(struct irq_desc *desc)
 {
        if (desc->irq_data.chip->irq_mask_ack)
                chip->irq_eoi(&desc->irq_data);
 }
 
+/**
+ * handle_percpu_devid_irq - Per CPU local irq handler with per cpu dev ids
+ * @irq:       the interrupt number
+ * @desc:      the interrupt description structure for this irq
+ *
+ * Per CPU interrupts on SMP machines without locking requirements. Same as
+ * handle_percpu_irq() above, but with the following extra:
+ *
+ * action->percpu_dev_id is a pointer to a per-cpu variable that
+ * holds the real device id of the CPU on which this handler is
+ * invoked.
+ */
+void handle_percpu_devid_irq(unsigned int irq, struct irq_desc *desc)
+{
+       struct irq_chip *chip = irq_desc_get_chip(desc);
+       struct irqaction *action = desc->action;
+       void *dev_id = __this_cpu_ptr(action->percpu_dev_id);
+       irqreturn_t res;
+
+       kstat_incr_irqs_this_cpu(irq, desc);
+
+       if (chip->irq_ack)
+               chip->irq_ack(&desc->irq_data);
+
+       trace_irq_handler_entry(irq, action);
+       res = action->handler(irq, dev_id);
+       trace_irq_handler_exit(irq, action, res);
+
+       if (chip->irq_eoi)
+               chip->irq_eoi(&desc->irq_data);
+}
+
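From the driver's side the handler signature is unchanged: the flow handler above simply passes it the invoking CPU's slot of the per-cpu cookie. Continuing the hypothetical my_percpu_dev sketch from earlier:

    /* dev_id already points at the invoking CPU's instance. */
    static irqreturn_t my_percpu_handler(int irq, void *dev_id)
    {
            struct my_percpu_dev *dev = dev_id;

            dev->count++;
            return IRQ_HANDLED;
    }
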
 void
 __irq_set_handler(unsigned int irq, irq_flow_handler_t handle, int is_chained,
                  const char *name)
 {
        unsigned long flags;
-       struct irq_desc *desc = irq_get_desc_buslock(irq, &flags);
+       struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, 0);
 
        if (!desc)
                return;
 void irq_modify_status(unsigned int irq, unsigned long clr, unsigned long set)
 {
        unsigned long flags;
-       struct irq_desc *desc = irq_get_desc_lock(irq, &flags);
+       struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);
 
        if (!desc)
                return;
 
 extern void irq_shutdown(struct irq_desc *desc);
 extern void irq_enable(struct irq_desc *desc);
 extern void irq_disable(struct irq_desc *desc);
+extern void irq_percpu_enable(struct irq_desc *desc, unsigned int cpu);
+extern void irq_percpu_disable(struct irq_desc *desc, unsigned int cpu);
 extern void mask_irq(struct irq_desc *desc);
 extern void unmask_irq(struct irq_desc *desc);
 
                desc->irq_data.chip->irq_bus_sync_unlock(&desc->irq_data);
 }
 
+#define _IRQ_DESC_CHECK                (1 << 0)
+#define _IRQ_DESC_PERCPU               (1 << 1)
+
+#define IRQ_GET_DESC_CHECK_GLOBAL      (_IRQ_DESC_CHECK)
+#define IRQ_GET_DESC_CHECK_PERCPU      (_IRQ_DESC_CHECK | _IRQ_DESC_PERCPU)
+
 struct irq_desc *
-__irq_get_desc_lock(unsigned int irq, unsigned long *flags, bool bus);
+__irq_get_desc_lock(unsigned int irq, unsigned long *flags, bool bus,
+                   unsigned int check);
 void __irq_put_desc_unlock(struct irq_desc *desc, unsigned long flags, bool bus);
 
 static inline struct irq_desc *
-irq_get_desc_buslock(unsigned int irq, unsigned long *flags)
+irq_get_desc_buslock(unsigned int irq, unsigned long *flags, unsigned int check)
 {
-       return __irq_get_desc_lock(irq, flags, true);
+       return __irq_get_desc_lock(irq, flags, true, check);
 }
 
 static inline void
 }
 
 static inline struct irq_desc *
-irq_get_desc_lock(unsigned int irq, unsigned long *flags)
+irq_get_desc_lock(unsigned int irq, unsigned long *flags, unsigned int check)
 {
-       return __irq_get_desc_lock(irq, flags, false);
+       return __irq_get_desc_lock(irq, flags, false, check);
 }
 
 static inline void
 
 }
 
 struct irq_desc *
-__irq_get_desc_lock(unsigned int irq, unsigned long *flags, bool bus)
+__irq_get_desc_lock(unsigned int irq, unsigned long *flags, bool bus,
+                   unsigned int check)
 {
        struct irq_desc *desc = irq_to_desc(irq);
 
        if (desc) {
+               if (check & _IRQ_DESC_CHECK) {
+                       if ((check & _IRQ_DESC_PERCPU) &&
+                           !irq_settings_is_per_cpu_devid(desc))
+                               return NULL;
+
+                       if (!(check & _IRQ_DESC_PERCPU) &&
+                           irq_settings_is_per_cpu_devid(desc))
+                               return NULL;
+               }
+
                if (bus)
                        chip_bus_lock(desc);
                raw_spin_lock_irqsave(&desc->lock, *flags);
                chip_bus_sync_unlock(desc);
 }
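
The net effect of the check argument is that the two API families cannot be mixed on one descriptor: the global accessors pass IRQ_GET_DESC_CHECK_GLOBAL and get NULL back for a per-cpu-devid descriptor, and vice versa. On an interrupt marked via irq_set_percpu_devid(), for example:

    enable_irq(irq);        /* GLOBAL check fails, lookup is NULL: no-op */
    enable_percpu_irq(irq); /* PERCPU check passes: enables this CPU     */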
 
+int irq_set_percpu_devid(unsigned int irq)
+{
+       struct irq_desc *desc = irq_to_desc(irq);
+
+       if (!desc)
+               return -EINVAL;
+
+       if (desc->percpu_enabled)
+               return -EINVAL;
+
+       desc->percpu_enabled = kzalloc(sizeof(*desc->percpu_enabled), GFP_KERNEL);
+
+       if (!desc->percpu_enabled)
+               return -ENOMEM;
+
+       irq_set_percpu_devid_flags(irq);
+       return 0;
+}
+
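An interrupt controller would call irq_set_percpu_devid() while setting up its per-CPU interrupt lines, before installing handle_percpu_devid_irq as the flow handler. A sketch of such an init path, reusing the hypothetical my_percpu_chip from the earlier sketch:

    static void __init my_init_percpu_irq(unsigned int irq)
    {
            /* Marks the descriptor and allocates its percpu_enabled mask. */
            if (irq_set_percpu_devid(irq))
                    pr_err("IRQ%u: cannot mark as per-cpu\n", irq);

            irq_set_chip_and_handler(irq, &my_percpu_chip,
                                     handle_percpu_devid_irq);
    }
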
 /**
  * dynamic_irq_cleanup - cleanup a dynamically allocated irq
  * @irq:       irq number to initialize
 
 int irq_set_affinity_hint(unsigned int irq, const struct cpumask *m)
 {
        unsigned long flags;
-       struct irq_desc *desc = irq_get_desc_lock(irq, &flags);
+       struct irq_desc *desc = irq_get_desc_lock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);
 
        if (!desc)
                return -EINVAL;
 static int __disable_irq_nosync(unsigned int irq)
 {
        unsigned long flags;
-       struct irq_desc *desc = irq_get_desc_buslock(irq, &flags);
+       struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);
 
        if (!desc)
                return -EINVAL;
 void enable_irq(unsigned int irq)
 {
        unsigned long flags;
-       struct irq_desc *desc = irq_get_desc_buslock(irq, &flags);
+       struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);
 
        if (!desc)
                return;
 int irq_set_irq_wake(unsigned int irq, unsigned int on)
 {
        unsigned long flags;
-       struct irq_desc *desc = irq_get_desc_buslock(irq, &flags);
+       struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);
        int ret = 0;
 
        if (!desc)
 int can_request_irq(unsigned int irq, unsigned long irqflags)
 {
        unsigned long flags;
-       struct irq_desc *desc = irq_get_desc_lock(irq, &flags);
+       struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);
        int canrequest = 0;
 
        if (!desc)

int setup_irq(unsigned int irq, struct irqaction *act)
{
        int retval;
        struct irq_desc *desc = irq_to_desc(irq);
 
+       if (WARN_ON(irq_settings_is_per_cpu_devid(desc)))
+               return -EINVAL;
        chip_bus_lock(desc);
        retval = __setup_irq(irq, desc, act);
        chip_bus_sync_unlock(desc);
 }
 EXPORT_SYMBOL_GPL(setup_irq);
 
- /*
+/*
  * Internal function to unregister an irqaction - used to free
  * regular and special interrupts that are part of the architecture.
  */
  */
 void remove_irq(unsigned int irq, struct irqaction *act)
 {
-       __free_irq(irq, act->dev_id);
+       struct irq_desc *desc = irq_to_desc(irq);
+
+       if (desc && !WARN_ON(irq_settings_is_per_cpu_devid(desc)))
+               __free_irq(irq, act->dev_id);
 }
 EXPORT_SYMBOL_GPL(remove_irq);
 
void free_irq(unsigned int irq, void *dev_id)
{
        struct irq_desc *desc = irq_to_desc(irq);
 
-       if (!desc)
+       if (!desc || WARN_ON(irq_settings_is_per_cpu_devid(desc)))
                return;
 
 #ifdef CONFIG_SMP
        if (!desc)
                return -EINVAL;
 
-       if (!irq_settings_can_request(desc))
+       if (!irq_settings_can_request(desc) ||
+           WARN_ON(irq_settings_is_per_cpu_devid(desc)))
                return -EINVAL;
 
        if (!handler) {
        return !ret ? IRQC_IS_HARDIRQ : ret;
 }
 EXPORT_SYMBOL_GPL(request_any_context_irq);
+
+void enable_percpu_irq(unsigned int irq)
+{
+       unsigned int cpu = smp_processor_id();
+       unsigned long flags;
+       struct irq_desc *desc = irq_get_desc_lock(irq, &flags, IRQ_GET_DESC_CHECK_PERCPU);
+
+       if (!desc)
+               return;
+
+       irq_percpu_enable(desc, cpu);
+       irq_put_desc_unlock(desc, flags);
+}
+
+void disable_percpu_irq(unsigned int irq)
+{
+       unsigned int cpu = smp_processor_id();
+       unsigned long flags;
+       struct irq_desc *desc = irq_get_desc_lock(irq, &flags, IRQ_GET_DESC_CHECK_PERCPU);
+
+       if (!desc)
+               return;
+
+       irq_percpu_disable(desc, cpu);
+       irq_put_desc_unlock(desc, flags);
+}
+
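Both functions act on smp_processor_id(), so they only affect the calling CPU; there is no cross-CPU variant. A secondary CPU would therefore enable the interrupt for itself from its own, non-preemptible bring-up path, along these lines (hypothetical helper):

    /* Runs on the CPU being brought online; enables local delivery only. */
    static void my_cpu_init(unsigned int irq)
    {
            enable_percpu_irq(irq);
    }
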
+/*
+ * Internal function to unregister a percpu irqaction.
+ */
+static struct irqaction *__free_percpu_irq(unsigned int irq, void __percpu *dev_id)
+{
+       struct irq_desc *desc = irq_to_desc(irq);
+       struct irqaction *action;
+       unsigned long flags;
+
+       WARN(in_interrupt(), "Trying to free IRQ %d from IRQ context!\n", irq);
+
+       if (!desc)
+               return NULL;
+
+       raw_spin_lock_irqsave(&desc->lock, flags);
+
+       action = desc->action;
+       if (!action || action->percpu_dev_id != dev_id) {
+               WARN(1, "Trying to free already-free IRQ %d\n", irq);
+               goto bad;
+       }
+
+       if (!cpumask_empty(desc->percpu_enabled)) {
+               WARN(1, "percpu IRQ %d still enabled on CPU%d!\n",
+                    irq, cpumask_first(desc->percpu_enabled));
+               goto bad;
+       }
+
+       /* Found it - now remove it from the list of entries: */
+       desc->action = NULL;
+
+       raw_spin_unlock_irqrestore(&desc->lock, flags);
+
+       unregister_handler_proc(irq, action);
+
+       module_put(desc->owner);
+       return action;
+
+bad:
+       raw_spin_unlock_irqrestore(&desc->lock, flags);
+       return NULL;
+}
+
+/**
+ *     remove_percpu_irq - free a per-cpu interrupt
+ *     @irq: Interrupt line to free
+ *     @act: irqaction for the interrupt
+ *
+ * Used to remove interrupts statically set up by the early boot process.
+ */
+void remove_percpu_irq(unsigned int irq, struct irqaction *act)
+{
+       struct irq_desc *desc = irq_to_desc(irq);
+
+       if (desc && irq_settings_is_per_cpu_devid(desc))
+               __free_percpu_irq(irq, act->percpu_dev_id);
+}
+
+/**
+ *     free_percpu_irq - free an interrupt allocated with request_percpu_irq
+ *     @irq: Interrupt line to free
+ *     @dev_id: Device identity to free
+ *
+ *     Remove a percpu interrupt handler. The handler is removed, but
+ *     the interrupt line is not disabled; disabling must be done on
+ *     each CPU before calling this function. The function does not return
+ *     until any executing interrupts for this IRQ have completed.
+ *
+ *     This function must not be called from interrupt context.
+ */
+void free_percpu_irq(unsigned int irq, void __percpu *dev_id)
+{
+       struct irq_desc *desc = irq_to_desc(irq);
+
+       if (!desc || !irq_settings_is_per_cpu_devid(desc))
+               return;
+
+       chip_bus_lock(desc);
+       kfree(__free_percpu_irq(irq, dev_id));
+       chip_bus_sync_unlock(desc);
+}
+
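Because of the cpumask_empty() check above, free_percpu_irq() refuses to tear down an action while the interrupt is still enabled on any CPU, so a driver must disable it everywhere first. A hedged teardown sketch, reusing the hypothetical my_dev cookie:

    #include <linux/smp.h>

    static void my_disable_local(void *info)
    {
            disable_percpu_irq((unsigned long)info);
    }

    static void my_teardown(unsigned int irq)
    {
            /* Quiesce every CPU, then free (process context only). */
            on_each_cpu(my_disable_local, (void *)(unsigned long)irq, 1);
            free_percpu_irq(irq, &my_dev);
    }
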
+/**
+ *     setup_percpu_irq - set up a per-cpu interrupt
+ *     @irq: Interrupt line to setup
+ *     @act: irqaction for the interrupt
+ *
+ * Used to statically set up per-cpu interrupts in the early boot process.
+ */
+int setup_percpu_irq(unsigned int irq, struct irqaction *act)
+{
+       struct irq_desc *desc = irq_to_desc(irq);
+       int retval;
+
+       if (!desc || !irq_settings_is_per_cpu_devid(desc))
+               return -EINVAL;
+       chip_bus_lock(desc);
+       retval = __setup_irq(irq, desc, act);
+       chip_bus_sync_unlock(desc);
+
+       return retval;
+}
+
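setup_percpu_irq() is the per-CPU counterpart of setup_irq(): it takes a caller-provided, statically allocated irqaction, as used by early boot code such as a per-CPU timer. A hedged sketch (the handler and cookie names are hypothetical):

    static irqreturn_t my_timer_handler(int irq, void *dev_id);

    static DEFINE_PER_CPU(struct my_percpu_dev, my_timer_dev);

    static struct irqaction my_timer_irqaction = {
            .handler        = my_timer_handler,
            .flags          = IRQF_PERCPU | IRQF_TIMER,
            .name           = "my-timer",
            .percpu_dev_id  = &my_timer_dev,
    };

    void __init my_timer_init(unsigned int irq)
    {
            irq_set_percpu_devid(irq);
            setup_percpu_irq(irq, &my_timer_irqaction);
    }
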
+/**
+ *     request_percpu_irq - allocate a percpu interrupt line
+ *     @irq: Interrupt line to allocate
+ *     @handler: Function to be called when the IRQ occurs.
+ *     @devname: An ASCII name for the claiming device
+ *     @dev_id: A percpu cookie passed back to the handler function
+ *
+ *     This call allocates interrupt resources, but doesn't
+ *     automatically enable the interrupt. Enabling has to be done on
+ *     each CPU using enable_percpu_irq().
+ *
+ *     Dev_id must be globally unique. It is a per-cpu variable, and
+ *     the handler gets called with the interrupted CPU's instance of
+ *     that variable.
+ */
+int request_percpu_irq(unsigned int irq, irq_handler_t handler,
+                      const char *devname, void __percpu *dev_id)
+{
+       struct irqaction *action;
+       struct irq_desc *desc;
+       int retval;
+
+       if (!dev_id)
+               return -EINVAL;
+
+       desc = irq_to_desc(irq);
+       if (!desc || !irq_settings_can_request(desc) ||
+           !irq_settings_is_per_cpu_devid(desc))
+               return -EINVAL;
+
+       action = kzalloc(sizeof(struct irqaction), GFP_KERNEL);
+       if (!action)
+               return -ENOMEM;
+
+       action->handler = handler;
+       action->flags = IRQF_PERCPU;
+       action->name = devname;
+       action->percpu_dev_id = dev_id;
+
+       chip_bus_lock(desc);
+       retval = __setup_irq(irq, desc, action);
+       chip_bus_sync_unlock(desc);
+
+       if (retval)
+               kfree(action);
+
+       return retval;
+}
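
Putting the pieces together: a probe path for the hypothetical driver sketched above requests the line once with the __percpu cookie, then enables it everywhere. Since irq_set_percpu_devid_flags() marked the descriptor IRQ_NOAUTOEN, request_percpu_irq() alone leaves the interrupt disabled:

    static void my_enable_local(void *info)
    {
            enable_percpu_irq((unsigned long)info);
    }

    static int my_probe(unsigned int irq)
    {
            int err;

            /* One request covers all CPUs; pass the __percpu address. */
            err = request_percpu_irq(irq, my_percpu_handler, "my-dev",
                                     &my_dev);
            if (err)
                    return err;

            /* Not auto-enabled: switch it on for every online CPU. */
            on_each_cpu(my_enable_local, (void *)(unsigned long)irq, 1);
            return 0;
    }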
 
        _IRQ_MOVE_PCNTXT        = IRQ_MOVE_PCNTXT,
        _IRQ_NO_BALANCING       = IRQ_NO_BALANCING,
        _IRQ_NESTED_THREAD      = IRQ_NESTED_THREAD,
+       _IRQ_PER_CPU_DEVID      = IRQ_PER_CPU_DEVID,
        _IRQF_MODIFY_MASK       = IRQF_MODIFY_MASK,
 };
 
 #define IRQ_NOTHREAD           GOT_YOU_MORON
 #define IRQ_NOAUTOEN           GOT_YOU_MORON
 #define IRQ_NESTED_THREAD      GOT_YOU_MORON
+#define IRQ_PER_CPU_DEVID      GOT_YOU_MORON
 #undef IRQF_MODIFY_MASK
 #define IRQF_MODIFY_MASK       GOT_YOU_MORON
 
        return desc->status_use_accessors & _IRQ_PER_CPU;
 }
 
+static inline bool irq_settings_is_per_cpu_devid(struct irq_desc *desc)
+{
+       return desc->status_use_accessors & _IRQ_PER_CPU_DEVID;
+}
+
 static inline void irq_settings_set_per_cpu(struct irq_desc *desc)
 {
        desc->status_use_accessors |= _IRQ_PER_CPU;