* context. So we need to disable bh here to avoid deadlocks and other
  * side effects.
  */
-static void
+static irqreturn_t
 irq_forced_thread_fn(struct irq_desc *desc, struct irqaction *action)
 {
+       irqreturn_t ret;
+
        local_bh_disable();
-       action->thread_fn(action->irq, action->dev_id);
+       ret = action->thread_fn(action->irq, action->dev_id);
        irq_finalize_oneshot(desc, action, false);
        local_bh_enable();
+       return ret;
 }
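
The forced-thread wrapper now propagates the handler's return value
instead of discarding it. For illustration, a driver thread function
that participates in the new accounting could look like the sketch
below (the foo_* names are hypothetical, not part of this patch):

	#include <linux/interrupt.h>

	/* Hypothetical thread function; may sleep. When forced into a
	 * thread it runs via irq_forced_thread_fn() above, i.e. with
	 * bh disabled. */
	static irqreturn_t foo_thread_fn(int irq, void *dev_id)
	{
		struct foo_priv *foo = dev_id;	/* hypothetical type */

		if (!foo_event_pending(foo))	/* hypothetical helper */
			return IRQ_NONE;	/* now reaches note_interrupt() */

		foo_handle_event(foo);		/* hypothetical helper */
		return IRQ_HANDLED;
	}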
 
 /*
  * preemptible - many of them need to sleep and wait for slow buses to
  * complete.
  */
-static void irq_thread_fn(struct irq_desc *desc, struct irqaction *action)
+static irqreturn_t irq_thread_fn(struct irq_desc *desc,
+               struct irqaction *action)
 {
-       action->thread_fn(action->irq, action->dev_id);
+       irqreturn_t ret;
+
+       ret = action->thread_fn(action->irq, action->dev_id);
        irq_finalize_oneshot(desc, action, false);
+       return ret;
 }
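
Both wrappers share this signature; drivers install a thread_fn through
request_threaded_irq(). A minimal sketch of that pairing, again with
hypothetical foo_* names:

	static irqreturn_t foo_hardirq(int irq, void *dev_id)
	{
		if (!foo_irq_is_ours(dev_id))	/* hypothetical helper */
			return IRQ_NONE;
		return IRQ_WAKE_THREAD;		/* defer to foo_thread_fn() */
	}

	/* at probe time */
	err = request_threaded_irq(irq, foo_hardirq, foo_thread_fn,
				   IRQF_ONESHOT, "foo", foo);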
 
 /*
        };
        struct irqaction *action = data;
        struct irq_desc *desc = irq_to_desc(action->irq);
-       void (*handler_fn)(struct irq_desc *desc, struct irqaction *action);
+       irqreturn_t (*handler_fn)(struct irq_desc *desc,
+                       struct irqaction *action);
        int wake;
 
        if (force_irqthreads && test_bit(IRQTF_FORCED_THREAD,
                        desc->istate |= IRQS_PENDING;
                        raw_spin_unlock_irq(&desc->lock);
                } else {
+                       irqreturn_t action_ret;
+
                        raw_spin_unlock_irq(&desc->lock);
-                       handler_fn(desc, action);
+                       action_ret = handler_fn(desc, action);
+                       if (!noirqdebug)
+                               note_interrupt(action->irq, desc, action_ret);
                }
 
                wake = atomic_dec_and_test(&desc->threads_active);
 
                  jiffies + POLL_SPURIOUS_IRQ_INTERVAL);
 }
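
With the return value plumbed through, the spurious-IRQ detector now
sees the thread's verdict as well. Roughly, the sequence under this
patch is (illustrative, not literal kernel code):

	/*
	 * hardirq: primary handler returns IRQ_WAKE_THREAD;
	 *          note_interrupt() sees IRQ_WAKE_THREAD, returns early.
	 * thread:  action_ret = handler_fn(desc, action);
	 *          note_interrupt(action->irq, desc, action_ret);
	 *          the real accounting happens here.
	 */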
 
+static inline int bad_action_ret(irqreturn_t action_ret)
+{
+       if (likely(action_ret <= (IRQ_HANDLED | IRQ_WAKE_THREAD)))
+               return 0;
+       return 1;
+}
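
The "<=" test works because irqreturn_t has only three defined values:
IRQ_NONE == 0, IRQ_HANDLED == 1 and IRQ_WAKE_THREAD == 2, so the
largest legitimate result is their combination:

	/*
	 * IRQ_NONE        == 0	(handler did not handle the interrupt)
	 * IRQ_HANDLED     == 1	(handler handled it)
	 * IRQ_WAKE_THREAD == 2	(handler wants its thread to run)
	 *
	 * IRQ_HANDLED | IRQ_WAKE_THREAD == 3 is the upper bound;
	 * anything above it is a bogus return value.
	 */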
+
 /*
  * If 99,900 of the previous 100,000 interrupts have not been handled
  * then assume that the IRQ is stuck in some manner. Drop a diagnostic
        struct irqaction *action;
        unsigned long flags;
 
-       if (action_ret != IRQ_HANDLED && action_ret != IRQ_NONE) {
+       if (bad_action_ret(action_ret)) {
                printk(KERN_ERR "irq event %d: bogus return value %x\n",
                                irq, action_ret);
        } else {
        if (desc->istate & IRQS_POLL_INPROGRESS)
                return;
 
-       if (unlikely(action_ret != IRQ_HANDLED)) {
+       /*
+        * The hard irq handler returned IRQ_WAKE_THREAD; we get here
+        * again via the threaded handler, with its return value.
+        */
+       if (action_ret == IRQ_WAKE_THREAD)
+               return;
+
+       if (bad_action_ret(action_ret)) {
+               report_bad_irq(irq, desc, action_ret);
+               return;
+       }
+
+       if (unlikely(action_ret == IRQ_NONE)) {
                /*
                 * If we are seeing only the odd spurious IRQ caused by
                 * bus asynchronicity then don't eventually trigger an error,
                else
                        desc->irqs_unhandled++;
                desc->last_unhandled = jiffies;
-               if (unlikely(action_ret != IRQ_NONE))
-                       report_bad_irq(irq, desc, action_ret);
        }
 
        if (unlikely(try_misrouted_irq(irq, desc, action_ret))) {