#include <linux/irqreturn.h>
    #include <linux/irqnr.h>
    #include <linux/errno.h>
   +#include <linux/topology.h>
  ++#include <linux/wait.h>
    
    #include <asm/irq.h>
    #include <asm/ptrace.h>
        unsigned int            irqs_unhandled;
        spinlock_t              lock;
    #ifdef CONFIG_SMP
   -    cpumask_t               affinity;
   +    cpumask_var_t           affinity;
        unsigned int            cpu;
   -#endif
    #ifdef CONFIG_GENERIC_PENDING_IRQ
   -    cpumask_t               pending_mask;
   +    cpumask_var_t           pending_mask;
   +#endif
    #endif
  ++    atomic_t                threads_active;
  ++    wait_queue_head_t       wait_for_threads;
    #ifdef CONFIG_PROC_FS
        struct proc_dir_entry   *dir;
    #endif
    
    #endif /* !CONFIG_S390 */
    
   +#ifdef CONFIG_SMP
   +/**
   + * init_alloc_desc_masks - allocate cpumasks for irq_desc
   + * @desc:   pointer to irq_desc struct
   + * @cpu:    cpu which will be handling the cpumasks
   + * @boot:   true if need bootmem
   + *
   + * Allocates affinity and pending_mask cpumask if required.
   + * Returns true if successful (or not required).
   + * Side effect: affinity has all bits set, pending_mask has all bits clear.
   + */
   +static inline bool init_alloc_desc_masks(struct irq_desc *desc, int cpu,
   +                                                            bool boot)
   +{
   +    int node;
   +
   +    if (boot) {
   +            alloc_bootmem_cpumask_var(&desc->affinity);
   +            cpumask_setall(desc->affinity);
   +
   +#ifdef CONFIG_GENERIC_PENDING_IRQ
   +            alloc_bootmem_cpumask_var(&desc->pending_mask);
   +            cpumask_clear(desc->pending_mask);
   +#endif
   +            return true;
   +    }
   +
   +    node = cpu_to_node(cpu);
   +
   +    if (!alloc_cpumask_var_node(&desc->affinity, GFP_ATOMIC, node))
   +            return false;
   +    cpumask_setall(desc->affinity);
   +
   +#ifdef CONFIG_GENERIC_PENDING_IRQ
   +    if (!alloc_cpumask_var_node(&desc->pending_mask, GFP_ATOMIC, node)) {
   +            free_cpumask_var(desc->affinity);
   +            return false;
   +    }
   +    cpumask_clear(desc->pending_mask);
   +#endif
   +    return true;
   +}
   +
   +/**
   + * init_copy_desc_masks - copy cpumasks for irq_desc
   + * @old_desc:       pointer to old irq_desc struct
   + * @new_desc:       pointer to new irq_desc struct
   + *
   + * Ensures affinity and pending_masks are copied to new irq_desc.
   + * If !CONFIG_CPUMASK_OFFSTACK the cpumasks are embedded in the
   + * irq_desc struct so the copy is redundant.
   + */
   +
   +static inline void init_copy_desc_masks(struct irq_desc *old_desc,
   +                                    struct irq_desc *new_desc)
   +{
   +#ifdef CONFIG_CPUMASK_OFFSTACK
   +    cpumask_copy(new_desc->affinity, old_desc->affinity);
   +
   +#ifdef CONFIG_GENERIC_PENDING_IRQ
   +    cpumask_copy(new_desc->pending_mask, old_desc->pending_mask);
   +#endif
   +#endif
   +}
   +
++ +static inline void free_desc_masks(struct irq_desc *old_desc,
++ +                               struct irq_desc *new_desc)
++ +{
++ +    free_cpumask_var(old_desc->affinity);
++ +
++ +#ifdef CONFIG_GENERIC_PENDING_IRQ
++ +    free_cpumask_var(old_desc->pending_mask);
++ +#endif
++ +}
++ +
   +#else /* !CONFIG_SMP */
   +
   +static inline bool init_alloc_desc_masks(struct irq_desc *desc, int cpu,
   +                                                            bool boot)
   +{
   +    return true;
   +}
   +
   +static inline void init_copy_desc_masks(struct irq_desc *old_desc,
   +                                    struct irq_desc *new_desc)
   +{
   +}
   +
++ +static inline void free_desc_masks(struct irq_desc *old_desc,
++ +                               struct irq_desc *new_desc)
++ +{
++ +}
   +#endif      /* CONFIG_SMP */
   +
    #endif /* _LINUX_IRQ_H */
 
    }
    
    /**
---  * init_timer - initialize a timer.
+++  * init_timer_key - initialize a timer
     * @timer: the timer to be initialized
+++  * @name: name of the timer
+++  * @key: lockdep class key of the fake lock used for tracking timer
+++  *       sync lock dependencies
     *
---  * init_timer() must be done to a timer prior calling *any* of the
+++  * init_timer_key() must be done to a timer prior calling *any* of the
     * other timer functions.
     */
   -void init_timer(struct timer_list *timer)
   +void init_timer_key(struct timer_list *timer,
   +                const char *name,
   +                struct lock_class_key *key)
    {
        debug_timer_init(timer);
   -    __init_timer(timer);
   +    __init_timer(timer, name, key);
    }
   -EXPORT_SYMBOL(init_timer);
   +EXPORT_SYMBOL(init_timer_key);
    
   -void init_timer_deferrable(struct timer_list *timer)
   +void init_timer_deferrable_key(struct timer_list *timer,
   +                           const char *name,
   +                           struct lock_class_key *key)
    {
   -    init_timer(timer);
   +    init_timer_key(timer, name, key);
        timer_set_deferrable(timer);
    }
   -EXPORT_SYMBOL(init_timer_deferrable);
   +EXPORT_SYMBOL(init_timer_deferrable_key);
    
    static inline void detach_timer(struct timer_list *timer,
                                int clear_pending)