        /* push everything to CPU 0 to give us a starting point.  */
        for (i = 0 ; i < NR_IRQS ; i++) {
-               pending_irq_cpumask[i] = cpumask_of_cpu(0);
+               irq_desc[i].pending_mask = cpumask_of_cpu(0);
                set_pending_irq(i, cpumask_of_cpu(0));
        }
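
This first hunk initialises the balancer state: every IRQ starts out
targeted at CPU 0, and set_pending_irq() queues a move there as well.
Note that the direct pending_mask assignment is immediately overwritten
by set_pending_irq() (shown below), which stores the same mask under
desc->lock; the conversion keeps the original, redundant structure
rather than cleaning it up. For reference, the old-style cpumask_t
helpers used throughout this patch behave as follows (an informal
sketch of their semantics, not the real macro definitions):

	cpumask_t m = cpumask_of_cpu(0);	/* mask with only CPU 0 set   */

	cpus_and(dst, a, b);	/* dst = a & b, bitwise over CPUs     */
	cpus_empty(m);		/* true if no CPU is set in m         */
	cpus_clear(m);		/* remove every CPU from m            */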
 
 
        cpumask_t               affinity;
 #endif
 #if defined(CONFIG_GENERIC_PENDING_IRQ) || defined(CONFIG_IRQBALANCE)
+       cpumask_t               pending_mask;
        unsigned int            move_irq;       /* need to re-target IRQ dest */
 #endif
 #ifdef CONFIG_PROC_FS
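
The struct irq_desc hunk above (in include/linux/irq.h) now carries the
pending mask next to the flag that guards it. A sketch of the resulting
fields, with everything outside the hunk elided:

	struct irq_desc {
		/* ... */
	#ifdef CONFIG_SMP
		cpumask_t	affinity;	/* CPUs the IRQ is routed to  */
	#endif
	#if defined(CONFIG_GENERIC_PENDING_IRQ) || defined(CONFIG_IRQBALANCE)
		cpumask_t	pending_mask;	/* deferred new routing       */
		unsigned int	move_irq;	/* need to re-target IRQ dest */
	#endif
		/* ... */
	};

This is the core of the patch: the per-IRQ pending mask moves from a
parallel NR_IRQS-sized array into the descriptor it belongs to.
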
 #ifdef CONFIG_SMP
 
 #if defined(CONFIG_GENERIC_PENDING_IRQ) || defined(CONFIG_IRQBALANCE)
-extern cpumask_t pending_irq_cpumask[NR_IRQS];
 
 void set_pending_irq(unsigned int irq, cpumask_t mask);
 void move_native_irq(int irq);
 
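With the extern array gone, pending_mask is only reachable through
irq_desc[], and these two declarations remain the whole interface. A
usage sketch (the IRQ number and target CPU are made up for
illustration):

	/*
	 * Ask for IRQ 10 to be re-routed to CPU 2. Nothing is
	 * reprogrammed here; the request is recorded and applied
	 * later, when the core calls move_native_irq() for this IRQ.
	 */
	set_pending_irq(10, cpumask_of_cpu(2));

Splitting the request (set_pending_irq) from the move (move_native_irq)
lets the actual reprogramming happen at a point where the descriptor
lock is held and the chip can safely be retargeted.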
 
 #ifdef CONFIG_SMP
 
-#if defined (CONFIG_GENERIC_PENDING_IRQ) || defined (CONFIG_IRQBALANCE)
-cpumask_t __cacheline_aligned pending_irq_cpumask[NR_IRQS];
-#endif
-
 /**
  *     synchronize_irq - wait for pending IRQ handlers (on other CPUs)
  *     @irq: interrupt number to wait for
 
 
        spin_lock_irqsave(&desc->lock, flags);
        desc->move_irq = 1;
-       pending_irq_cpumask[irq] = mask;
+       irq_desc[irq].pending_mask = mask;
        spin_unlock_irqrestore(&desc->lock, flags);
 }
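
set_pending_irq() is short enough to show whole. A sketch of the
complete post-patch function, with the opening that the hunk elides
reconstructed (the local variable names are assumptions):

	void set_pending_irq(unsigned int irq, cpumask_t mask)
	{
		struct irq_desc *desc = irq_desc + irq;
		unsigned long flags;

		spin_lock_irqsave(&desc->lock, flags);
		desc->move_irq = 1;			/* retarget on next IRQ */
		irq_desc[irq].pending_mask = mask;	/* remember the target  */
		spin_unlock_irqrestore(&desc->lock, flags);
	}

Both the flag and the mask are written under desc->lock, exactly as
before; the only change is where the mask lives.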
 
 
        desc->move_irq = 0;
 
-       if (unlikely(cpus_empty(pending_irq_cpumask[irq])))
+       if (unlikely(cpus_empty(irq_desc[irq].pending_mask)))
                return;
 
        if (!desc->chip->set_affinity)
 
        assert_spin_locked(&desc->lock);
 
-       cpus_and(tmp, pending_irq_cpumask[irq], cpu_online_map);
+       cpus_and(tmp, irq_desc[irq].pending_mask, cpu_online_map);
 
        /*
         * If there was a valid mask to work with, please
                if (likely(!(desc->status & IRQ_DISABLED)))
                        desc->chip->enable(irq);
        }
-       cpus_clear(pending_irq_cpumask[irq]);
+       cpus_clear(irq_desc[irq].pending_mask);
 }
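
In outline, the consumer side that the remaining hunks touch works as
follows. A condensed sketch of move_native_irq() after the patch; the
hunk context elides the middle of the function, so treat this as a
reconstruction rather than the verbatim code:

	void move_native_irq(int irq)
	{
		struct irq_desc *desc = irq_desc + irq;
		cpumask_t tmp;

		if (likely(!desc->move_irq))
			return;			/* no move queued       */
		desc->move_irq = 0;

		if (unlikely(cpus_empty(irq_desc[irq].pending_mask)))
			return;
		if (!desc->chip->set_affinity)
			return;			/* chip cannot retarget */

		assert_spin_locked(&desc->lock);

		/* Never target an offline CPU. */
		cpus_and(tmp, irq_desc[irq].pending_mask, cpu_online_map);
		if (likely(!cpus_empty(tmp))) {
			/* disable, re-program, enable: edge-trigger safety */
			if (likely(!(desc->status & IRQ_DISABLED)))
				desc->chip->disable(irq);
			desc->chip->set_affinity(irq, tmp);
			if (likely(!(desc->status & IRQ_DISABLED)))
				desc->chip->enable(irq);
		}
		cpus_clear(irq_desc[irq].pending_mask);	/* consume request */
	}

Whether or not the move succeeds, the pending mask is cleared at the
end, so a stale request cannot be replayed on a later interrupt.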