#include <asm/atomic.h>
 
 
-#define get_irq_desc(irq) (&irq_desc[(irq)])
-
 /* Define a way to iterate across irqs. */
 #define for_each_irq(i) \
        for ((i) = 0; (i) < NR_IRQS; ++(i))
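An illustrative aside (not part of the patch): the conversion below consistently pairs this iterator with the generic irq_to_desc() accessor rather than indexing irq_desc[] directly. A minimal sketch of the resulting pattern, assuming a hypothetical helper name and a NULL check for sparse-IRQ configurations where irq_to_desc() can return NULL:

/* Sketch only: walk every descriptor through the generic accessor. */
static void example_walk_irqs(void)
{
	unsigned int i;

	for_each_irq(i) {
		struct irq_desc *desc = irq_to_desc(i);

		if (!desc)		/* possible under sparse IRQ numbering */
			continue;
		/* inspect desc->status, desc->action, desc->chip, ... */
	}
}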
 
        hard_irq_disable();
 
        for_each_irq(i) {
-               struct irq_desc *desc = irq_desc + i;
+               struct irq_desc *desc = irq_to_desc(i);
 
                if (desc->status & IRQ_INPROGRESS)
                        desc->chip->eoi(i);
 
        }
 
        if (i < NR_IRQS) {
-               desc = get_irq_desc(i);
+               desc = irq_to_desc(i);
                spin_lock_irqsave(&desc->lock, flags);
                action = desc->action;
                if (!action || !action->handler)
 #ifdef CONFIG_HOTPLUG_CPU
 void fixup_irqs(cpumask_t map)
 {
+       struct irq_desc *desc;
        unsigned int irq;
        static int warned;
 
        for_each_irq(irq) {
                cpumask_t mask;
 
-               if (irq_desc[irq].status & IRQ_PER_CPU)
+               desc = irq_to_desc(irq);
+               if (!desc || desc->status & IRQ_PER_CPU)
                        continue;
 
-               cpumask_and(&mask, irq_desc[irq].affinity, &map);
+               cpumask_and(&mask, desc->affinity, &map);
                if (any_online_cpu(mask) == NR_CPUS) {
                        printk("Breaking affinity for irq %i\n", irq);
                        mask = map;
                }
-               if (irq_desc[irq].chip->set_affinity)
-                       irq_desc[irq].chip->set_affinity(irq, &mask);
-               else if (irq_desc[irq].action && !(warned++))
+               if (desc->chip->set_affinity)
+                       desc->chip->set_affinity(irq, &mask);
+               else if (desc->action && !(warned++))
                        printk("Cannot set affinity for irq %i\n", irq);
        }
 
                return;
        }
 
-       desc = irq_desc + irq;
+       desc = irq_to_desc(irq);
        saved_sp_limit = current->thread.ksp_limit;
 
        irqtp->task = curtp->task;
                        smp_wmb();
 
                        /* Clear norequest flags */
-                       get_irq_desc(i)->status &= ~IRQ_NOREQUEST;
+                       irq_to_desc(i)->status &= ~IRQ_NOREQUEST;
 
                        /* Legacy flags are left to default at this point,
                         * one can then use irq_create_mapping() to
                            irq_hw_number_t hwirq)
 {
        /* Clear IRQ_NOREQUEST flag */
-       get_irq_desc(virq)->status &= ~IRQ_NOREQUEST;
+       irq_to_desc(virq)->status &= ~IRQ_NOREQUEST;
 
        /* map it */
        smp_wmb();
 
        /* Set type if specified and different than the current one */
        if (type != IRQ_TYPE_NONE &&
-           type != (get_irq_desc(virq)->status & IRQF_TRIGGER_MASK))
+           type != (irq_to_desc(virq)->status & IRQF_TRIGGER_MASK))
                set_irq_type(virq, type);
        return virq;
 }
        irq_map[virq].hwirq = host->inval_irq;
 
        /* Set some flags */
-       get_irq_desc(virq)->status |= IRQ_NOREQUEST;
+       irq_to_desc(virq)->status |= IRQ_NOREQUEST;
 
        /* Free it */
        irq_free_virt(virq, 1);
        unsigned int i;
 
        for (i = 0; i < NR_IRQS; i++)
-               get_irq_desc(i)->status |= IRQ_NOREQUEST;
+               irq_to_desc(i)->status |= IRQ_NOREQUEST;
 }
 
 /* We need to create the radix trees late */
                      "chip name", "host name");
 
        for (i = 1; i < NR_IRQS; i++) {
-               desc = get_irq_desc(i);
+               desc = irq_to_desc(i);
                spin_lock_irqsave(&desc->lock, flags);
 
                if (desc->action && desc->action->handler) {
 
 cpld_pic_host_map(struct irq_host *h, unsigned int virq,
                             irq_hw_number_t hw)
 {
-       get_irq_desc(virq)->status |= IRQ_LEVEL;
+       irq_to_desc(virq)->status |= IRQ_LEVEL;
        set_irq_chip_and_handler(virq, &cpld_pic, handle_level_irq);
        return 0;
 }
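As a hedged aside, the irq_host map hooks converted in this patch share one shape: set the trigger flags on the descriptor, then install the chip and flow handler. A sketch of that shape, where example_pic is a hypothetical struct irq_chip rather than anything the patch introduces:

static struct irq_chip example_pic;	/* hypothetical controller ops */

static int example_pic_host_map(struct irq_host *h, unsigned int virq,
				irq_hw_number_t hw)
{
	/* Record that the line is level sensitive ... */
	irq_to_desc(virq)->status |= IRQ_LEVEL;

	/* ... then attach the controller ops and a level flow handler. */
	set_irq_chip_and_handler(virq, &example_pic, handle_level_irq);
	return 0;
}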
 
 static int media5200_irq_map(struct irq_host *h, unsigned int virq,
                             irq_hw_number_t hw)
 {
-       struct irq_desc *desc = get_irq_desc(virq);
+       struct irq_desc *desc = irq_to_desc(virq);
 
        pr_debug("%s: h=%p, virq=%i, hwirq=%i\n", __func__, h, virq, (int)hw);
        set_irq_chip_data(virq, &media5200_irq);
 
 static int pci_pic_host_map(struct irq_host *h, unsigned int virq,
                            irq_hw_number_t hw)
 {
-       get_irq_desc(virq)->status |= IRQ_LEVEL;
+       irq_to_desc(virq)->status |= IRQ_LEVEL;
        set_irq_chip_data(virq, h->host_data);
        set_irq_chip_and_handler(virq, &pq2ads_pci_ic, handle_level_irq);
        return 0;
 
                irq_hw_number_t hwirq)
 {
        /* All interrupts are LEVEL sensitive */
-       get_irq_desc(virq)->status |= IRQ_LEVEL;
+       irq_to_desc(virq)->status |= IRQ_LEVEL;
        set_irq_chip_and_handler(virq, &socrates_fpga_pic_chip,
                        handle_fasteoi_irq);
 
 
                          irq_hw_number_t hwirq)
 {
        /* All interrupts are LEVEL sensitive */
-       get_irq_desc(virq)->status |= IRQ_LEVEL;
+       irq_to_desc(virq)->status |= IRQ_LEVEL;
        set_irq_chip_and_handler(virq, &gef_pic_chip, handle_level_irq);
 
        return 0;
 
 static int beatic_pic_host_map(struct irq_host *h, unsigned int virq,
                               irq_hw_number_t hw)
 {
-       struct irq_desc *desc = get_irq_desc(virq);
+       struct irq_desc *desc = irq_to_desc(virq);
        int64_t err;
 
        err = beat_construct_and_connect_irq_plug(virq, hw);
 
 
        /* Reset edge detection logic if necessary
         */
-       if (get_irq_desc(virq)->status & IRQ_LEVEL)
+       if (irq_to_desc(virq)->status & IRQ_LEVEL)
                return;
 
        /* Only interrupts 47 to 50 can be set to edge */
        struct spider_pic *pic = spider_virq_to_pic(virq);
        unsigned int hw = irq_map[virq].hwirq;
        void __iomem *cfg = spider_get_irq_config(pic, hw);
-       struct irq_desc *desc = get_irq_desc(virq);
+       struct irq_desc *desc = irq_to_desc(virq);
        u32 old_mask;
        u32 ic;
 
 
        unsigned long flags;
 
        for_each_irq (irq) {
-               struct irq_desc *desc = get_irq_desc(irq);
+               struct irq_desc *desc = irq_to_desc(irq);
 
                if (desc && desc->chip && desc->chip->startup) {
                        spin_lock_irqsave(&desc->lock, flags);
 
         unsigned long bit = 1UL << (src & 0x1f);
         int i = src >> 5;
 
-       spin_lock_irqsave(&pmac_pic_lock, flags);
-       if ((irq_desc[virq].status & IRQ_LEVEL) == 0)
+       spin_lock_irqsave(&pmac_pic_lock, flags);
+       if ((irq_to_desc(virq)->status & IRQ_LEVEL) == 0)
                out_le32(&pmac_irq_hw[i]->ack, bit);
         __set_bit(src, ppc_cached_irq_mask);
         __pmac_set_irq_mask(src, 0);
-       spin_unlock_irqrestore(&pmac_pic_lock, flags);
+       spin_unlock_irqrestore(&pmac_pic_lock, flags);
 
        return 0;
 }
 static int pmac_pic_host_map(struct irq_host *h, unsigned int virq,
                             irq_hw_number_t hw)
 {
-       struct irq_desc *desc = get_irq_desc(virq);
+       struct irq_desc *desc = irq_to_desc(virq);
        int level;
 
        if (hw >= max_irqs)
 
        cpumask_t cpumask;
        cpumask_t tmp = CPU_MASK_NONE;
 
-       cpumask_copy(&cpumask, irq_desc[virq].affinity);
+       cpumask_copy(&cpumask, irq_to_desc(virq)->affinity);
        if (!distribute_irqs)
                return default_server;
 
        /* Insert the interrupt mapping into the radix tree for fast lookup */
        irq_radix_revmap_insert(xics_host, virq, hw);
 
-       get_irq_desc(virq)->status |= IRQ_LEVEL;
+       irq_to_desc(virq)->status |= IRQ_LEVEL;
        set_irq_chip_and_handler(virq, xics_irq_chip, handle_fasteoi_irq);
        return 0;
 }
                /* We need to get IPIs still. */
                if (irq == XICS_IPI || irq == XICS_IRQ_SPURIOUS)
                        continue;
-               desc = get_irq_desc(virq);
+               desc = irq_to_desc(virq);
 
                /* We only need to migrate enabled IRQS */
                if (desc == NULL || desc->chip == NULL
                       virq, cpu);
 
                /* Reset affinity to all cpus */
-               cpumask_setall(irq_desc[virq].affinity);
+               cpumask_setall(irq_to_desc(virq)->affinity);
                desc->chip->set_affinity(virq, cpu_all_mask);
 unlock:
                spin_unlock_irqrestore(&desc->lock, flags);
 
 {
        pr_debug("cpm_pic_host_map(%d, 0x%lx)\n", virq, hw);
 
-       get_irq_desc(virq)->status |= IRQ_LEVEL;
+       irq_to_desc(virq)->status |= IRQ_LEVEL;
        set_irq_chip_and_handler(virq, &cpm_pic, handle_fasteoi_irq);
        return 0;
 }
 
 
 static void cpm2_end_irq(unsigned int virq)
 {
+       struct irq_desc *desc;
        int     bit, word;
        unsigned int irq_nr = virq_to_hw(virq);
 
-       if (!(irq_desc[irq_nr].status & (IRQ_DISABLED|IRQ_INPROGRESS))
-                       && irq_desc[irq_nr].action) {
+       desc = irq_to_desc(irq_nr);
+       if (!(desc->status & (IRQ_DISABLED|IRQ_INPROGRESS))
+                       && desc->action) {
 
                bit = irq_to_siubit[irq_nr];
                word = irq_to_siureg[irq_nr];
 static int cpm2_set_irq_type(unsigned int virq, unsigned int flow_type)
 {
        unsigned int src = virq_to_hw(virq);
-       struct irq_desc *desc = get_irq_desc(virq);
+       struct irq_desc *desc = irq_to_desc(virq);
        unsigned int vold, vnew, edibit;
 
        if (flow_type == IRQ_TYPE_NONE)
 {
        pr_debug("cpm2_pic_host_map(%d, 0x%lx)\n", virq, hw);
 
-       get_irq_desc(virq)->status |= IRQ_LEVEL;
+       irq_to_desc(virq)->status |= IRQ_LEVEL;
        set_irq_chip_and_handler(virq, &cpm2_pic, handle_level_irq);
        return 0;
 }
 
 {
        struct irq_chip *chip = &fsl_msi_chip;
 
-       get_irq_desc(virq)->status |= IRQ_TYPE_EDGE_FALLING;
+       irq_to_desc(virq)->status |= IRQ_TYPE_EDGE_FALLING;
 
        set_irq_chip_and_handler(virq, chip, handle_edge_irq);
 
 
 
        /* We block the internal cascade */
        if (hw == 2)
-               get_irq_desc(virq)->status |= IRQ_NOREQUEST;
+               irq_to_desc(virq)->status |= IRQ_NOREQUEST;
 
        /* We use the level handler only for now, we might want to
         * be more cautious here but that works for now
         */
-       get_irq_desc(virq)->status |= IRQ_LEVEL;
+       irq_to_desc(virq)->status |= IRQ_LEVEL;
        set_irq_chip_and_handler(virq, &i8259_pic, handle_level_irq);
        return 0;
 }
 
 {
        struct ipic *ipic = ipic_from_irq(virq);
        unsigned int src = ipic_irq_to_hw(virq);
-       struct irq_desc *desc = get_irq_desc(virq);
+       struct irq_desc *desc = irq_to_desc(virq);
        unsigned int vold, vnew, edibit;
 
        if (flow_type == IRQ_TYPE_NONE)
 
 
 static int mpc8xx_set_irq_type(unsigned int virq, unsigned int flow_type)
 {
-       struct irq_desc *desc = get_irq_desc(virq);
+       struct irq_desc *desc = irq_to_desc(virq);
 
        desc->status &= ~(IRQ_TYPE_SENSE_MASK | IRQ_LEVEL);
        desc->status |= flow_type & IRQ_TYPE_SENSE_MASK;
 
        cpumask_t mask;
        int cpuid;
 
-       cpumask_copy(&mask, irq_desc[virt_irq].affinity);
+       cpumask_copy(&mask, irq_to_desc(virt_irq)->affinity);
        if (cpus_equal(mask, CPU_MASK_ALL)) {
                static int irq_rover;
                static DEFINE_SPINLOCK(irq_rover_lock);
        if (irq < NUM_ISA_INTERRUPTS)
                return NULL;
 
-       return irq_desc[irq].chip_data;
+       return irq_to_desc(irq)->chip_data;
 }
 
 /* Determine if the linux irq is an IPI */
 /* Get the mpic structure from the IPI number */
 static inline struct mpic * mpic_from_ipi(unsigned int ipi)
 {
-       return irq_desc[ipi].chip_data;
+       return irq_to_desc(ipi)->chip_data;
 }
 #endif
 
 /* Get the mpic structure from the irq number */
 static inline struct mpic * mpic_from_irq(unsigned int irq)
 {
-       return irq_desc[irq].chip_data;
+       return irq_to_desc(irq)->chip_data;
 }
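A brief aside, not part of the change: the chip-private pointer read here is also reachable through the get_irq_chip_data() accessor already used elsewhere in this patch (see the uic hunks), which avoids dereferencing the descriptor by hand. A sketch under that assumption, with a hypothetical name:

/* Sketch only: equivalent lookup via the generic chip_data accessor. */
static inline struct mpic *example_mpic_from_irq(unsigned int irq)
{
	return get_irq_chip_data(irq);
}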
 
 /* Send an EOI */
 
        mpic_unmask_irq(irq);
 
-       if (irq_desc[irq].status & IRQ_LEVEL)
+       if (irq_to_desc(irq)->status & IRQ_LEVEL)
                mpic_ht_end_irq(mpic, src);
 }
 
        unsigned int src = mpic_irq_to_hw(irq);
 
        mpic_unmask_irq(irq);
-       mpic_startup_ht_interrupt(mpic, src, irq_desc[irq].status);
+       mpic_startup_ht_interrupt(mpic, src, irq_to_desc(irq)->status);
 
        return 0;
 }
        struct mpic *mpic = mpic_from_irq(irq);
        unsigned int src = mpic_irq_to_hw(irq);
 
-       mpic_shutdown_ht_interrupt(mpic, src, irq_desc[irq].status);
+       mpic_shutdown_ht_interrupt(mpic, src, irq_to_desc(irq)->status);
        mpic_mask_irq(irq);
 }
 
         * latched another edge interrupt coming in anyway
         */
 
-       if (irq_desc[irq].status & IRQ_LEVEL)
+       if (irq_to_desc(irq)->status & IRQ_LEVEL)
                mpic_ht_end_irq(mpic, src);
        mpic_eoi(mpic);
 }
 {
        struct mpic *mpic = mpic_from_irq(virq);
        unsigned int src = mpic_irq_to_hw(virq);
-       struct irq_desc *desc = get_irq_desc(virq);
+       struct irq_desc *desc = irq_to_desc(virq);
        unsigned int vecpri, vold, vnew;
 
        DBG("mpic: set_irq_type(mpic:@%p,virq:%d,src:0x%x,type:0x%x)\n",
 
 {
        int level1;
 
-       get_irq_desc(virq)->status |= IRQ_LEVEL;
+       irq_to_desc(virq)->status |= IRQ_LEVEL;
 
        level1 = (hwirq & MV64x60_LEVEL1_MASK) >> MV64x60_LEVEL1_OFFSET;
        BUG_ON(level1 > MV64x60_LEVEL1_GPP);
 
 
 static inline struct qe_ic *qe_ic_from_irq(unsigned int virq)
 {
-       return irq_desc[virq].chip_data;
+       return irq_to_desc(virq)->chip_data;
 }
 
 #define virq_to_hw(virq)       ((unsigned int)irq_map[virq].hwirq)
        chip = &qe_ic->hc_irq;
 
        set_irq_chip_data(virq, qe_ic);
-       get_irq_desc(virq)->status |= IRQ_LEVEL;
+       irq_to_desc(virq)->status |= IRQ_LEVEL;
 
        set_irq_chip_and_handler(virq, chip, handle_level_irq);
 
 
        DBG("%s(%d, 0x%lx)\n", __func__, virq, hw);
        if ((virq >= 1) && (virq <= 4)){
                irq = virq + IRQ_PCI_INTAD_BASE - 1;
-               get_irq_desc(irq)->status |= IRQ_LEVEL;
+               irq_to_desc(irq)->status |= IRQ_LEVEL;
                set_irq_chip(irq, &tsi108_pci_irq);
        }
        return 0;
 
 
 static void uic_unmask_irq(unsigned int virq)
 {
-       struct irq_desc *desc = get_irq_desc(virq);
+       struct irq_desc *desc = irq_to_desc(virq);
        struct uic *uic = get_irq_chip_data(virq);
        unsigned int src = uic_irq_to_hw(virq);
        unsigned long flags;
 
 static void uic_mask_ack_irq(unsigned int virq)
 {
-       struct irq_desc *desc = get_irq_desc(virq);
+       struct irq_desc *desc = irq_to_desc(virq);
        struct uic *uic = get_irq_chip_data(virq);
        unsigned int src = uic_irq_to_hw(virq);
        unsigned long flags;
 {
        struct uic *uic = get_irq_chip_data(virq);
        unsigned int src = uic_irq_to_hw(virq);
-       struct irq_desc *desc = get_irq_desc(virq);
+       struct irq_desc *desc = irq_to_desc(virq);
        unsigned long flags;
        int trigger, polarity;
        u32 tr, pr, mask;
 
 
 static int xilinx_intc_set_type(unsigned int virq, unsigned int flow_type)
 {
-       struct irq_desc *desc = get_irq_desc(virq);
+       struct irq_desc *desc = irq_to_desc(virq);
 
        desc->status &= ~(IRQ_TYPE_SENSE_MASK | IRQ_LEVEL);
        desc->status |= flow_type & IRQ_TYPE_SENSE_MASK;