err = assign_irq_vector(irq, data, dest);
        if (err) {
-               struct irq_data *top = irq_get_irq_data(irq);
-
                if (assign_irq_vector(irq, data,
-                                     irq_data_get_affinity_mask(top)))
+                                     irq_data_get_affinity_mask(irq_data)))
                        pr_err("Failed to recover vector for irq %d\n", irq);
                return err;
        }
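
The removed `top` lookup existed because the mask used to live in each irq_data, and only the top-level irq_data (desc->irq_data) carried the allocated cpumask; a child irq_data in a hierarchical irqdomain had to climb up to read it. With the mask in irq_common_data, which every irq_data of one interrupt points at, the local `irq_data` argument is just as good. A minimal user-space model of that sharing (types and names are illustrative stand-ins, not the kernel's):

	#include <assert.h>
	#include <stddef.h>

	/* illustrative stand-ins for the kernel structures */
	struct common_model { unsigned long affinity; };
	struct data_model {
		struct data_model	*parent;
		struct common_model	*common;
	};

	int main(void)
	{
		struct common_model shared = { .affinity = 0x3 };
		struct data_model top  = { .parent = NULL, .common = &shared };
		struct data_model leaf = { .parent = &top, .common = &shared };

		/* every level of one interrupt's hierarchy sees the same storage */
		assert(leaf.common->affinity == top.common->affinity);
		return 0;
	}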
 
 /*
  * Return value for chip->irq_set_affinity()
  *
- * IRQ_SET_MASK_OK     - OK, core updates irq_data.affinity
- * IRQ_SET_MASK_NOCPY  - OK, chip did update irq_data.affinity
+ * IRQ_SET_MASK_OK     - OK, core updates irq_common_data.affinity
+ * IRQ_SET_MASK_OK_NOCOPY - OK, chip did update irq_common_data.affinity
  * IRQ_SET_MASK_OK_DONE        - Same as IRQ_SET_MASK_OK for core. Special code to
  *                       support stacked irqchips, which indicates skipping
  *                       all descendent irqchips.
  *                     Use accessor functions to deal with it
  * @node:              node index useful for balancing
  * @handler_data:      per-IRQ data for the irq_chip methods
+ * @affinity:          IRQ affinity on SMP
  */
 struct irq_common_data {
        unsigned int            state_use_accessors;
        unsigned int            node;
 #endif
        void                    *handler_data;
+       cpumask_var_t           affinity;
 };
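
A chip callback that rewrites the mask itself must answer IRQ_SET_MASK_OK_NOCOPY (see the return codes documented above) so the core leaves the freshly moved irq_common_data.affinity alone. A hedged sketch of such a callback, hypothetical driver code rather than anything in this patch:

	static int demo_chip_set_affinity(struct irq_data *d,
					  const struct cpumask *mask, bool force)
	{
		unsigned int cpu = cpumask_first_and(mask, cpu_online_mask);

		if (cpu >= nr_cpu_ids)
			return -EINVAL;

		/* hardware targets a single CPU: narrow the mask ourselves */
		cpumask_copy(irq_data_get_affinity_mask(d), cpumask_of(cpu));
		return IRQ_SET_MASK_OK_NOCOPY;
	}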
 
 /**
  * @chip_data:         platform-specific per-chip private data for the chip
  *                     methods, to allow shared chip implementations
  * @msi_desc:          MSI descriptor
- * @affinity:          IRQ affinity on SMP
  */
 struct irq_data {
        u32                     mask;
 #endif
        void                    *chip_data;
        struct msi_desc         *msi_desc;
-       cpumask_var_t           affinity;
 };
 
 /*
 {
        struct irq_data *d = irq_get_irq_data(irq);
 
-       return d ? d->affinity : NULL;
+       return d ? d->common->affinity : NULL;
 }
 
 static inline struct cpumask *irq_data_get_affinity_mask(struct irq_data *d)
 {
-       return d->affinity;
+       return d->common->affinity;
 }
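
Any code that dereferenced d->affinity directly breaks at this point; callers that go through the accessor are insulated from where the mask is stored, which is exactly why this move needs no fixups outside these helpers. A hypothetical caller, for illustration only:

	static bool irq_targets_this_cpu(struct irq_data *d)
	{
		return cpumask_test_cpu(smp_processor_id(),
					irq_data_get_affinity_mask(d));
	}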
 
 unsigned int arch_dynirq_lower_bound(unsigned int from);
 
 #ifdef CONFIG_SMP
 static int alloc_masks(struct irq_desc *desc, gfp_t gfp, int node)
 {
-       if (!zalloc_cpumask_var_node(&desc->irq_data.affinity, gfp, node))
+       if (!zalloc_cpumask_var_node(&desc->irq_common_data.affinity,
+                                    gfp, node))
                return -ENOMEM;
 
 #ifdef CONFIG_GENERIC_PENDING_IRQ
        if (!zalloc_cpumask_var_node(&desc->pending_mask, gfp, node)) {
-               free_cpumask_var(desc->irq_data.affinity);
+               free_cpumask_var(desc->irq_common_data.affinity);
                return -ENOMEM;
        }
 #endif
 
 static void desc_smp_init(struct irq_desc *desc, int node)
 {
-       cpumask_copy(desc->irq_data.affinity, irq_default_affinity);
+       cpumask_copy(desc->irq_common_data.affinity, irq_default_affinity);
 #ifdef CONFIG_GENERIC_PENDING_IRQ
        cpumask_clear(desc->pending_mask);
 #endif
 #ifdef CONFIG_GENERIC_PENDING_IRQ
        free_cpumask_var(desc->pending_mask);
 #endif
-       free_cpumask_var(desc->irq_data.affinity);
+       free_cpumask_var(desc->irq_common_data.affinity);
 }
 #else
 static inline void free_masks(struct irq_desc *desc) { }
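
For completeness, the !SMP side pairs this empty free_masks() stub with an alloc_masks() that trivially succeeds, so irq_desc setup and teardown can call both unconditionally. Sketched from the elided context (assumption: the kernel stub matches this shape):

	static inline int alloc_masks(struct irq_desc *desc, gfp_t gfp, int node)
	{
		return 0;	/* no affinity/pending masks to allocate on UP */
	}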
 
        switch (ret) {
        case IRQ_SET_MASK_OK:
        case IRQ_SET_MASK_OK_DONE:
-               cpumask_copy(data->affinity, mask);
+               cpumask_copy(desc->irq_common_data.affinity, mask);
        case IRQ_SET_MASK_OK_NOCOPY:
                irq_set_thread_affinity(desc);
                ret = 0;
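
Note the deliberate fallthrough in this switch: IRQ_SET_MASK_OK and IRQ_SET_MASK_OK_DONE copy the caller's mask into irq_common_data.affinity and then share the IRQ_SET_MASK_OK_NOCOPY tail, which propagates the change to threaded handlers via irq_set_thread_affinity(). Only the copy destination changes in this patch; the control flow is untouched. The construct, with the implicit fallthrough spelled out (annotation only, not part of the patch):

	switch (ret) {
	case IRQ_SET_MASK_OK:
	case IRQ_SET_MASK_OK_DONE:
		cpumask_copy(desc->irq_common_data.affinity, mask);
		/* fall through: OK also wants the thread-affinity update */
	case IRQ_SET_MASK_OK_NOCOPY:
		irq_set_thread_affinity(desc);
		ret = 0;
	}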
        if (irq_move_pending(&desc->irq_data))
                irq_get_pending(cpumask, desc);
        else
-               cpumask_copy(cpumask, desc->irq_data.affinity);
+               cpumask_copy(cpumask, desc->irq_common_data.affinity);
        raw_spin_unlock_irqrestore(&desc->lock, flags);
 
        notify->notify(notify, cpumask);
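
The notifier path copies the mask into a private cpumask while holding desc->lock and only then invokes notify() on that snapshot, so the callback (which may sleep) never races with a concurrent affinity update. The same pattern in isolation, a sketch assuming a caller-provided scratch mask:

	raw_spin_lock_irqsave(&desc->lock, flags);
	cpumask_copy(scratch, desc->irq_common_data.affinity);	/* snapshot */
	raw_spin_unlock_irqrestore(&desc->lock, flags);

	notify->notify(notify, scratch);	/* safe: operates on the copy */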
         * one of the targets is online.
         */
        if (irqd_has_set(&desc->irq_data, IRQD_AFFINITY_SET)) {
-               if (cpumask_intersects(desc->irq_data.affinity,
+               if (cpumask_intersects(desc->irq_common_data.affinity,
                                       cpu_online_mask))
-                       set = desc->irq_data.affinity;
+                       set = desc->irq_common_data.affinity;
                else
                        irqd_clear(&desc->irq_data, IRQD_AFFINITY_SET);
        }
         * This code is triggered unconditionally. Check the affinity
         * mask pointer. For CPU_MASK_OFFSTACK=n this is optimized out.
         */
-       if (desc->irq_data.affinity)
-               cpumask_copy(mask, desc->irq_data.affinity);
+       if (desc->irq_common_data.affinity)
+               cpumask_copy(mask, desc->irq_common_data.affinity);
        else
                valid = false;
        raw_spin_unlock_irq(&desc->lock);
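
The pointer check above only matters for CONFIG_CPUMASK_OFFSTACK=y; otherwise cpumask_var_t is an embedded array whose address is never NULL, so the compiler folds the branch away. The kernel's definition is essentially:

	#ifdef CONFIG_CPUMASK_OFFSTACK
	typedef struct cpumask	*cpumask_var_t;		/* really allocated */
	#else
	typedef struct cpumask	cpumask_var_t[1];	/* embedded, alloc is a no-op */
	#endif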
 
 static int show_irq_affinity(int type, struct seq_file *m, void *v)
 {
        struct irq_desc *desc = irq_to_desc((long)m->private);
-       const struct cpumask *mask = desc->irq_data.affinity;
+       const struct cpumask *mask = desc->irq_common_data.affinity;
 
 #ifdef CONFIG_GENERIC_PENDING_IRQ
        if (irqd_is_setaffinity_pending(&desc->irq_data))