 static struct { int pin, apic; } ioapic_i8259 = { -1, -1 };
 
 static DEFINE_SPINLOCK(ioapic_lock);
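+/*
+ * No longer static: the cpu online/offline paths in smpboot.c now take
+ * this lock to keep vector assignment consistent with cpu_online_map.
+ */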
-static DEFINE_SPINLOCK(vector_lock);
+DEFINE_SPINLOCK(vector_lock);
 
 /*
  * # of IRQ routing registers
 
        BUG_ON((unsigned)irq >= NR_IRQ_VECTORS);
 
+       /* Only try to allocate irqs on cpus that are online */
+       cpus_and(mask, mask, cpu_online_map);
+
        if (irq_vector[irq] > 0)
                old_vector = irq_vector[irq];
        if (old_vector > 0) {
        }
 
        for_each_cpu_mask(cpu, mask) {
-               cpumask_t domain;
+               cpumask_t domain, new_mask;
                int new_cpu;
                int vector, offset;
 
                domain = vector_allocation_domain(cpu);
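+               /* Consider only online cpus in this allocation domain */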
+               cpus_and(new_mask, domain, cpu_online_map);
 
                vector = current_vector;
                offset = current_offset;
                        continue;
                if (vector == IA32_SYSCALL_VECTOR)
                        goto next;
-               for_each_cpu_mask(new_cpu, domain)
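+               /* Skip vectors already in use on any online cpu in the domain */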
+               for_each_cpu_mask(new_cpu, new_mask)
                        if (per_cpu(vector_irq, new_cpu)[vector] != -1)
                                goto next;
                /* Found one! */
                current_vector = vector;
                current_offset = offset;
                if (old_vector >= 0) {
+                       cpumask_t old_mask;
                        int old_cpu;
-                       for_each_cpu_mask(old_cpu, irq_domain[irq])
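+                       /*
+                        * Release the old vector on the cpus that are
+                        * still online; offline cpus refresh their
+                        * vector_irq in __setup_vector_irq() when they
+                        * come back.
+                        */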
+                       cpus_and(old_mask, irq_domain[irq], cpu_online_map);
+                       for_each_cpu_mask(old_cpu, old_mask)
                                per_cpu(vector_irq, old_cpu)[old_vector] = -1;
                }
-               for_each_cpu_mask(new_cpu, domain)
+               for_each_cpu_mask(new_cpu, new_mask)
                        per_cpu(vector_irq, new_cpu)[vector] = irq;
                irq_vector[irq] = vector;
                irq_domain[irq] = domain;
        return vector;
 }
 
+void __setup_vector_irq(int cpu)
+{
+       /*
+        * Initialize vector_irq on a new cpu.
+        * This function must be called with vector_lock held.
+        */
+       int irq, vector;
+
+       /* Mark the in-use vectors */
+       for (irq = 0; irq < NR_IRQ_VECTORS; ++irq) {
+               if (!cpu_isset(cpu, irq_domain[irq]))
+                       continue;
+               vector = irq_vector[irq];
+               per_cpu(vector_irq, cpu)[vector] = irq;
+       }
+       /*
+        * Mark the free vectors: clear stale entries whose irq domain
+        * no longer includes this cpu.
+        */
+       for (vector = 0; vector < NR_VECTORS; ++vector) {
+               irq = per_cpu(vector_irq, cpu)[vector];
+               if (irq < 0)
+                       continue;
+               if (!cpu_isset(cpu, irq_domain[irq]))
+                       per_cpu(vector_irq, cpu)[vector] = -1;
+       }
+}
+
 extern void (*interrupt[NR_IRQS])(void);
 
 static struct irq_chip ioapic_chip;
 
         * smp_call_function().
         */
        lock_ipi_call_lock();
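+       /*
+        * Hold vector_lock across vector_irq setup and the update of
+        * cpu_online_map so the vector allocator never sees this cpu
+        * online with uninitialized vector_irq entries.
+        */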
+       spin_lock(&vector_lock);
 
+       /* Set up the per-cpu irq handling data structures */
+       __setup_vector_irq(smp_processor_id());
        /*
         * Allow the master to continue.
         */
        cpu_set(smp_processor_id(), cpu_online_map);
        per_cpu(cpu_state, smp_processor_id()) = CPU_ONLINE;
+       spin_unlock(&vector_lock);
        unlock_ipi_call_lock();
 
        cpu_idle();
                                cpu, node);
        }
 
-
        alternatives_smp_switch(1);
 
        c_idle.idle = get_idle_for_cpu(cpu);
        local_irq_disable();
        remove_siblinginfo(cpu);
 
+       spin_lock(&vector_lock);
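+       /*
+        * With vector_lock held, a concurrent vector allocation cannot
+        * race with this cpu leaving cpu_online_map.
+        */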
        /* It's now safe to remove this processor from the online map */
        cpu_clear(cpu, cpu_online_map);
+       spin_unlock(&vector_lock);
        remove_cpu_from_maps();
        fixup_irqs(cpu_online_map);
        return 0;