www.infradead.org Git - users/jedix/linux-maple.git/commitdiff
x86/irq: Protect smp_cleanup_move
author: Thomas Gleixner <tglx@linutronix.de>
Sun, 2 Aug 2015 20:38:23 +0000 (20:38 +0000)
committer: Chuck Anderson <chuck.anderson@oracle.com>
Tue, 22 Aug 2017 15:43:01 +0000 (08:43 -0700)
smp_cleanup_move fiddles without protection in the interrupt
descriptors and the vector array. A concurrent irq setup/teardown or
affinity setting can pull the rug under that operation.

Add proper locking.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Cc: Jiang Liu <jiang.liu@linux.intel.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Rusty Russell <rusty@rustcorp.com.au>
Cc: Bjorn Helgaas <bhelgaas@google.com>
Link: http://lkml.kernel.org/r/20150802203609.222975294@linutronix.de
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
(cherry picked from commit df54c4934e030e73cb6a7bd6713f697350dabd0b)

Orabug: 25677661

Signed-off-by: Ethan Zhao <ethan.zhao@oracle.com>
Reviewed-by: Jack vogel <jack.vogel@oracle.com>
Conflicts:
arch/x86/kernel/apic/vector.c

arch/x86/kernel/apic/vector.c

index 171f89adef597d3ad63c6326a887440ad0178f9d..294ffc2f0be1d1c98f57463b6566cbc671c259f1 100644 (file)
@@ -409,6 +409,9 @@ asmlinkage __visible void smp_irq_move_cleanup_interrupt(void)
        irq_enter();
        exit_idle();
 
+       /* Prevent vectors vanishing under us */
+       raw_spin_lock(&vector_lock);
+
        me = smp_processor_id();
        for (vector = FIRST_EXTERNAL_VECTOR; vector < NR_VECTORS; vector++) {
                int irq;
@@ -416,6 +419,7 @@ asmlinkage __visible void smp_irq_move_cleanup_interrupt(void)
                struct irq_desc *desc;
                struct irq_cfg *cfg;
 
+       retry:
                irq = __this_cpu_read(vector_irq[vector]);
 
                if (irq <= VECTOR_UNDEFINED)
@@ -429,7 +433,12 @@ asmlinkage __visible void smp_irq_move_cleanup_interrupt(void)
                if (!cfg)
                        continue;
 
-               raw_spin_lock(&desc->lock);
+               if (!raw_spin_trylock(&desc->lock)) {
+                       raw_spin_unlock(&vector_lock);
+                       cpu_relax();
+                       raw_spin_lock(&vector_lock);
+                       goto retry;
+               }
 
                /*
                 * Check if the irq migration is in progress. If so, we
@@ -458,7 +467,8 @@ unlock:
                raw_spin_unlock(&desc->lock);
        }
 
-       irq_exit();
+       raw_spin_unlock(&vector_lock);
+       exiting_irq();
 }
 
 static void __irq_complete_move(struct irq_cfg *cfg, unsigned vector)