Add locking around rcu_cpu_starting()
author    David Woodhouse <dwmw@amazon.co.uk>
          Tue, 16 Feb 2021 15:04:34 +0000 (15:04 +0000)
committer David Woodhouse <dwmw@amazon.co.uk>
          Tue, 16 Feb 2021 15:04:34 +0000 (15:04 +0000)
If we bring up APs in parallel, multiple CPUs can enter rcu_cpu_starting()
concurrently, and it doesn't cope with that without explicit locking.

Signed-off-by: David Woodhouse <dwmw@amazon.co.uk>
kernel/rcu/tree.c

index 40e5e3dd253e077cace9e8bd0edae8c71b9bf87d..a5af3c6ba7d5e70297b97841078551ba10135810 100644
@@ -4101,6 +4101,8 @@ int rcutree_offline_cpu(unsigned int cpu)
  * from the incoming CPU rather than from the cpuhp_step mechanism.
  * This is because this function must be invoked at a precise location.
  */
+static DEFINE_RAW_SPINLOCK(rcu_startup_lock);
+
 void rcu_cpu_starting(unsigned int cpu)
 {
        unsigned long flags;
@@ -4109,9 +4111,11 @@ void rcu_cpu_starting(unsigned int cpu)
        struct rcu_node *rnp;
        bool newcpu;
 
+       raw_spin_lock(&rcu_startup_lock);
+
        rdp = per_cpu_ptr(&rcu_data, cpu);
        if (rdp->cpu_started)
-               return;
+               goto out;
        rdp->cpu_started = true;
 
        rnp = rdp->mynode;
@@ -4142,6 +4146,8 @@ void rcu_cpu_starting(unsigned int cpu)
        WRITE_ONCE(rnp->ofl_seq, rnp->ofl_seq + 1);
        WARN_ON_ONCE(rnp->ofl_seq & 0x1);
        smp_mb(); /* Ensure RCU read-side usage follows above initialization. */
+ out:
+       raw_spin_unlock(&rcu_startup_lock);
 }
 
 /*
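
For readers skimming the hunks above: the change takes a single global raw
spinlock for the whole of rcu_cpu_starting() and converts the early
"already started" return into a goto, so the lock is dropped on every exit
path. Below is a minimal user-space sketch of that pattern; it is not
kernel code. The names (bringup_lock, unit_started, unit_starting) are
illustrative, and a pthread mutex stands in for the kernel's raw spinlock.

    #include <pthread.h>
    #include <stdbool.h>
    #include <stdio.h>

    #define NR_UNITS 4

    static pthread_mutex_t bringup_lock = PTHREAD_MUTEX_INITIALIZER;
    static bool unit_started[NR_UNITS];

    static void unit_starting(unsigned int u)
    {
            pthread_mutex_lock(&bringup_lock);

            if (unit_started[u])
                    goto out;       /* lost the race: already brought up */
            unit_started[u] = true;

            /* One-time bring-up work, now serialized against parallel callers. */
            printf("unit %u started\n", u);
    out:
            pthread_mutex_unlock(&bringup_lock);
    }

    int main(void)
    {
            unit_starting(0);
            unit_starting(0);       /* second call takes the early exit */
            return 0;
    }

In the kernel a raw spinlock is used rather than a sleeping lock because
rcu_cpu_starting() runs on the incoming CPU early in bring-up, a context
where sleeping is not permitted.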