From: David Woodhouse Date: Tue, 16 Feb 2021 15:04:34 +0000 (+0000) Subject: Add locking around rcu_cpu_starting() X-Git-Url: https://www.infradead.org/git/?a=commitdiff_plain;h=8e14fb28950f43555acbcac9c5f1054a4b06a45c;p=users%2Fdwmw2%2Flinux.git Add locking around rcu_cpu_starting() If we bring up APs in parallel, this doesn't work well without locking. Signed-off-by: David Woodhouse --- diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c index 40e5e3dd253e0..a5af3c6ba7d5e 100644 --- a/kernel/rcu/tree.c +++ b/kernel/rcu/tree.c @@ -4101,6 +4101,8 @@ int rcutree_offline_cpu(unsigned int cpu) * from the incoming CPU rather than from the cpuhp_step mechanism. * This is because this function must be invoked at a precise location. */ +static DEFINE_RAW_SPINLOCK(rcu_startup_lock); + void rcu_cpu_starting(unsigned int cpu) { unsigned long flags; @@ -4109,9 +4111,11 @@ void rcu_cpu_starting(unsigned int cpu) struct rcu_node *rnp; bool newcpu; + raw_spin_lock(&rcu_startup_lock); + rdp = per_cpu_ptr(&rcu_data, cpu); if (rdp->cpu_started) - return; + goto out; rdp->cpu_started = true; rnp = rdp->mynode; @@ -4142,6 +4146,8 @@ void rcu_cpu_starting(unsigned int cpu) WRITE_ONCE(rnp->ofl_seq, rnp->ofl_seq + 1); WARN_ON_ONCE(rnp->ofl_seq & 0x1); smp_mb(); /* Ensure RCU read-side usage follows above initialization. */ + out: + raw_spin_unlock(&rcu_startup_lock); } /*