rcu: Add mutex for rcu boost kthread spawning and affinity setting
author     David Woodhouse <dwmw@amazon.co.uk>
           Wed, 8 Dec 2021 23:41:53 +0000 (23:41 +0000)
committer  David Woodhouse <dwmw@amazon.co.uk>
           Thu, 9 Dec 2021 19:22:45 +0000 (19:22 +0000)
As we handle parallel CPU bringup, we will need to take care to avoid
spawning multiple boost kthreads for the same rcu_node, and to avoid
races when setting their affinity. Spotted by Paul McKenney.
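
To make the race concrete, here is a minimal userspace sketch of the
check-then-create pattern this patch serializes. It uses pthreads in
place of kthreads; struct node, have_task and the helper names are
illustrative stand-ins, not kernel API. Without the mutex, two "CPUs"
coming up in parallel can both observe that no boost kthread exists and
each spawn one; holding the mutex across the check and the create makes
the spawn idempotent, which is the same pattern the diff below applies
to rcu_spawn_one_boost_kthread() and rcu_boost_kthread_setaffinity().

#include <pthread.h>
#include <stddef.h>

struct node {
	pthread_mutex_t boost_kthread_mutex;
	pthread_t boost_kthread_task;
	int have_task;			/* stands in for the NULL check */
};

static struct node the_node = {
	.boost_kthread_mutex = PTHREAD_MUTEX_INITIALIZER,
};

static void *boost_kthread(void *arg)
{
	(void)arg;			/* real kthread would boost readers */
	return NULL;
}

/* Models rcu_spawn_one_boost_kthread(): safe to call concurrently. */
static void spawn_one_boost_kthread(struct node *rnp)
{
	pthread_mutex_lock(&rnp->boost_kthread_mutex);
	if (rnp->have_task)
		goto out;		/* another caller already spawned it */
	if (pthread_create(&rnp->boost_kthread_task, NULL,
			   boost_kthread, rnp))
		goto out;		/* creation failed; leave state clean */
	rnp->have_task = 1;
out:
	pthread_mutex_unlock(&rnp->boost_kthread_mutex);
}

static void *cpu_bringup(void *arg)
{
	(void)arg;
	spawn_one_boost_kthread(&the_node);	/* both callers race here */
	return NULL;
}

int main(void)
{
	pthread_t a, b;

	pthread_create(&a, NULL, cpu_bringup, NULL);
	pthread_create(&b, NULL, cpu_bringup, NULL);
	pthread_join(a, NULL);
	pthread_join(b, NULL);
	/* Exactly one boost "kthread" exists, however the calls raced. */
	pthread_join(the_node.boost_kthread_task, NULL);
	return 0;
}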

Signed-off-by: David Woodhouse <dwmw@amazon.co.uk>
Acked-by: Paul E. McKenney <paulmck@kernel.org>
kernel/rcu/tree.c
kernel/rcu/tree.h
kernel/rcu/tree_plugin.h

diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
index 2e1ae611be9813b32b3de4252834117c6477f9a3..10836f72ba161ccf6a5a6a463bc4b0c0eed5f894 100644
--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
@@ -4530,6 +4530,7 @@ static void __init rcu_init_one(void)
                        init_waitqueue_head(&rnp->exp_wq[2]);
                        init_waitqueue_head(&rnp->exp_wq[3]);
                        spin_lock_init(&rnp->exp_lock);
+                       mutex_init(&rnp->boost_kthread_mutex);
                }
        }
 
diff --git a/kernel/rcu/tree.h b/kernel/rcu/tree.h
index aff4cc9303fb469f7a5009589826a86306d9fa93..055e30b3e5e0dfdb57620f242e0a2b5bf716ba54 100644
--- a/kernel/rcu/tree.h
+++ b/kernel/rcu/tree.h
@@ -108,6 +108,9 @@ struct rcu_node {
                                /*  side effect, not as a lock. */
        unsigned long boost_time;
                                /* When to start boosting (jiffies). */
+       struct mutex boost_kthread_mutex;
+                               /* Exclusion for thread spawning and affinity */
+                               /*  manipulation. */
        struct task_struct *boost_kthread_task;
                                /* kthread that takes care of priority */
                                /*  boosting for this rcu_node structure. */
diff --git a/kernel/rcu/tree_plugin.h b/kernel/rcu/tree_plugin.h
index 5199559fbbf0c0922a03c93c461c93156ad6e060..3b4ee0933710b5577a11e3be75ffffda6c5506df 100644
--- a/kernel/rcu/tree_plugin.h
+++ b/kernel/rcu/tree_plugin.h
@@ -1162,15 +1162,16 @@ static void rcu_spawn_one_boost_kthread(struct rcu_node *rnp)
        struct sched_param sp;
        struct task_struct *t;
 
+       mutex_lock(&rnp->boost_kthread_mutex);
        if (rnp->boost_kthread_task || !rcu_scheduler_fully_active)
-               return;
+               goto out;
 
        rcu_state.boost = 1;
 
        t = kthread_create(rcu_boost_kthread, (void *)rnp,
                           "rcub/%d", rnp_index);
        if (WARN_ON_ONCE(IS_ERR(t)))
-               return;
+               goto out;
 
        raw_spin_lock_irqsave_rcu_node(rnp, flags);
        rnp->boost_kthread_task = t;
@@ -1178,6 +1179,9 @@ static void rcu_spawn_one_boost_kthread(struct rcu_node *rnp)
        sp.sched_priority = kthread_prio;
        sched_setscheduler_nocheck(t, SCHED_FIFO, &sp);
        wake_up_process(t); /* get to TASK_INTERRUPTIBLE quickly. */
+
+ out:
+       mutex_unlock(&rnp->boost_kthread_mutex);
 }
 
 /*
@@ -1200,6 +1204,7 @@ static void rcu_boost_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu)
                return;
        if (!zalloc_cpumask_var(&cm, GFP_KERNEL))
                return;
+       mutex_lock(&rnp->boost_kthread_mutex);
        for_each_leaf_node_possible_cpu(rnp, cpu)
                if ((mask & leaf_node_cpu_bit(rnp, cpu)) &&
                    cpu != outgoingcpu)
@@ -1207,6 +1212,7 @@ static void rcu_boost_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu)
        if (cpumask_weight(cm) == 0)
                cpumask_setall(cm);
        set_cpus_allowed_ptr(t, cm);
+       mutex_unlock(&rnp->boost_kthread_mutex);
        free_cpumask_var(cm);
 }