sched: Don't defer CPU pick to migration_cpu_stop()
author Valentin Schneider <valentin.schneider@arm.com>
Wed, 26 May 2021 20:57:50 +0000 (21:57 +0100)
committer Peter Zijlstra <peterz@infradead.org>
Tue, 1 Jun 2021 14:00:11 +0000 (16:00 +0200)
Will reported that the 'XXX __migrate_task() can fail' in migration_cpu_stop()
can happen, and it *is* sort of a big deal. Looking at it some more, one
will note there is a glaring hole in the deferred CPU selection:

  (w/ CONFIG_CPUSET=n, so that the affinity mask passed via taskset doesn't
  get AND'd with cpu_online_mask)

  $ taskset -pc 0-2 $PID
  # offline CPUs 3-4
  $ taskset -pc 3-5 $PID
    `\
      $PID may stay on 0-2 due to the cpumask_any_distribute() picking an
      offline CPU and __migrate_task() refusing to do anything due to
      cpu_is_allowed().

set_cpus_allowed_ptr() goes to some length to pick a dest_cpu that matches
the right constraints vs affinity and the online/active state of the
CPUs. Reuse that instead of discarding it in the affine_move_task() case.
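
For illustration only, the difference between the two picks can be modelled with a
small userspace sketch (plain bitmasks stand in for cpumasks, and pick_any() is a
made-up stand-in for the "pick any allowed CPU" step; none of this is kernel code):

  /*
   * Userspace model only: cpumasks are plain bitmasks, pick_any() is a
   * trivial stand-in for cpumask_any_distribute()-style selection.
   */
  #include <stdio.h>

  #define CPU_BIT(c)	(1u << (c))

  /* Lowest set bit, i.e. a trivial "any CPU in this mask" pick; -1 if empty. */
  static int pick_any(unsigned int mask)
  {
  	int cpu;

  	for (cpu = 0; cpu < 32; cpu++)
  		if (mask & CPU_BIT(cpu))
  			return cpu;
  	return -1;
  }

  int main(void)
  {
  	/* taskset -pc 3-5 $PID, but CPUs 3-4 were offlined beforehand. */
  	unsigned int new_mask = CPU_BIT(3) | CPU_BIT(4) | CPU_BIT(5);
  	unsigned int online   = CPU_BIT(0) | CPU_BIT(1) | CPU_BIT(2) | CPU_BIT(5);

  	/* Deferred pick against the affinity mask alone: can land on an offline CPU. */
  	int deferred = pick_any(new_mask);
  	/* Pick against affinity AND online/active state, as set_cpus_allowed_ptr() does. */
  	int validated = pick_any(new_mask & online);

  	printf("deferred pick : CPU %d (%s)\n", deferred,
  	       (online & CPU_BIT(deferred)) ? "online" : "offline, migration refused");
  	printf("validated pick: CPU %d\n", validated);
  	return 0;
  }

Here the deferred pick lands on CPU 3 (offline, so the migration is refused and the
task stays where it is), while the validated pick lands on CPU 5.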

Fixes: 6d337eab041d ("sched: Fix migrate_disable() vs set_cpus_allowed_ptr()")
Reported-by: Will Deacon <will@kernel.org>
Signed-off-by: Valentin Schneider <valentin.schneider@arm.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Link: https://lkml.kernel.org/r/20210526205751.842360-2-valentin.schneider@arm.com
kernel/sched/core.c

diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index e205c191b7fba934b62f5e136e993812214f29c9..7e5946698711244bc115f0ad1c2e7716bcc47414 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -2273,7 +2273,6 @@ static int migration_cpu_stop(void *data)
        struct migration_arg *arg = data;
        struct set_affinity_pending *pending = arg->pending;
        struct task_struct *p = arg->task;
-       int dest_cpu = arg->dest_cpu;
        struct rq *rq = this_rq();
        bool complete = false;
        struct rq_flags rf;
@@ -2311,19 +2310,15 @@ static int migration_cpu_stop(void *data)
                if (pending) {
                        p->migration_pending = NULL;
                        complete = true;
-               }
 
-               if (dest_cpu < 0) {
                        if (cpumask_test_cpu(task_cpu(p), &p->cpus_mask))
                                goto out;
-
-                       dest_cpu = cpumask_any_distribute(&p->cpus_mask);
                }
 
                if (task_on_rq_queued(p))
-                       rq = __migrate_task(rq, &rf, p, dest_cpu);
+                       rq = __migrate_task(rq, &rf, p, arg->dest_cpu);
                else
-                       p->wake_cpu = dest_cpu;
+                       p->wake_cpu = arg->dest_cpu;
 
                /*
                 * XXX __migrate_task() can fail, at which point we might end
@@ -2606,7 +2601,7 @@ static int affine_move_task(struct rq *rq, struct task_struct *p, struct rq_flag
                        init_completion(&my_pending.done);
                        my_pending.arg = (struct migration_arg) {
                                .task = p,
-                               .dest_cpu = -1,         /* any */
+                               .dest_cpu = dest_cpu,
                                .pending = &my_pending,
                        };
 
@@ -2614,6 +2609,15 @@ static int affine_move_task(struct rq *rq, struct task_struct *p, struct rq_flag
                } else {
                        pending = p->migration_pending;
                        refcount_inc(&pending->refs);
+                       /*
+                        * Affinity has changed, but we've already installed a
+                        * pending. migration_cpu_stop() *must* see this, else
+                        * we risk a completion of the pending despite having a
+                        * task on a disallowed CPU.
+                        *
+                        * Serialized by p->pi_lock, so this is safe.
+                        */
+                       pending->arg.dest_cpu = dest_cpu;
                }
        }
        pending = p->migration_pending;
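
For illustration, the pattern the new comment in the last hunk relies on (the waiter
publishes the latest validated dest_cpu into the shared pending under the same lock the
stopper takes before reading it) can be sketched in userspace roughly like this; the
mutex and struct below are toy stand-ins, not the kernel's p->pi_lock or
struct set_affinity_pending:

  /*
   * Userspace toy, not kernel code: the mutex stands in for p->pi_lock and
   * struct pending_arg for the dest_cpu carried by the pending migration_arg.
   */
  #include <pthread.h>
  #include <stdio.h>

  struct pending_arg {
  	int dest_cpu;
  };

  static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
  static struct pending_arg pending = { .dest_cpu = 2 };

  /* Waiter side: a second affinity change while a pending is already installed. */
  static void update_pending(int new_dest)
  {
  	pthread_mutex_lock(&lock);
  	pending.dest_cpu = new_dest;	/* the stopper must observe this value */
  	pthread_mutex_unlock(&lock);
  }

  /* Stopper side: reads the argument under the same lock before migrating. */
  static void *stopper(void *unused)
  {
  	(void)unused;
  	pthread_mutex_lock(&lock);
  	printf("migrating towards CPU %d\n", pending.dest_cpu);
  	pthread_mutex_unlock(&lock);
  	return NULL;
  }

  int main(void)
  {
  	pthread_t thread;

  	update_pending(5);	/* latest validated dest_cpu wins */
  	pthread_create(&thread, NULL, stopper, NULL);
  	pthread_join(thread, NULL);
  	return 0;
  }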